feat: kubesphere 4.0 (#6115)
* feat: kubesphere 4.0 Signed-off-by: ci-bot <ci-bot@kubesphere.io> * feat: kubesphere 4.0 Signed-off-by: ci-bot <ci-bot@kubesphere.io> --------- Signed-off-by: ci-bot <ci-bot@kubesphere.io> Co-authored-by: ks-ci-bot <ks-ci-bot@example.com> Co-authored-by: joyceliu <joyceliu@yunify.com>
This commit is contained in:
committed by
GitHub
parent
b5015ec7b9
commit
447a51f08b
@@ -1,76 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package alerting
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reflectutils"
|
||||
)
|
||||
|
||||
// Options holds the endpoints and selectors used to reach the alerting
// backends.
type Options struct {
	// Endpoint is the alerting server endpoint used by alerting v1.
	Endpoint string `json:"endpoint" yaml:"endpoint"`

	// The following options are for the alerting with v2alpha1 version or higher versions

	// PrometheusEndpoint is the Prometheus service endpoint from which
	// built-in alerting rules are fetched.
	PrometheusEndpoint string `json:"prometheusEndpoint" yaml:"prometheusEndpoint"`
	// ThanosRulerEndpoint is the Thanos Ruler service endpoint from which
	// custom alerting rules are fetched.
	ThanosRulerEndpoint string `json:"thanosRulerEndpoint" yaml:"thanosRulerEndpoint"`
	// ThanosRuleResourceLabels is a comma-separated list of key=value
	// pairs used by Thanos Ruler to select PrometheusRule custom
	// resources, e.g. "thanosruler=thanos-ruler,role=custom-alerting-rules".
	ThanosRuleResourceLabels string `json:"thanosRuleResourceLabels" yaml:"thanosRuleResourceLabels"`
}
|
||||
|
||||
func NewAlertingOptions() *Options {
|
||||
return &Options{
|
||||
Endpoint: "",
|
||||
}
|
||||
}
|
||||
|
||||
// ApplyTo overrides fields of the given options with the corresponding
// fields of o (delegated to reflectutils.Override).
func (o *Options) ApplyTo(options *Options) {
	reflectutils.Override(options, o)
}
|
||||
|
||||
func (o *Options) Validate() []error {
|
||||
errs := []error{}
|
||||
|
||||
if len(o.ThanosRuleResourceLabels) > 0 {
|
||||
lblStrings := strings.Split(o.ThanosRuleResourceLabels, ",")
|
||||
for _, lblString := range lblStrings {
|
||||
if len(lblString) > 0 {
|
||||
lbl := strings.Split(lblString, "=")
|
||||
if len(lbl) != 2 {
|
||||
errs = append(errs, fmt.Errorf("invalid alerting-thanos-rule-resource-labels arg: %s", o.ThanosRuleResourceLabels))
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
// AddFlags registers the alerting command-line flags on fs, using the
// values in c as defaults.
func (o *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
	fs.StringVar(&o.Endpoint, "alerting-server-endpoint", c.Endpoint,
		"alerting server endpoint for alerting v1.")

	fs.StringVar(&o.PrometheusEndpoint, "alerting-prometheus-endpoint", c.PrometheusEndpoint,
		"Prometheus service endpoint from which built-in alerting rules are fetched(alerting v2alpha1 or higher required)")
	fs.StringVar(&o.ThanosRulerEndpoint, "alerting-thanos-ruler-endpoint", c.ThanosRulerEndpoint,
		"Thanos ruler service endpoint from which custom alerting rules are fetched(alerting v2alpha1 or higher required)")
	fs.StringVar(&o.ThanosRuleResourceLabels, "alerting-thanos-rule-resource-labels", c.ThanosRuleResourceLabels,
		"Labels used by Thanos Ruler to select PrometheusRule custom resources. eg: thanosruler=thanos-ruler,role=custom-alerting-rules (alerting v2alpha1 or higher required)")
}
|
||||
@@ -1,182 +0,0 @@
|
||||
package alerting
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/api"
|
||||
"github.com/prometheus/prometheus/model/labels"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
)
|
||||
|
||||
const (
	// apiPrefix is the Prometheus-compatible HTTP API prefix.
	apiPrefix = "/api/v1"
	// epRules is the rules endpoint of the Prometheus-compatible API.
	epRules = apiPrefix + "/rules"
	// statusAPIError is the non-standard status code (422) the rule
	// server uses for API-level errors that still carry a JSON body.
	statusAPIError = 422

	// Error types mirroring the Prometheus API error model.
	ErrBadData     ErrorType = "bad_data"
	ErrTimeout     ErrorType = "timeout"
	ErrCanceled    ErrorType = "canceled"
	ErrExec        ErrorType = "execution"
	ErrBadResponse ErrorType = "bad_response"
	ErrServer      ErrorType = "server_error"
	ErrClient      ErrorType = "client_error"
)
|
||||
|
||||
// status is the "status" field of a Prometheus-style API response
// ("success" or "error").
type status string

// ErrorType classifies an API error (see the Err* constants).
type ErrorType string

// Error is an error returned by the rule server API, carrying the
// classified type, a short message, and the raw response detail.
type Error struct {
	Type   ErrorType
	Msg    string
	Detail string
}

// Error implements the error interface as "<type>: <msg>".
func (e *Error) Error() string {
	return fmt.Sprintf("%s: %s", e.Type, e.Msg)
}
|
||||
|
||||
// response is the Prometheus-style API response envelope; Data holds the
// raw payload which is decoded by the caller.
type response struct {
	Status    status          `json:"status"`
	Data      json.RawMessage `json:"data,omitempty"`
	ErrorType ErrorType       `json:"errorType,omitempty"`
	Error     string          `json:"error,omitempty"`
	Warnings  []string        `json:"warnings,omitempty"`
}
|
||||
|
||||
// RuleClient fetches alerting rule groups from Prometheus (built-in
// rules) and Thanos Ruler (custom rules) endpoints.
type RuleClient interface {
	PrometheusRules(ctx context.Context) ([]*RuleGroup, error)
	ThanosRules(ctx context.Context, matchers ...[]*labels.Matcher) ([]*RuleGroup, error)
}

// ruleClient implements RuleClient; either client may be nil when the
// corresponding endpoint is not configured.
type ruleClient struct {
	prometheus  api.Client
	thanosruler api.Client
}
|
||||
|
||||
func (c *ruleClient) PrometheusRules(ctx context.Context) ([]*RuleGroup, error) {
|
||||
if c.prometheus != nil {
|
||||
return c.rules(c.prometheus, ctx)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *ruleClient) ThanosRules(ctx context.Context, matchers ...[]*labels.Matcher) ([]*RuleGroup, error) {
|
||||
if c.thanosruler != nil {
|
||||
return c.rules(c.thanosruler, ctx, matchers...)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *ruleClient) rules(client api.Client, ctx context.Context, matchers ...[]*labels.Matcher) ([]*RuleGroup, error) {
|
||||
u := client.URL(epRules, nil)
|
||||
q := u.Query()
|
||||
q.Add("type", "alert")
|
||||
|
||||
for _, ms := range matchers {
|
||||
vs := parser.VectorSelector{
|
||||
LabelMatchers: ms,
|
||||
}
|
||||
q.Add("match[]", vs.String())
|
||||
}
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error creating request: ")
|
||||
}
|
||||
|
||||
resp, body, _, err := c.do(client, ctx, req)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error doing request: ")
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var result struct {
|
||||
Groups []*RuleGroup
|
||||
}
|
||||
err = json.Unmarshal(body, &result)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "")
|
||||
}
|
||||
return result.Groups, nil
|
||||
}
|
||||
|
||||
// do executes req with the given client and decodes the Prometheus-style
// response envelope. It returns the raw HTTP response, the raw "data"
// payload, any warnings, and an error reflecting either the HTTP status
// or the envelope's error fields.
func (c *ruleClient) do(client api.Client, ctx context.Context, req *http.Request) (*http.Response, []byte, []string, error) {
	resp, body, e := client.Do(ctx, req)
	if e != nil {
		return resp, body, nil, e
	}

	code := resp.StatusCode

	// Non-2xx codes that are not known API error codes carry no
	// decodable envelope; classify them by status class only.
	if code/100 != 2 && !apiError(code) {
		errorType, errorMsg := errorTypeAndMsgFor(resp)
		return resp, body, nil, &Error{
			Type:   errorType,
			Msg:    errorMsg,
			Detail: string(body),
		}
	}

	var result response
	// 204 responses have no body to decode.
	if http.StatusNoContent != code {
		if jsonErr := json.Unmarshal(body, &result); jsonErr != nil {
			return resp, body, nil, &Error{
				Type: ErrBadResponse,
				Msg:  jsonErr.Error(),
			}
		}
	}

	var err error
	// An API error status code paired with a "success" body is
	// inconsistent and reported as a bad response.
	if apiError(code) && result.Status == "success" {
		err = &Error{
			Type: ErrBadResponse,
			Msg:  "inconsistent body for response code",
		}
	}
	// An explicit "error" status takes precedence over the check above.
	if result.Status == "error" {
		err = &Error{
			Type: result.ErrorType,
			Msg:  result.Error,
		}
	}

	return resp, []byte(result.Data), result.Warnings, err
}
|
||||
|
||||
func errorTypeAndMsgFor(resp *http.Response) (ErrorType, string) {
|
||||
switch resp.StatusCode / 100 {
|
||||
case 4:
|
||||
return ErrClient, fmt.Sprintf("client error: %d", resp.StatusCode)
|
||||
case 5:
|
||||
return ErrServer, fmt.Sprintf("server error: %d", resp.StatusCode)
|
||||
}
|
||||
return ErrBadResponse, fmt.Sprintf("bad response code %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
func apiError(code int) bool {
|
||||
// These are the codes that rule server sends when it returns an error.
|
||||
return code == statusAPIError || code == http.StatusBadRequest ||
|
||||
code == http.StatusServiceUnavailable || code == http.StatusInternalServerError
|
||||
}
|
||||
|
||||
func NewRuleClient(options *Options) (RuleClient, error) {
|
||||
var (
|
||||
c ruleClient
|
||||
e error
|
||||
)
|
||||
if options.PrometheusEndpoint != "" {
|
||||
c.prometheus, e = api.NewClient(api.Config{Address: options.PrometheusEndpoint})
|
||||
}
|
||||
if options.ThanosRulerEndpoint != "" {
|
||||
c.thanosruler, e = api.NewClient(api.Config{Address: options.ThanosRulerEndpoint})
|
||||
}
|
||||
return &c, e
|
||||
}
|
||||
@@ -1,100 +0,0 @@
|
||||
package alerting
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestListRules(t *testing.T) {
|
||||
var tests = []struct {
|
||||
description string
|
||||
fakeCode int
|
||||
fakeResp string
|
||||
expectError bool
|
||||
}{{
|
||||
description: "list alerting rules from prometheus endpoint",
|
||||
expectError: false,
|
||||
fakeCode: 200,
|
||||
fakeResp: `
|
||||
{
|
||||
"status": "success",
|
||||
"data": {
|
||||
"groups": [
|
||||
{
|
||||
"name": "kubernetes-resources",
|
||||
"file": "/etc/prometheus/rules/prometheus-k8s-rulefiles-0/kubesphere-monitoring-system-prometheus-k8s-rules.yaml",
|
||||
"rules": [
|
||||
{
|
||||
"state": "firing",
|
||||
"name": "KubeCPUOvercommit",
|
||||
"query": "sum(namespace:kube_pod_container_resource_requests_cpu_cores:sum) / sum(kube_node_status_allocatable{resource='cpu'}) > (count(kube_node_status_allocatable{resource='cpu'}) - 1) / count(kube_node_status_allocatable{resource='cpu'})",
|
||||
"duration": 300,
|
||||
"labels": {
|
||||
"severity": "warning"
|
||||
},
|
||||
"annotations": {
|
||||
"message": "Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure.",
|
||||
"runbook_url": "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuovercommit"
|
||||
},
|
||||
"alerts": [
|
||||
{
|
||||
"labels": {
|
||||
"alertname": "KubeCPUOvercommit",
|
||||
"severity": "warning"
|
||||
},
|
||||
"annotations": {
|
||||
"message": "Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure.",
|
||||
"runbook_url": "https://github.com/ kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuovercommit"
|
||||
},
|
||||
"state": "firing",
|
||||
"activeAt": "2020-09-22T06:18:47.55260138Z",
|
||||
"value": "4.405e-01"
|
||||
}
|
||||
],
|
||||
"health": "ok",
|
||||
"evaluationTime": 0.000894038,
|
||||
"lastEvaluation": "2020-09-22T08:57:17.566233983Z",
|
||||
"type": "alerting"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
`,
|
||||
}}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
mock := MockService(epRules, test.fakeCode, test.fakeResp)
|
||||
defer mock.Close()
|
||||
c, e := NewRuleClient(&Options{PrometheusEndpoint: mock.URL})
|
||||
if e != nil {
|
||||
t.Fatal(e)
|
||||
}
|
||||
rgs, e := c.PrometheusRules(context.TODO())
|
||||
if test.expectError {
|
||||
} else {
|
||||
if e != nil {
|
||||
t.Fatal(e)
|
||||
} else if len(rgs) == 1 && len(rgs[0].Rules) == 1 {
|
||||
|
||||
} else {
|
||||
t.Fatalf("expect %d group and %d rule but got %d group and %d rule", 1, 1, len(rgs), len(rgs[0].Rules))
|
||||
}
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func MockService(pattern string, fakeCode int, fakeResp string) *httptest.Server {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc(pattern, func(res http.ResponseWriter, req *http.Request) {
|
||||
res.WriteHeader(fakeCode)
|
||||
res.Write([]byte(fakeResp))
|
||||
})
|
||||
return httptest.NewServer(mux)
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
package alerting
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// RuleGroup mirrors one entry of the "groups" array returned by the
// Prometheus /api/v1/rules endpoint.
type RuleGroup struct {
	Name           string          `json:"name"`
	File           string          `json:"file"`
	Rules          []*AlertingRule `json:"rules"`
	Interval       float64         `json:"interval"`
	EvaluationTime float64         `json:"evaluationTime"`
	LastEvaluation *time.Time      `json:"lastEvaluation"`
}
|
||||
|
||||
// AlertingRule mirrors one alerting rule of a Prometheus rule group,
// including its currently firing/pending alerts.
type AlertingRule struct {
	// State can be "pending", "firing", "inactive".
	State       string            `json:"state"`
	Name        string            `json:"name"`
	Query       string            `json:"query"`
	Duration    float64           `json:"duration"`
	Labels      map[string]string `json:"labels"`
	Annotations map[string]string `json:"annotations"`
	Alerts      []*Alert          `json:"alerts"`
	// Health can be "ok", "err", "unknown".
	Health         string     `json:"health"`
	LastError      string     `json:"lastError,omitempty"`
	EvaluationTime *float64   `json:"evaluationTime"`
	LastEvaluation *time.Time `json:"lastEvaluation"`
	// Type of an alertingRule is always "alerting".
	Type string `json:"type"`
}
|
||||
|
||||
// Alert mirrors one active alert instance of an alerting rule as
// reported by the Prometheus API.
type Alert struct {
	Labels      map[string]string `json:"labels"`
	Annotations map[string]string `json:"annotations"`
	State       string            `json:"state"`
	ActiveAt    *time.Time        `json:"activeAt,omitempty"`
	// Value is the sample value formatted as a string (e.g. "4.405e-01").
	Value string `json:"value"`
}
|
||||
594
pkg/simple/client/application/helper.go
Normal file
594
pkg/simple/client/application/helper.go
Normal file
@@ -0,0 +1,594 @@
|
||||
package application
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"crypto/tls"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"kubesphere.io/utils/helm"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
|
||||
|
||||
pkgconstants "kubesphere.io/kubesphere/pkg/constants"
|
||||
|
||||
k8serr "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
"helm.sh/helm/v3/pkg/kube"
|
||||
helmrelease "helm.sh/helm/v3/pkg/release"
|
||||
"helm.sh/helm/v3/pkg/storage"
|
||||
"helm.sh/helm/v3/pkg/storage/driver"
|
||||
"k8s.io/apimachinery/pkg/util/validation"
|
||||
"k8s.io/cli-runtime/pkg/resource"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/golang/example/stringutil"
|
||||
"github.com/speps/go-hashids"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/idutils"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"helm.sh/helm/v3/pkg/chart"
|
||||
|
||||
"helm.sh/helm/v3/pkg/getter"
|
||||
helmrepo "helm.sh/helm/v3/pkg/repo"
|
||||
|
||||
"kubesphere.io/utils/s3"
|
||||
|
||||
"github.com/blang/semver/v4"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
appv2 "kubesphere.io/api/application/v2"
|
||||
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/constants"
|
||||
"kubesphere.io/kubesphere/pkg/scheme"
|
||||
)
|
||||
|
||||
// AppRequest describes one application version to create or update: the
// app-level metadata (name, workspace, category, ...) together with the
// version payload (version name, chart package or pull URL, ...).
type AppRequest struct {
	RepoName     string               `json:"repoName,omitempty"`
	AppName      string               `json:"appName,omitempty"`
	OriginalName string               `json:"originalName,omitempty"`
	AliasName    string               `json:"aliasName,omitempty"`
	VersionName  string               `json:"versionName,omitempty"`
	AppHome      string               `json:"appHome,omitempty"`
	Url          string               `json:"url,omitempty"`
	Icon         string               `json:"icon,omitempty"`
	Digest       string               `json:"digest,omitempty"`
	Workspace    string               `json:"workspace,omitempty"`
	Description  string               `json:"description,omitempty"`
	CategoryName string               `json:"categoryName,omitempty"`
	AppType      string               `json:"appType,omitempty"`
	// Package is the inline chart archive for uploaded (non-repo) apps.
	Package      []byte               `json:"package,omitempty"`
	PullUrl      string               `json:"pullUrl,omitempty"`
	Credential   appv2.RepoCredential `json:"credential,omitempty"`
	Maintainers  []appv2.Maintainer   `json:"maintainers,omitempty"`
	Abstraction  string               `json:"abstraction,omitempty"`
	Attachments  []string             `json:"attachments,omitempty"`
	// FromRepo marks apps synced from a helm repo (active immediately)
	// as opposed to uploaded apps (start in draft/review).
	FromRepo     bool                 `json:"fromRepo,omitempty"`
	Resources    []appv2.GroupVersionResource `json:"resources,omitempty"`
}
|
||||
|
||||
func GetMaintainers(maintainers []*chart.Maintainer) []appv2.Maintainer {
|
||||
result := make([]appv2.Maintainer, len(maintainers))
|
||||
for i, maintainer := range maintainers {
|
||||
result[i] = appv2.Maintainer{
|
||||
Name: maintainer.Name,
|
||||
Email: maintainer.Email,
|
||||
URL: maintainer.URL,
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// CreateOrUpdateApp creates or updates the Application described by the
// first request in vRequests, then creates/updates one
// ApplicationVersion per request and refreshes the app's latest-version
// annotation. owns, when non-empty, replaces the app's owner references.
func CreateOrUpdateApp(client runtimeclient.Client, vRequests []AppRequest, cmStore, ossStore s3.Interface, owns ...metav1.OwnerReference) error {
	ctx := context.Background()
	if len(vRequests) == 0 {
		return errors.New("version request is empty")
	}
	// App-level metadata is taken from the first version request.
	request := vRequests[0]

	app := appv2.Application{}
	app.Name = request.AppName

	operationResult, err := controllerutil.CreateOrUpdate(ctx, client, &app, func() error {
		app.Spec = appv2.ApplicationSpec{
			Icon:        request.Icon,
			AppHome:     request.AppHome,
			AppType:     request.AppType,
			Abstraction: request.Abstraction,
			Attachments: request.Attachments,
		}
		if len(owns) > 0 {
			app.OwnerReferences = owns
		}

		labels := app.GetLabels()
		if labels == nil {
			labels = make(map[string]string)
		}
		labels[appv2.RepoIDLabelKey] = request.RepoName
		labels[appv2.AppTypeLabelKey] = request.AppType

		// Apps without an explicit category fall back to "uncategorized".
		if request.CategoryName != "" {
			labels[appv2.AppCategoryNameKey] = request.CategoryName
		} else {
			labels[appv2.AppCategoryNameKey] = appv2.UncategorizedCategoryID
		}

		labels[constants.WorkspaceLabelKey] = request.Workspace
		app.SetLabels(labels)

		ant := app.GetAnnotations()
		if ant == nil {
			ant = make(map[string]string)
		}
		ant[constants.DisplayNameAnnotationKey] = request.AliasName
		ant[constants.DescriptionAnnotationKey] = request.Description
		ant[appv2.AppOriginalNameLabelKey] = request.OriginalName

		// Only the first maintainer's name is recorded on the app.
		if len(request.Maintainers) > 0 {
			ant[appv2.AppMaintainersKey] = request.Maintainers[0].Name
		}
		app.SetAnnotations(ant)

		return nil
	})
	if err != nil {
		klog.Errorf("failed create or update app %s, err:%v", app.Name, err)
		return err
	}

	// Newly created repo apps are active immediately; uploaded apps
	// start in draft and go through review. Existing apps keep their
	// current state.
	if operationResult == controllerutil.OperationResultCreated {
		if request.FromRepo {
			app.Status.State = appv2.ReviewStatusActive
		} else {
			app.Status.State = appv2.ReviewStatusDraft
		}
	}

	app.Status.UpdateTime = &metav1.Time{Time: time.Now()}
	// Status is updated via a JSON merge patch of the whole object.
	patch, _ := json.Marshal(app)
	if err = client.Status().Patch(ctx, &app, runtimeclient.RawPatch(runtimeclient.Merge.Type(), patch)); err != nil {
		klog.Errorf("failed to update app status, err:%v", err)
		return err
	}

	for _, vRequest := range vRequests {
		if err = CreateOrUpdateAppVersion(ctx, client, app, vRequest, cmStore, ossStore); err != nil {
			klog.Errorf("failed to create or update app version, err:%v", err)
			return err
		}
	}

	err = UpdateLatestAppVersion(ctx, client, app)
	if err != nil {
		klog.Errorf("failed to update latest app version, err:%v", err)
		return err
	}
	return nil
}
|
||||
|
||||
// CreateOrUpdateAppVersion creates or updates the ApplicationVersion for
// a single version request, uploads the chart package for non-repo
// apps, and patches the version's review status.
func CreateOrUpdateAppVersion(ctx context.Context, client runtimeclient.Client, app appv2.Application, vRequest AppRequest, cmStore, ossStore s3.Interface) error {

	//1. create or update app version
	appVersion := appv2.ApplicationVersion{}
	// Version objects are named "<app>-<version>".
	appVersion.Name = fmt.Sprintf("%s-%s", app.Name, vRequest.VersionName)

	mutateFn := func() error {
		// Owned by the app so versions are garbage-collected with it.
		if err := controllerutil.SetControllerReference(&app, &appVersion, scheme.Scheme); err != nil {
			klog.Errorf("%s SetControllerReference failed, err:%v", appVersion.Name, err)
			return err
		}
		appVersion.Spec = appv2.ApplicationVersionSpec{
			VersionName: vRequest.VersionName,
			AppHome:     vRequest.AppHome,
			Icon:        vRequest.Icon,
			Created:     &metav1.Time{Time: time.Now()},
			Digest:      vRequest.Digest,
			AppType:     vRequest.AppType,
			Maintainer:  vRequest.Maintainers,
			PullUrl:     vRequest.PullUrl,
		}
		// The finalizer ensures stored packages are cleaned up when the
		// version is deleted.
		appVersion.Finalizers = []string{appv2.StoreCleanFinalizer}

		labels := appVersion.GetLabels()
		if labels == nil {
			labels = make(map[string]string)
		}
		labels[appv2.RepoIDLabelKey] = vRequest.RepoName
		labels[appv2.AppIDLabelKey] = vRequest.AppName
		labels[appv2.AppTypeLabelKey] = vRequest.AppType
		labels[constants.WorkspaceLabelKey] = vRequest.Workspace
		appVersion.SetLabels(labels)

		ant := appVersion.GetAnnotations()
		if ant == nil {
			ant = make(map[string]string)
		}
		ant[constants.DisplayNameAnnotationKey] = vRequest.AliasName
		ant[constants.DescriptionAnnotationKey] = vRequest.Description
		// Only the first maintainer's name is recorded.
		if len(vRequest.Maintainers) > 0 {
			ant[appv2.AppMaintainersKey] = vRequest.Maintainers[0].Name
		}
		appVersion.SetAnnotations(ant)
		return nil
	}
	_, err := controllerutil.CreateOrUpdate(ctx, client, &appVersion, mutateFn)

	if err != nil {
		klog.Errorf("failed create or update app version %s, err:%v", appVersion.Name, err)
		return err
	}

	// Uploaded (non-repo) apps carry the chart package inline; persist
	// it to the configmap/object storage with failover.
	if !vRequest.FromRepo {
		err = FailOverUpload(cmStore, ossStore, appVersion.Name, bytes.NewReader(vRequest.Package), len(vRequest.Package))
		if err != nil {
			klog.Errorf("upload package failed, error: %s", err)
			return err
		}
	}

	//3. update app version status
	// Repo versions are active immediately; uploaded versions start in
	// draft and go through review.
	if vRequest.FromRepo {
		appVersion.Status.State = appv2.ReviewStatusActive
	} else {
		appVersion.Status.State = appv2.ReviewStatusDraft
	}
	appVersion.Status.Updated = &metav1.Time{Time: time.Now()}
	patch, _ := json.Marshal(appVersion)
	if err = client.Status().Patch(ctx, &appVersion, runtimeclient.RawPatch(runtimeclient.Merge.Type(), patch)); err != nil {
		klog.Errorf("failed to update app version status, err:%v", err)
		return err
	}

	return err
}
|
||||
|
||||
func UpdateLatestAppVersion(ctx context.Context, client runtimeclient.Client, app appv2.Application) (err error) {
|
||||
//4. update app latest version
|
||||
err = client.Get(ctx, runtimeclient.ObjectKey{Name: app.Name, Namespace: app.Namespace}, &app)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get app, err:%v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
appVersionList := appv2.ApplicationVersionList{}
|
||||
lbs := labels.SelectorFromSet(labels.Set{appv2.AppIDLabelKey: app.Name})
|
||||
opt := runtimeclient.ListOptions{LabelSelector: lbs}
|
||||
err = client.List(ctx, &appVersionList, &opt)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to list app version, err:%v", err)
|
||||
return err
|
||||
}
|
||||
if len(appVersionList.Items) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
latestAppVersion := appVersionList.Items[0].Spec.VersionName
|
||||
for _, v := range appVersionList.Items {
|
||||
parsedVersion, err := semver.Make(strings.TrimPrefix(v.Spec.VersionName, "v"))
|
||||
if err != nil {
|
||||
klog.Warningf("failed to parse version: %s, use first version %s", v.Spec.VersionName, latestAppVersion)
|
||||
continue
|
||||
}
|
||||
if parsedVersion.GT(semver.MustParse(strings.TrimPrefix(latestAppVersion, "v"))) {
|
||||
latestAppVersion = v.Spec.VersionName
|
||||
}
|
||||
}
|
||||
|
||||
ant := app.GetAnnotations()
|
||||
ant[appv2.LatestAppVersionKey] = latestAppVersion
|
||||
app.SetAnnotations(ant)
|
||||
err = client.Update(ctx, &app)
|
||||
return err
|
||||
}
|
||||
|
||||
func HelmPull(u string, cred appv2.RepoCredential) (*bytes.Buffer, error) {
|
||||
parsedURL, err := url.Parse(u)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var resp *bytes.Buffer
|
||||
|
||||
skipTLS := true
|
||||
if cred.InsecureSkipTLSVerify != nil && !*cred.InsecureSkipTLSVerify {
|
||||
skipTLS = false
|
||||
}
|
||||
|
||||
indexURL := parsedURL.String()
|
||||
g, _ := getter.NewHTTPGetter()
|
||||
options := []getter.Option{
|
||||
getter.WithTimeout(5 * time.Minute),
|
||||
getter.WithURL(u),
|
||||
getter.WithInsecureSkipVerifyTLS(skipTLS),
|
||||
getter.WithTLSClientConfig(cred.CertFile, cred.KeyFile, cred.CAFile),
|
||||
getter.WithBasicAuth(cred.Username, cred.Password)}
|
||||
|
||||
if skipTLS {
|
||||
options = append(options, getter.WithTransport(
|
||||
&http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
|
||||
))
|
||||
}
|
||||
|
||||
resp, err = g.Get(indexURL, options...)
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func LoadRepoIndex(u string, cred appv2.RepoCredential) (idx helmrepo.IndexFile, err error) {
|
||||
if !strings.HasSuffix(u, "/") {
|
||||
u = fmt.Sprintf("%s/index.yaml", u)
|
||||
} else {
|
||||
u = fmt.Sprintf("%sindex.yaml", u)
|
||||
}
|
||||
|
||||
resp, err := HelmPull(u, cred)
|
||||
if err != nil {
|
||||
return idx, err
|
||||
}
|
||||
if err = yaml.Unmarshal(resp.Bytes(), &idx); err != nil {
|
||||
return idx, err
|
||||
}
|
||||
idx.SortEntries()
|
||||
|
||||
return idx, nil
|
||||
}
|
||||
|
||||
func ReadYaml(data []byte) (jsonList []json.RawMessage, err error) {
|
||||
reader := bytes.NewReader(data)
|
||||
bufReader := bufio.NewReader(reader)
|
||||
r := yaml.NewYAMLReader(bufReader)
|
||||
for {
|
||||
d, err := r.Read()
|
||||
if err != nil && err == io.EOF {
|
||||
break
|
||||
}
|
||||
jsonData, err := yaml.ToJSON(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, _, err = Decode(jsonData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jsonList = append(jsonList, jsonData)
|
||||
}
|
||||
return jsonList, nil
|
||||
}
|
||||
|
||||
// Decode parses JSON bytes into an unstructured runtime object and its
// GroupVersionKind using the unstructured JSON scheme.
func Decode(data []byte) (obj runtime.Object, gvk *schema.GroupVersionKind, err error) {
	decoder := unstructured.UnstructuredJSONScheme
	obj, gvk, err = decoder.Decode(data, nil, nil)
	return obj, gvk, err
}
|
||||
|
||||
// UpdateHelmStatus checks whether all resources of the given Helm
// release are ready in the cluster and, if so, marks the release as
// "deployed" in Helm's release storage (secrets driver). It returns
// true once the release status has been updated.
func UpdateHelmStatus(kubeConfig []byte, release *helmrelease.Release) (deployed bool, err error) {
	config, err := clientcmd.RESTConfigFromKubeConfig(kubeConfig)
	if err != nil {
		klog.Errorf("failed to get rest config, err:%v", err)
		return deployed, err
	}
	clientSet, err := kubernetes.NewForConfig(config)
	if err != nil {
		klog.Errorf("failed to get kubernetes client, err:%v", err)
		return deployed, err
	}

	// Helm stores release state in secrets in the release namespace.
	actionConfig := new(action.Configuration)
	store := storage.Init(driver.NewSecrets(clientSet.CoreV1().Secrets(release.Namespace)))
	actionConfig.Releases = store

	deployed, err = checkReady(release, clientSet, kubeConfig)
	if err != nil {
		klog.Errorf("failed to check helm ready, err:%v", err)
		return deployed, err
	}
	if !deployed {
		klog.Infof("helm release %s not ready", release.Name)
		return deployed, nil
	}

	klog.Infof("helm release %s now ready", release.Name)
	release.SetStatus("deployed", "Successfully deployed")

	if err = actionConfig.Releases.Update(release); err != nil {
		klog.Errorf("failed to update release: %v", err)
		return deployed, err
	}
	klog.Infof("update release %s status successfully", release.Name)
	return true, err
}
|
||||
|
||||
// checkReady reports whether every resource of the given Helm release —
// hook resources and chart manifests alike — is ready in the cluster.
// A resource that is missing entirely is treated as permanently failed
// (false, nil) rather than an error.
func checkReady(release *helmrelease.Release, clientSet *kubernetes.Clientset, kubeConfig []byte) (allReady bool, err error) {

	checker := kube.NewReadyChecker(clientSet, nil, kube.PausedAsReady(true), kube.CheckJobs(true))

	helmConf, err := helm.InitHelmConf(kubeConfig, release.Namespace)
	if err != nil {
		klog.Errorf("failed to init helm conf, err:%v", err)
		return allReady, err
	}

	// Collect hook resources first, then the chart's own manifests.
	allResources := make([]*resource.Info, 0)
	for _, i := range release.Hooks {
		hookResources, err := helmConf.KubeClient.Build(bytes.NewBufferString(i.Manifest), false)
		if err != nil {
			klog.Errorf("failed to get helm hookResources, err:%v", err)
			return allReady, err
		}
		allResources = append(allResources, hookResources...)
	}
	klog.Infof("%s get helm hookResources %d", release.Name, len(allResources))

	chartResources, err := helmConf.KubeClient.Build(bytes.NewBufferString(release.Manifest), false)
	if err != nil {
		klog.Errorf("failed to get helm resources, err:%v", err)
		return allReady, err
	}
	allResources = append(allResources, chartResources...)
	klog.Infof("%s get helm chartResources %d", release.Name, len(chartResources))

	for idx, j := range allResources {
		kind := j.Object.GetObjectKind().GroupVersionKind().Kind
		klog.Infof("[%d/%d] check helm release %s %s: %s/%s", idx+1, len(allResources),
			release.Name, kind, j.Namespace, j.Name)
		ready, err := checker.IsReady(context.Background(), j)
		if k8serr.IsNotFound(err) {
			//pre-job-->chart-resource-->post-job
			//If a certain step times out, the subsequent steps will not be created,
			//and the status is considered failed, no repair will be made.
			klog.Warningf("[%d/%d] helm release %s resource %s: %s/%s not found", idx+1, len(allResources), release.Name, kind, j.Namespace, j.Name)
			return false, nil
		}

		if err != nil {
			klog.Errorf("failed to check resource ready, err:%v", err)
			return allReady, err
		}
		if !ready {
			klog.Infof("[%d/%d] helm release %s resource %s: %s/%s not ready", idx+1, len(allResources), release.Name, kind, j.Namespace, j.Name)
			return allReady, nil
		}
	}
	return true, nil
}
|
||||
|
||||
func GvkToGvr(gvk *schema.GroupVersionKind, mapper meta.RESTMapper) (schema.GroupVersionResource, error) {
|
||||
mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
|
||||
if meta.IsNoMatchError(err) || err != nil {
|
||||
return schema.GroupVersionResource{}, err
|
||||
}
|
||||
return mapping.Resource, nil
|
||||
}
|
||||
func GetInfoFromBytes(bytes json.RawMessage, mapper meta.RESTMapper) (gvr schema.GroupVersionResource, utd *unstructured.Unstructured, err error) {
|
||||
obj, gvk, err := Decode(bytes)
|
||||
if err != nil {
|
||||
return gvr, utd, err
|
||||
}
|
||||
gvr, err = GvkToGvr(gvk, mapper)
|
||||
if err != nil {
|
||||
return gvr, utd, err
|
||||
}
|
||||
utd, err = ConvertToUnstructured(obj)
|
||||
return gvr, utd, err
|
||||
}
|
||||
// ConvertToUnstructured converts an arbitrary typed object into an
// *unstructured.Unstructured via the default converter. On error the
// returned object wraps whatever partial map was produced.
func ConvertToUnstructured(obj any) (*unstructured.Unstructured, error) {
	objMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
	return &unstructured.Unstructured{Object: objMap}, err
}
|
||||
|
||||
func ComplianceCheck(values, tempLate []byte, mapper meta.RESTMapper, ns string) (result []json.RawMessage, err error) {
|
||||
yamlList, err := ReadYaml(values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
yamlTempList, err := ReadYaml(tempLate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(yamlTempList) != len(yamlList) {
|
||||
return nil, errors.New("yamlList and yamlTempList length not equal")
|
||||
}
|
||||
for idx := range yamlTempList {
|
||||
_, utd, err := GetInfoFromBytes(yamlList[idx], mapper)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, utdTemp, err := GetInfoFromBytes(yamlTempList[idx], mapper)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if utdTemp.GetKind() != utd.GetKind() || utdTemp.GetAPIVersion() != utd.GetAPIVersion() {
|
||||
return nil, errors.New("yamlList and yamlTempList not equal")
|
||||
}
|
||||
if utd.GetNamespace() != ns {
|
||||
return nil, errors.New("subresource must have same namespace with app release")
|
||||
}
|
||||
}
|
||||
return yamlList, nil
|
||||
}
|
||||
|
||||
func GetUuid36(prefix string) string {
|
||||
id := idutils.GetIntId()
|
||||
hd := hashids.NewData()
|
||||
hd.Alphabet = idutils.Alphabet36
|
||||
h, err := hashids.NewWithData(hd)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
i, err := h.Encode([]int{int(id)})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
//hashids.minAlphabetLength = 16
|
||||
add := stringutil.Reverse(i)[:5]
|
||||
|
||||
return prefix + add
|
||||
}
|
||||
|
||||
func GenerateShortNameMD5Hash(input string) string {
|
||||
input = strings.ToLower(input)
|
||||
errs := validation.IsDNS1123Subdomain(input)
|
||||
if len(input) > 14 || len(errs) != 0 {
|
||||
hash := md5.New()
|
||||
hash.Write([]byte(input))
|
||||
hashInBytes := hash.Sum(nil)
|
||||
hashString := hex.EncodeToString(hashInBytes)
|
||||
return hashString[:10]
|
||||
}
|
||||
return input
|
||||
}
|
||||
|
||||
func FormatVersion(input string) string {
|
||||
re := regexp.MustCompile(`[^a-z0-9-.]`)
|
||||
errs := validation.IsDNS1123Subdomain(input)
|
||||
if len(errs) != 0 {
|
||||
klog.Warningf("Version %s does not meet the Kubernetes naming standard, replacing invalid characters with '-'", input)
|
||||
input = re.ReplaceAllStringFunc(input, func(s string) string {
|
||||
return "-"
|
||||
})
|
||||
}
|
||||
return input
|
||||
}
|
||||
|
||||
func GetHelmKubeConfig(ctx context.Context, cluster *clusterv1alpha1.Cluster, runClient client.Client) (config []byte, err error) {
|
||||
|
||||
if cluster.Spec.Connection.Type == clusterv1alpha1.ConnectionTypeProxy {
|
||||
klog.Infof("cluster %s is proxy cluster", cluster.Name)
|
||||
secret := &corev1.Secret{}
|
||||
key := types.NamespacedName{Namespace: pkgconstants.KubeSphereNamespace, Name: "kubeconfig-admin"}
|
||||
err = runClient.Get(ctx, key, secret)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get kubeconfig-admin secret: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
config = secret.Data["config"]
|
||||
return config, err
|
||||
}
|
||||
return cluster.Spec.Connection.KubeConfig, nil
|
||||
}
|
||||
171
pkg/simple/client/application/store.go
Normal file
171
pkg/simple/client/application/store.go
Normal file
@@ -0,0 +1,171 @@
|
||||
package application
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"io"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
s3lib "github.com/aws/aws-sdk-go/service/s3"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
appv2 "kubesphere.io/api/application/v2"
|
||||
"kubesphere.io/utils/s3"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
runtimeclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
// CmStore persists binary payloads (packaged charts and similar artifacts)
// as ConfigMap objects in the application namespace. It serves as the
// fallback storage backend when no S3-compatible object store is configured.
type CmStore struct {
	// Client is the controller-runtime client used for all ConfigMap
	// reads, creates, and deletes.
	Client runtimeclient.Client
}

// Compile-time assertion that CmStore implements the s3 storage interface.
var _ s3.Interface = CmStore{}
|
||||
|
||||
func InitStore(s3opts *s3.Options, Client client.Client) (cmStore, ossStore s3.Interface, err error) {
|
||||
if s3opts != nil && len(s3opts.Endpoint) != 0 {
|
||||
klog.Infof("init s3 client with endpoint: %s", s3opts.Endpoint)
|
||||
var err error
|
||||
ossStore, err = s3.NewS3Client(s3opts)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create s3 client: %v", err)
|
||||
}
|
||||
}
|
||||
klog.Infof("init configmap store")
|
||||
cmStore = CmStore{
|
||||
Client: Client,
|
||||
}
|
||||
return cmStore, ossStore, nil
|
||||
}
|
||||
|
||||
func (c CmStore) Read(key string) ([]byte, error) {
|
||||
cm := corev1.ConfigMap{}
|
||||
nameKey := runtimeclient.ObjectKey{Name: key, Namespace: appv2.ApplicationNamespace}
|
||||
err := c.Client.Get(context.TODO(), nameKey, &cm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cm.BinaryData[appv2.BinaryKey], nil
|
||||
}
|
||||
|
||||
func (c CmStore) Upload(key, fileName string, body io.Reader, size int) error {
|
||||
data, _ := io.ReadAll(body)
|
||||
obj := corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: key,
|
||||
Namespace: appv2.ApplicationNamespace,
|
||||
},
|
||||
BinaryData: map[string][]byte{appv2.BinaryKey: data},
|
||||
}
|
||||
err := c.Client.Create(context.TODO(), &obj)
|
||||
//ignore already exists error
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
klog.Warningf("save to store ignore already exists %s", key)
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c CmStore) Delete(ids []string) error {
|
||||
for _, id := range ids {
|
||||
obj := corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: id,
|
||||
Namespace: appv2.ApplicationNamespace,
|
||||
},
|
||||
}
|
||||
err := c.Client.Delete(context.TODO(), &obj)
|
||||
if err != nil && apierrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FailOverGet fetches the stored payload for key, preferring the object
// store and falling back to the ConfigMap store.
//
// When isApp is true and the app version originated from an external helm
// repo, the chart is re-downloaded from that repo instead of being read
// from local storage.
func FailOverGet(cm, oss s3.Interface, key string, cli client.Client, isApp bool) (data []byte, err error) {

	if isApp {
		fromRepo, pullUrl, repoName, err := FromRepo(cli, key)
		if err != nil {
			klog.Errorf("failed to get app version, err: %v", err)
			return nil, err
		}
		if fromRepo {
			return DownLoadChart(cli, pullUrl, repoName)
		}
	}

	// No object store configured: the ConfigMap store is authoritative.
	if oss == nil {
		klog.Infof("read from configMap %s", key)
		return cm.Read(key)
	}
	klog.Infof("read from oss %s", key)
	data, err = oss.Read(key)
	if err != nil {
		var aerr awserr.Error
		// Only a missing key triggers the ConfigMap fallback; any other
		// s3 error is returned to the caller unchanged.
		if errors.As(err, &aerr) && aerr.Code() == s3lib.ErrCodeNoSuchKey {
			klog.Infof("FailOver read from configMap %s", key)
			return cm.Read(key)
		}
	}
	return data, err
}
|
||||
|
||||
func FromRepo(cli runtimeclient.Client, key string) (fromRepo bool, url, repoName string, err error) {
|
||||
|
||||
appVersion := appv2.ApplicationVersion{}
|
||||
err = cli.Get(context.TODO(), client.ObjectKey{Name: key}, &appVersion)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get app version, err: %v", err)
|
||||
return fromRepo, url, repoName, err
|
||||
}
|
||||
if appVersion.Spec.PullUrl != "" {
|
||||
klog.Infof("load chart from pull url: %s", appVersion.Spec.PullUrl)
|
||||
} else {
|
||||
klog.Infof("load chart from local store")
|
||||
}
|
||||
fromRepo = appVersion.GetLabels()[appv2.RepoIDLabelKey] != appv2.UploadRepoKey
|
||||
repoName = appVersion.GetLabels()[appv2.RepoIDLabelKey]
|
||||
return fromRepo, appVersion.Spec.PullUrl, repoName, nil
|
||||
}
|
||||
|
||||
func DownLoadChart(cli runtimeclient.Client, pullUrl, repoName string) (data []byte, err error) {
|
||||
|
||||
repo := appv2.Repo{}
|
||||
err = cli.Get(context.TODO(), client.ObjectKey{Name: repoName}, &repo)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to get app repo, err: %v", err)
|
||||
return data, err
|
||||
}
|
||||
buf, err := HelmPull(pullUrl, repo.Spec.Credential)
|
||||
if err != nil {
|
||||
klog.Errorf("load chart failed, error: %s", err)
|
||||
return data, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func FailOverUpload(cm, oss s3.Interface, key string, body io.Reader, size int) error {
|
||||
if oss == nil {
|
||||
klog.Infof("upload to cm %s", key)
|
||||
return cm.Upload(key, key, body, size)
|
||||
}
|
||||
klog.Infof("upload to oss %s", key)
|
||||
return oss.Upload(key, key, body, size)
|
||||
}
|
||||
func FailOverDelete(cm, oss s3.Interface, key []string) error {
|
||||
if oss == nil {
|
||||
klog.Infof("delete from cm %v", key)
|
||||
return cm.Delete(key)
|
||||
}
|
||||
klog.Infof("delete from oss %v", key)
|
||||
return oss.Delete(key)
|
||||
}
|
||||
97
pkg/simple/client/application/yaml.go
Normal file
97
pkg/simple/client/application/yaml.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package application
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
helmrelease "helm.sh/helm/v3/pkg/release"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/klog/v2"
|
||||
"kubesphere.io/utils/helm"
|
||||
)
|
||||
|
||||
// Compile-time assertion that YamlInstaller implements helm.Executor.
var _ helm.Executor = &YamlInstaller{}

// YamlInstaller implements helm.Executor for applications distributed as
// plain Kubernetes yaml manifests rather than packaged helm charts. Install
// and readiness hooks are no-ops; Upgrade server-side-applies the manifests
// and Uninstall deletes the tracked resources.
type YamlInstaller struct {
	// Mapper resolves decoded GVKs to resources for the dynamic client.
	Mapper meta.RESTMapper
	// DynamicCli performs the actual apply/delete calls.
	DynamicCli *dynamic.DynamicClient
	// GvrListInfo records every resource this release created, consumed by
	// Uninstall.
	GvrListInfo []InsInfo
	// Namespace is the release's target namespace.
	Namespace string
}

// InsInfo identifies one installed resource instance: its GVR plus name and
// namespace.
type InsInfo struct {
	schema.GroupVersionResource
	Name string
	Namespace string
}
|
||||
|
||||
// Install is a no-op that exists only to satisfy helm.Executor; yaml-based
// releases are created through Upgrade instead.
func (t YamlInstaller) Install(ctx context.Context, release, chart string, values []byte, options ...helm.HelmOption) (string, error) {
	return "", nil
}
|
||||
|
||||
func (t YamlInstaller) Upgrade(ctx context.Context, release, chart string, values []byte, options ...helm.HelmOption) (string, error) {
|
||||
yamlList, err := ReadYaml(values)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
klog.Infof("attempting to apply %d yaml files", len(yamlList))
|
||||
|
||||
err = t.ForApply(yamlList)
|
||||
|
||||
return "", err
|
||||
}
|
||||
|
||||
func (t YamlInstaller) Uninstall(ctx context.Context, release string, options ...helm.HelmOption) (string, error) {
|
||||
for _, i := range t.GvrListInfo {
|
||||
err := t.DynamicCli.Resource(i.GroupVersionResource).Namespace(i.Namespace).
|
||||
Delete(ctx, i.Name, metav1.DeleteOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// ForceDelete removes the release's resources by delegating to Uninstall
// and discarding its (always empty) message.
func (t YamlInstaller) ForceDelete(ctx context.Context, release string, options ...helm.HelmOption) error {
	_, err := t.Uninstall(ctx, release, options...)
	return err
}
|
||||
|
||||
func (t YamlInstaller) Get(ctx context.Context, releaseName string, options ...helm.HelmOption) (*helmrelease.Release, error) {
|
||||
rv := &helmrelease.Release{}
|
||||
rv.Info = &helmrelease.Info{Status: helmrelease.StatusDeployed}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
// WaitingForResourcesReady always reports ready; readiness of yaml-applied
// resources is not tracked by this executor.
func (t YamlInstaller) WaitingForResourcesReady(ctx context.Context, release string, timeout time.Duration, options ...helm.HelmOption) (bool, error) {
	return true, nil
}
|
||||
|
||||
func (t YamlInstaller) ForApply(tasks []json.RawMessage) (err error) {
|
||||
|
||||
for idx, js := range tasks {
|
||||
|
||||
gvr, utd, err := GetInfoFromBytes(js, t.Mapper)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opt := metav1.PatchOptions{FieldManager: "v1.FieldManager"}
|
||||
_, err = t.DynamicCli.Resource(gvr).
|
||||
Namespace(utd.GetNamespace()).
|
||||
Patch(context.TODO(), utd.GetName(), types.ApplyPatchType, js, opt)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
klog.Infof("[%d/%d] %s/%s applied", idx+1, len(tasks), gvr.Resource, utd.GetName())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,216 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/auditing"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es/query"
|
||||
)
|
||||
|
||||
var json = jsoniter.ConfigCompatibleWithStandardLibrary
|
||||
|
||||
type client struct {
|
||||
c *es.Client
|
||||
}
|
||||
|
||||
func (c *client) SearchAuditingEvent(filter *auditing.Filter, from, size int64,
|
||||
sort string) (*auditing.Events, error) {
|
||||
|
||||
b := query.NewBuilder().
|
||||
WithQuery(parseToQueryPart(filter)).
|
||||
WithSort("RequestReceivedTimestamp", sort).
|
||||
WithFrom(from).
|
||||
WithSize(size)
|
||||
|
||||
resp, err := c.c.Search(b, filter.StartTime, filter.EndTime, false)
|
||||
if err != nil || resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
events := &auditing.Events{Total: c.c.GetTotalHitCount(resp.Total)}
|
||||
for _, hit := range resp.AllHits {
|
||||
events.Records = append(events.Records, hit.Source)
|
||||
}
|
||||
return events, nil
|
||||
}
|
||||
|
||||
func (c *client) CountOverTime(filter *auditing.Filter, interval string) (*auditing.Histogram, error) {
|
||||
|
||||
if interval == "" {
|
||||
interval = "15m"
|
||||
}
|
||||
|
||||
b := query.NewBuilder().
|
||||
WithQuery(parseToQueryPart(filter)).
|
||||
WithAggregations(query.NewAggregations().
|
||||
WithDateHistogramAggregation("RequestReceivedTimestamp", interval)).
|
||||
WithSize(0)
|
||||
|
||||
resp, err := c.c.Search(b, filter.StartTime, filter.EndTime, false)
|
||||
if err != nil || resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
h := auditing.Histogram{Total: c.c.GetTotalHitCount(resp.Total)}
|
||||
for _, bucket := range resp.Buckets {
|
||||
h.Buckets = append(h.Buckets,
|
||||
auditing.Bucket{Time: bucket.Key, Count: bucket.Count})
|
||||
}
|
||||
return &h, nil
|
||||
}
|
||||
|
||||
func (c *client) StatisticsOnResources(filter *auditing.Filter) (*auditing.Statistics, error) {
|
||||
|
||||
b := query.NewBuilder().
|
||||
WithQuery(parseToQueryPart(filter)).
|
||||
WithAggregations(query.NewAggregations().
|
||||
WithCardinalityAggregation("AuditID.keyword")).
|
||||
WithSize(0)
|
||||
|
||||
resp, err := c.c.Search(b, filter.StartTime, filter.EndTime, false)
|
||||
if err != nil || resp == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &auditing.Statistics{
|
||||
Resources: resp.Value,
|
||||
Events: c.c.GetTotalHitCount(resp.Total),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewClient(options *auditing.Options) (auditing.Client, error) {
|
||||
c := &client{}
|
||||
|
||||
var err error
|
||||
c.c, err = es.NewClient(options.Host, options.BasicAuth, options.Username, options.Password, options.IndexPrefix, options.Version)
|
||||
return c, err
|
||||
}
|
||||
|
||||
func parseToQueryPart(f *auditing.Filter) *query.Query {
|
||||
if f == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var mini int32 = 1
|
||||
b := query.NewBool()
|
||||
|
||||
bi := query.NewBool().WithMinimumShouldMatch(mini)
|
||||
for k, v := range f.ObjectRefNamespaceMap {
|
||||
bi.AppendShould(query.NewBool().
|
||||
AppendFilter(query.NewMatchPhrase("ObjectRef.Namespace.keyword", k)).
|
||||
AppendFilter(query.NewRange("RequestReceivedTimestamp").
|
||||
WithGTE(v)))
|
||||
}
|
||||
|
||||
for k, v := range f.WorkspaceMap {
|
||||
bi.AppendShould(query.NewBool().
|
||||
AppendFilter(query.NewMatchPhrase("Workspace.keyword", k)).
|
||||
AppendFilter(query.NewRange("RequestReceivedTimestamp").
|
||||
WithGTE(v)))
|
||||
}
|
||||
|
||||
b.AppendFilter(bi)
|
||||
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrase("ObjectRef.Namespace.keyword", f.ObjectRefNamespaces)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
|
||||
bi = query.NewBool().WithMinimumShouldMatch(mini)
|
||||
for _, ns := range f.ObjectRefNamespaceFuzzy {
|
||||
bi.AppendShould(query.NewWildcard("ObjectRef.Namespace.keyword", fmt.Sprintf("*"+ns+"*")))
|
||||
}
|
||||
b.AppendFilter(bi)
|
||||
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrase("Workspace.keyword", f.Workspaces)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
|
||||
bi = query.NewBool().WithMinimumShouldMatch(mini)
|
||||
for _, ws := range f.WorkspaceFuzzy {
|
||||
bi.AppendShould(query.NewWildcard("Workspace.keyword", fmt.Sprintf("*"+ws+"*")))
|
||||
}
|
||||
b.AppendFilter(bi)
|
||||
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrase("ObjectRef.Name.keyword", f.ObjectRefNames)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
|
||||
bi = query.NewBool().WithMinimumShouldMatch(mini)
|
||||
for _, name := range f.ObjectRefNameFuzzy {
|
||||
bi.AppendShould(query.NewWildcard("ObjectRef.Name.keyword", fmt.Sprintf("*"+name+"*")))
|
||||
}
|
||||
b.AppendFilter(bi)
|
||||
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrase("Verb.keyword", f.Verbs)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrase("Level.keyword", f.Levels)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
|
||||
bi = query.NewBool().WithMinimumShouldMatch(mini)
|
||||
for _, ip := range f.SourceIpFuzzy {
|
||||
bi.AppendShould(query.NewWildcard("SourceIPs.keyword", fmt.Sprintf("*"+ip+"*")))
|
||||
}
|
||||
b.AppendFilter(bi)
|
||||
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrase("User.Username.keyword", f.Users)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
|
||||
bi = query.NewBool().WithMinimumShouldMatch(mini)
|
||||
for _, user := range f.UserFuzzy {
|
||||
bi.AppendShould(query.NewWildcard("User.Username.keyword", fmt.Sprintf("*"+user+"*")))
|
||||
}
|
||||
b.AppendFilter(bi)
|
||||
|
||||
bi = query.NewBool().WithMinimumShouldMatch(mini)
|
||||
for _, group := range f.GroupFuzzy {
|
||||
bi.AppendShould(query.NewWildcard("User.Groups.keyword", fmt.Sprintf("*"+group+"*")))
|
||||
}
|
||||
b.AppendFilter(bi)
|
||||
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrasePrefix("ObjectRef.Resource", f.ObjectRefResources)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrasePrefix("ObjectRef.Subresource", f.ObjectRefSubresources)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendShould(query.NewTerms("ResponseStatus.code", f.ResponseCodes)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrase("ResponseStatus.status.keyword", f.ResponseStatus)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
|
||||
r := query.NewRange("RequestReceivedTimestamp")
|
||||
if !f.StartTime.IsZero() {
|
||||
r.WithGTE(f.StartTime)
|
||||
}
|
||||
if !f.EndTime.IsZero() {
|
||||
r.WithLTE(f.EndTime)
|
||||
}
|
||||
|
||||
b.AppendFilter(r)
|
||||
|
||||
return query.NewQuery().WithBool(b)
|
||||
}
|
||||
@@ -1,396 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/auditing"
|
||||
)
|
||||
|
||||
func MockElasticsearchService(pattern string, fakeCode int, fakeResp string) *httptest.Server {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc(pattern, func(res http.ResponseWriter, req *http.Request) {
|
||||
res.WriteHeader(fakeCode)
|
||||
_, _ = res.Write([]byte(fakeResp))
|
||||
})
|
||||
return httptest.NewServer(mux)
|
||||
}
|
||||
|
||||
func TestStatisticsOnResources(t *testing.T) {
|
||||
var tests = []struct {
|
||||
description string
|
||||
filter auditing.Filter
|
||||
fakeVersion string
|
||||
fakeCode int
|
||||
fakeResp string
|
||||
expected auditing.Statistics
|
||||
expectedError bool
|
||||
}{{
|
||||
description: "ES index exists",
|
||||
filter: auditing.Filter{},
|
||||
fakeVersion: "6",
|
||||
fakeCode: 200,
|
||||
fakeResp: `
|
||||
{
|
||||
"took": 16,
|
||||
"timed_out": false,
|
||||
"_shards": {
|
||||
"total": 1,
|
||||
"successful": 1,
|
||||
"skipped": 0,
|
||||
"failed": 0
|
||||
},
|
||||
"hits": {
|
||||
"total": 10000,
|
||||
"max_score": null,
|
||||
"hits": [
|
||||
|
||||
]
|
||||
},
|
||||
"aggregations": {
|
||||
"cardinality_aggregation": {
|
||||
"value": 100
|
||||
}
|
||||
}
|
||||
}
|
||||
`,
|
||||
expected: auditing.Statistics{
|
||||
Events: 10000,
|
||||
Resources: 100,
|
||||
},
|
||||
expectedError: false,
|
||||
}, {
|
||||
description: "ES index not exists",
|
||||
filter: auditing.Filter{},
|
||||
fakeVersion: "6",
|
||||
fakeCode: 404,
|
||||
fakeResp: `
|
||||
{
|
||||
"error": {
|
||||
"root_cause": [
|
||||
{
|
||||
"type": "index_not_found_exception",
|
||||
"reason": "no such index [events]",
|
||||
"resource.type": "index_or_alias",
|
||||
"resource.id": "events",
|
||||
"index_uuid": "_na_",
|
||||
"index": "events"
|
||||
}
|
||||
],
|
||||
"type": "index_not_found_exception",
|
||||
"reason": "no such index [events]",
|
||||
"resource.type": "index_or_alias",
|
||||
"resource.id": "events",
|
||||
"index_uuid": "_na_",
|
||||
"index": "events"
|
||||
},
|
||||
"status": 404
|
||||
}
|
||||
`,
|
||||
expectedError: true,
|
||||
}}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.description, func(t *testing.T) {
|
||||
mes := MockElasticsearchService("/", test.fakeCode, test.fakeResp)
|
||||
defer mes.Close()
|
||||
|
||||
c, err := NewClient(&auditing.Options{Host: mes.URL, IndexPrefix: "ks-logstash-events", Version: test.fakeVersion})
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
stats, err := c.StatisticsOnResources(&test.filter)
|
||||
|
||||
if test.expectedError {
|
||||
if err == nil {
|
||||
t.Fatalf("expected err like %s", test.fakeResp)
|
||||
} else if !strings.Contains(err.Error(), "index_not_found_exception") {
|
||||
t.Fatalf("err does not contain expected code: %d", test.fakeCode)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
} else if diff := cmp.Diff(stats, &test.expected); diff != "" {
|
||||
t.Fatalf("%T differ (-got, +want): %s", test.expected, diff)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseToQueryPart(t *testing.T) {
|
||||
q := `
|
||||
{
|
||||
"query":{
|
||||
"bool":{
|
||||
"filter":[
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"bool":{
|
||||
"filter":[
|
||||
{
|
||||
"match_phrase":{
|
||||
"ObjectRef.Namespace.keyword":"kubesphere-system"
|
||||
}
|
||||
},
|
||||
{
|
||||
"range":{
|
||||
"RequestReceivedTimestamp":{
|
||||
"gte":"2020-01-01T01:01:01.000000001Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"filter":[
|
||||
{
|
||||
"match_phrase":{
|
||||
"Workspace.keyword":"system-workspace"
|
||||
}
|
||||
},
|
||||
{
|
||||
"range":{
|
||||
"RequestReceivedTimestamp":{
|
||||
"gte":"2020-01-01T01:01:01.000000001Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"match_phrase":{
|
||||
"ObjectRef.Name.keyword":"devops"
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"wildcard":{
|
||||
"ObjectRef.Name.keyword":"*dev*"
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"match_phrase":{
|
||||
"Verb.keyword":"create"
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"match_phrase":{
|
||||
"Level.keyword":"Metadata"
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"wildcard":{
|
||||
"SourceIPs.keyword":"*192.168*"
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"match_phrase":{
|
||||
"User.Username.keyword":"system:serviceaccount:kubesphere-system:kubesphere"
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"wildcard":{
|
||||
"User.Username.keyword":"*system*"
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"wildcard":{
|
||||
"User.Groups.keyword":"*system*"
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"match_phrase_prefix":{
|
||||
"ObjectRef.Resource":"pods"
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"match_phrase_prefix":{
|
||||
"ObjectRef.Subresource":"exec"
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"terms":{
|
||||
"ResponseStatus.code":[
|
||||
404
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"bool":{
|
||||
"should":[
|
||||
{
|
||||
"match_phrase":{
|
||||
"ResponseStatus.status.keyword":"Failure"
|
||||
}
|
||||
}
|
||||
],
|
||||
"minimum_should_match":1
|
||||
}
|
||||
},
|
||||
{
|
||||
"range":{
|
||||
"RequestReceivedTimestamp":{
|
||||
"gte":"2019-12-01T01:01:01.000000001Z",
|
||||
"lte":"2020-01-01T01:01:01.000000001Z"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
nsCreateTime := time.Date(2020, time.Month(1), 1, 1, 1, 1, 1, time.UTC)
|
||||
startTime := nsCreateTime.AddDate(0, -1, 0)
|
||||
endTime := nsCreateTime.AddDate(0, 0, 0)
|
||||
|
||||
filter := &auditing.Filter{
|
||||
ObjectRefNamespaceMap: map[string]time.Time{
|
||||
"kubesphere-system": nsCreateTime,
|
||||
},
|
||||
WorkspaceMap: map[string]time.Time{
|
||||
"system-workspace": nsCreateTime,
|
||||
},
|
||||
ObjectRefNames: []string{"devops"},
|
||||
ObjectRefNameFuzzy: []string{"dev"},
|
||||
Levels: []string{"Metadata"},
|
||||
Verbs: []string{"create"},
|
||||
Users: []string{"system:serviceaccount:kubesphere-system:kubesphere"},
|
||||
UserFuzzy: []string{"system"},
|
||||
GroupFuzzy: []string{"system"},
|
||||
SourceIpFuzzy: []string{"192.168"},
|
||||
ObjectRefResources: []string{"pods"},
|
||||
ObjectRefSubresources: []string{"exec"},
|
||||
ResponseCodes: []int32{404},
|
||||
ResponseStatus: []string{"Failure"},
|
||||
StartTime: startTime,
|
||||
EndTime: endTime,
|
||||
}
|
||||
|
||||
qp := parseToQueryPart(filter)
|
||||
bs, err := json.Marshal(qp)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
queryPart := &map[string]interface{}{}
|
||||
if err := json.Unmarshal(bs, queryPart); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
expectedQueryPart := &map[string]interface{}{}
|
||||
if err := json.Unmarshal([]byte(q), expectedQueryPart); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
assert.Equal(t, expectedQueryPart, queryPart)
|
||||
}
|
||||
@@ -1,71 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auditing
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type Client interface {
|
||||
SearchAuditingEvent(filter *Filter, from, size int64, sort string) (*Events, error)
|
||||
CountOverTime(filter *Filter, interval string) (*Histogram, error)
|
||||
StatisticsOnResources(filter *Filter) (*Statistics, error)
|
||||
}
|
||||
|
||||
type Filter struct {
|
||||
ObjectRefNamespaceMap map[string]time.Time
|
||||
WorkspaceMap map[string]time.Time
|
||||
ObjectRefNamespaces []string
|
||||
ObjectRefNamespaceFuzzy []string
|
||||
Workspaces []string
|
||||
WorkspaceFuzzy []string
|
||||
ObjectRefNames []string
|
||||
ObjectRefNameFuzzy []string
|
||||
Levels []string
|
||||
Verbs []string
|
||||
Users []string
|
||||
UserFuzzy []string
|
||||
GroupFuzzy []string
|
||||
SourceIpFuzzy []string
|
||||
ObjectRefResources []string
|
||||
ObjectRefSubresources []string
|
||||
ResponseCodes []int32
|
||||
ResponseStatus []string
|
||||
StartTime time.Time
|
||||
EndTime time.Time
|
||||
}
|
||||
|
||||
type Event map[string]interface{}
|
||||
|
||||
type Events struct {
|
||||
Total int64 `json:"total" description:"total number of matched results"`
|
||||
Records []interface{} `json:"records" description:"actual array of results"`
|
||||
}
|
||||
|
||||
type Histogram struct {
|
||||
Total int64 `json:"total" description:"total number of events"`
|
||||
Buckets []Bucket `json:"buckets" description:"actual array of histogram results"`
|
||||
}
|
||||
type Bucket struct {
|
||||
Time int64 `json:"time" description:"timestamp"`
|
||||
Count int64 `json:"count" description:"total number of events at intervals"`
|
||||
}
|
||||
|
||||
type Statistics struct {
|
||||
Resources int64 `json:"resources" description:"total number of resources"`
|
||||
Events int64 `json:"events" description:"total number of events"`
|
||||
}
|
||||
@@ -1,100 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auditing
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reflectutils"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
Enable bool `json:"enable" yaml:"enable"`
|
||||
WebhookUrl string `json:"webhookUrl" yaml:"webhookUrl"`
|
||||
// The maximum concurrent senders which send auditing events to the auditing webhook.
|
||||
EventSendersNum int `json:"eventSendersNum" yaml:"eventSendersNum"`
|
||||
// The batch size of auditing events.
|
||||
EventBatchSize int `json:"eventBatchSize" yaml:"eventBatchSize"`
|
||||
// The batch interval of auditing events.
|
||||
EventBatchInterval time.Duration `json:"eventBatchInterval" yaml:"eventBatchInterval"`
|
||||
Host string `json:"host" yaml:"host"`
|
||||
BasicAuth bool `json:"basicAuth" yaml:"basicAuth"`
|
||||
Username string `json:"username" yaml:"username"`
|
||||
Password string `json:"password" yaml:"password"`
|
||||
IndexPrefix string `json:"indexPrefix,omitempty" yaml:"indexPrefix,omitempty"`
|
||||
Version string `json:"version" yaml:"version"`
|
||||
}
|
||||
|
||||
func NewAuditingOptions() *Options {
|
||||
return &Options{
|
||||
Host: "",
|
||||
IndexPrefix: "ks-logstash-auditing",
|
||||
Version: "",
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Options) ApplyTo(options *Options) {
|
||||
if s.Host != "" {
|
||||
reflectutils.Override(options, s)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Options) Validate() []error {
|
||||
errs := make([]error, 0)
|
||||
return errs
|
||||
}
|
||||
|
||||
func (s *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
|
||||
fs.BoolVar(&s.Enable, "auditing-enabled", c.Enable, "Enable auditing component or not. ")
|
||||
|
||||
fs.StringVar(&s.WebhookUrl, "auditing-webhook-url", c.WebhookUrl, "Auditing wehook url")
|
||||
|
||||
fs.BoolVar(&s.BasicAuth, "auditing-elasticsearch-basicAuth", c.BasicAuth, ""+
|
||||
"Elasticsearch auditing service basic auth enabled. KubeSphere is using elastic as auditing store, "+
|
||||
"if it is set to true, KubeSphere will connect to ElasticSearch using provided username and password by "+
|
||||
"auditing-elasticsearch-username and auditing-elasticsearch-username. Otherwise, KubeSphere will "+
|
||||
"anonymously access the Elasticsearch.")
|
||||
|
||||
fs.StringVar(&s.Username, "auditing-elasticsearch-username", c.Username, ""+
|
||||
"ElasticSearch authentication username, only needed when auditing-elasticsearch-basicAuth is"+
|
||||
"set to true. ")
|
||||
|
||||
fs.StringVar(&s.Password, "auditing-elasticsearch-password", c.Password, ""+
|
||||
"ElasticSearch authentication password, only needed when auditing-elasticsearch-basicAuth is"+
|
||||
"set to true. ")
|
||||
|
||||
fs.IntVar(&s.EventSendersNum, "auditing-event-senders-num", c.EventSendersNum,
|
||||
"The maximum concurrent senders which send auditing events to the auditing webhook.")
|
||||
fs.IntVar(&s.EventBatchSize, "auditing-event-batch-size", c.EventBatchSize,
|
||||
"The batch size of auditing events.")
|
||||
fs.DurationVar(&s.EventBatchInterval, "auditing-event-batch-interval", c.EventBatchInterval,
|
||||
"The batch interval of auditing events.")
|
||||
|
||||
fs.StringVar(&s.Host, "auditing-elasticsearch-host", c.Host, ""+
|
||||
"Elasticsearch service host. KubeSphere is using elastic as auditing store, "+
|
||||
"if this filed left blank, KubeSphere will use kubernetes builtin event API instead, and"+
|
||||
" the following elastic search options will be ignored.")
|
||||
|
||||
fs.StringVar(&s.IndexPrefix, "auditing-index-prefix", c.IndexPrefix, ""+
|
||||
"Index name prefix. KubeSphere will retrieve auditing against indices matching the prefix.")
|
||||
|
||||
fs.StringVar(&s.Version, "auditing-elasticsearch-version", c.Version, ""+
|
||||
"Elasticsearch major version, e.g. 5/6/7, if left blank, will detect automatically."+
|
||||
"Currently, minimum supported version is 5.x")
|
||||
}
|
||||
16
pkg/simple/client/cache/cache.go
vendored
16
pkg/simple/client/cache/cache.go
vendored
@@ -1,19 +1,3 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
|
||||
78
pkg/simple/client/cache/inmemory_cache.go
vendored
78
pkg/simple/client/cache/inmemory_cache.go
vendored
@@ -1,24 +1,9 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/server/options"
|
||||
@@ -62,13 +47,25 @@ type InMemoryCacheOptions struct {
|
||||
|
||||
// imMemoryCache implements cache.Interface use memory objects, it should be used only for testing
|
||||
type inMemoryCache struct {
|
||||
store *threadSafeStore
|
||||
}
|
||||
|
||||
type threadSafeStore struct {
|
||||
store map[string]simpleObject
|
||||
mutex sync.RWMutex
|
||||
}
|
||||
|
||||
func newThreadSafeStore() *threadSafeStore {
|
||||
return &threadSafeStore{
|
||||
store: make(map[string]simpleObject),
|
||||
mutex: sync.RWMutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func NewInMemoryCache(options *InMemoryCacheOptions, stopCh <-chan struct{}) (Interface, error) {
|
||||
var cleanupPeriod time.Duration
|
||||
cache := &inMemoryCache{
|
||||
store: make(map[string]simpleObject),
|
||||
store: newThreadSafeStore(),
|
||||
}
|
||||
|
||||
if options == nil || options.CleanupPeriod == 0 {
|
||||
@@ -82,13 +79,42 @@ func NewInMemoryCache(options *InMemoryCacheOptions, stopCh <-chan struct{}) (In
|
||||
}
|
||||
|
||||
func (s *inMemoryCache) cleanInvalidToken() {
|
||||
for k, v := range s.store {
|
||||
if v.IsExpired() {
|
||||
delete(s.store, k)
|
||||
for _, k := range s.store.Keys() {
|
||||
if v, ok := s.store.Get(k); ok && v.IsExpired() {
|
||||
s.store.Delete(k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *threadSafeStore) Delete(key string) {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
delete(s.store, key)
|
||||
}
|
||||
|
||||
func (s *threadSafeStore) Get(key string) (simpleObject, bool) {
|
||||
s.mutex.RLock()
|
||||
defer s.mutex.RUnlock()
|
||||
object, exist := s.store[key]
|
||||
return object, exist
|
||||
}
|
||||
|
||||
func (s *threadSafeStore) Set(key string, obj simpleObject) {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
s.store[key] = obj
|
||||
}
|
||||
|
||||
func (s *threadSafeStore) Keys() []string {
|
||||
var keys []string
|
||||
s.mutex.RLock()
|
||||
defer s.mutex.RUnlock()
|
||||
for k := range s.store {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
func (s *inMemoryCache) Keys(pattern string) ([]string, error) {
|
||||
// There is a little difference between go regexp and redis key pattern
|
||||
// In redis, * means any character, while in go . means match everything.
|
||||
@@ -99,7 +125,7 @@ func (s *inMemoryCache) Keys(pattern string) ([]string, error) {
|
||||
return nil, err
|
||||
}
|
||||
var keys []string
|
||||
for k := range s.store {
|
||||
for _, k := range s.store.Keys() {
|
||||
if re.MatchString(k) {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
@@ -119,19 +145,19 @@ func (s *inMemoryCache) Set(key string, value string, duration time.Duration) er
|
||||
sobject.neverExpire = true
|
||||
}
|
||||
|
||||
s.store[key] = sobject
|
||||
s.store.Set(key, sobject)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *inMemoryCache) Del(keys ...string) error {
|
||||
for _, key := range keys {
|
||||
delete(s.store, key)
|
||||
s.store.Delete(key)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *inMemoryCache) Get(key string) (string, error) {
|
||||
if sobject, ok := s.store[key]; ok {
|
||||
if sobject, ok := s.store.Get(key); ok {
|
||||
if sobject.neverExpire || time.Now().Before(sobject.expiredAt) {
|
||||
return sobject.value, nil
|
||||
}
|
||||
@@ -142,7 +168,7 @@ func (s *inMemoryCache) Get(key string) (string, error) {
|
||||
|
||||
func (s *inMemoryCache) Exists(keys ...string) (bool, error) {
|
||||
for _, key := range keys {
|
||||
if _, ok := s.store[key]; !ok {
|
||||
if _, ok := s.store.Get(key); !ok {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
@@ -166,7 +192,7 @@ func (s *inMemoryCache) Expire(key string, duration time.Duration) error {
|
||||
sobject.neverExpire = true
|
||||
}
|
||||
|
||||
s.store[key] = sobject
|
||||
s.store.Set(key, sobject)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
16
pkg/simple/client/cache/inmemory_cache_test.go
vendored
16
pkg/simple/client/cache/inmemory_cache_test.go
vendored
@@ -1,19 +1,3 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
|
||||
16
pkg/simple/client/cache/options.go
vendored
16
pkg/simple/client/cache/options.go
vendored
@@ -1,19 +1,3 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
|
||||
16
pkg/simple/client/cache/redis.go
vendored
16
pkg/simple/client/cache/redis.go
vendored
@@ -1,19 +1,3 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
approvers:
|
||||
- shaowenchen
|
||||
- linuxsuren
|
||||
|
||||
reviewers:
|
||||
- runzexia
|
||||
- soulseen
|
||||
- shaowenchen
|
||||
- linuxsuren
|
||||
|
||||
labels:
|
||||
- area/devops
|
||||
@@ -1,131 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
const (
|
||||
LastBuild = "lastBuild"
|
||||
LastCompletedBuild = "lastCompletedBuild"
|
||||
LastFailedBuild = "lastFailedBuild"
|
||||
LastStableBuild = "lastStableBuild"
|
||||
LastSuccessfulBuild = "lastSuccessfulBuild"
|
||||
LastUnstableBuild = "lastUnstableBuild"
|
||||
LastUnsuccessfulBuild = "lastUnsuccessfulBuild"
|
||||
FirstBuild = "firstBuild"
|
||||
)
|
||||
|
||||
type GeneralParameter struct {
|
||||
Name string
|
||||
Value string
|
||||
}
|
||||
|
||||
type Branch struct {
|
||||
SHA1 string `json:",omitempty"`
|
||||
Name string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type BuildRevision struct {
|
||||
SHA1 string `json:"SHA1,omitempty"`
|
||||
Branch []Branch `json:"Branch,omitempty"`
|
||||
}
|
||||
|
||||
type Builds struct {
|
||||
BuildNumber int64 `json:"buildNumber"`
|
||||
BuildResult interface{} `json:"buildResult"`
|
||||
Marked BuildRevision `json:"marked"`
|
||||
Revision BuildRevision `json:"revision"`
|
||||
}
|
||||
|
||||
type Culprit struct {
|
||||
AbsoluteUrl string
|
||||
FullName string
|
||||
}
|
||||
|
||||
type GeneralAction struct {
|
||||
Parameters []GeneralParameter `json:"parameters,omitempty"`
|
||||
Causes []map[string]interface{} `json:"causes,omitempty"`
|
||||
BuildsByBranchName map[string]Builds `json:"buildsByBranchName,omitempty"`
|
||||
LastBuiltRevision *BuildRevision `json:"lastBuiltRevision,omitempty"`
|
||||
RemoteUrls []string `json:"remoteUrls,omitempty"`
|
||||
ScmName string `json:"scmName,omitempty"`
|
||||
Subdir interface{} `json:"subdir,omitempty"`
|
||||
ClassName string `json:"_class,omitempty"`
|
||||
SonarTaskId string `json:"ceTaskId,omitempty"`
|
||||
SonarServerUrl string `json:"serverUrl,omitempty"`
|
||||
SonarDashboardUrl string `json:"sonarqubeDashboardUrl,omitempty"`
|
||||
TotalCount int64 `json:",omitempty"`
|
||||
UrlName string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Build struct {
|
||||
Actions []GeneralAction
|
||||
Artifacts []struct {
|
||||
DisplayPath string `json:"displayPath"`
|
||||
FileName string `json:"fileName"`
|
||||
RelativePath string `json:"relativePath"`
|
||||
} `json:"artifacts"`
|
||||
Building bool `json:"building"`
|
||||
BuiltOn string `json:"builtOn"`
|
||||
ChangeSet struct {
|
||||
Items []struct {
|
||||
AffectedPaths []string `json:"affectedPaths"`
|
||||
Author struct {
|
||||
AbsoluteUrl string `json:"absoluteUrl"`
|
||||
FullName string `json:"fullName"`
|
||||
} `json:"author"`
|
||||
Comment string `json:"comment"`
|
||||
CommitID string `json:"commitId"`
|
||||
Date string `json:"date"`
|
||||
ID string `json:"id"`
|
||||
Msg string `json:"msg"`
|
||||
Paths []struct {
|
||||
EditType string `json:"editType"`
|
||||
File string `json:"file"`
|
||||
} `json:"paths"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
} `json:"items"`
|
||||
Kind string `json:"kind"`
|
||||
Revisions []struct {
|
||||
Module string
|
||||
Revision int
|
||||
} `json:"revision"`
|
||||
} `json:"changeSet"`
|
||||
Culprits []Culprit `json:"culprits"`
|
||||
Description interface{} `json:"description"`
|
||||
Duration int64 `json:"duration"`
|
||||
EstimatedDuration int64 `json:"estimatedDuration"`
|
||||
Executor interface{} `json:"executor"`
|
||||
FullDisplayName string `json:"fullDisplayName"`
|
||||
ID string `json:"id"`
|
||||
KeepLog bool `json:"keepLog"`
|
||||
Number int64 `json:"number"`
|
||||
QueueID int64 `json:"queueId"`
|
||||
Result string `json:"result"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
URL string `json:"url"`
|
||||
Runs []struct {
|
||||
Number int64
|
||||
URL string
|
||||
} `json:"runs"`
|
||||
}
|
||||
|
||||
type BuildGetter interface {
|
||||
// GetProjectPipelineBuildByType get the last build of the pipeline, status can specify the status of the last build.
|
||||
GetProjectPipelineBuildByType(projectId, pipelineId string, status string) (*Build, error)
|
||||
|
||||
// GetMultiBranchPipelineBuildByType get the last build of the pipeline, status can specify the status of the last build.
|
||||
GetMultiBranchPipelineBuildByType(projectId, pipelineId, branch string, status string) (*Build, error)
|
||||
}
|
||||
@@ -1,71 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
type Credential struct {
|
||||
Id string `json:"id" description:"Id of Credential, e.g. dockerhub-id"`
|
||||
Type string `json:"type" description:"Type of Credential, e.g. ssh/kubeconfig"`
|
||||
DisplayName string `json:"display_name,omitempty" description:"Credential's display name"`
|
||||
Fingerprint *struct {
|
||||
FileName string `json:"file_name,omitempty" description:"Credential's display name and description"`
|
||||
Hash string `json:"hash,omitempty" description:"Credential's hash"`
|
||||
Usage []*struct {
|
||||
Name string `json:"name,omitempty" description:"pipeline full name"`
|
||||
Ranges struct {
|
||||
Ranges []*struct {
|
||||
Start int `json:"start,omitempty" description:"Start build number"`
|
||||
End int `json:"end,omitempty" description:"End build number"`
|
||||
} `json:"ranges,omitempty"`
|
||||
} `json:"ranges,omitempty" description:"The build number of all pipelines that use this credential"`
|
||||
} `json:"usage,omitempty" description:"all usage of Credential"`
|
||||
} `json:"fingerprint,omitempty" description:"usage of the Credential"`
|
||||
Description string `json:"description,omitempty" description:"Credential's description'"`
|
||||
Domain string `json:"domain,omitempty" description:"Credential's domain,In ks we only use the default domain, default '_''"`
|
||||
}
|
||||
|
||||
type UsernamePasswordCredential struct {
|
||||
Username string `json:"username,omitempty" description:"username of username_password credential"`
|
||||
Password string `json:"password,omitempty" description:"password of username_password credential"`
|
||||
}
|
||||
|
||||
type SshCredential struct {
|
||||
Username string `json:"username,omitempty" description:"username of ssh credential"`
|
||||
Passphrase string `json:"passphrase,omitempty" description:"passphrase of ssh credential, password of ssh credential"`
|
||||
PrivateKey string `json:"private_key,omitempty" mapstructure:"private_key" description:"private key of ssh credential"`
|
||||
}
|
||||
|
||||
type SecretTextCredential struct {
|
||||
Secret string `json:"secret,omitempty" description:"secret content of credential"`
|
||||
}
|
||||
|
||||
type KubeconfigCredential struct {
|
||||
Content string `json:"content,omitempty" description:"content of kubeconfig"`
|
||||
}
|
||||
|
||||
type CredentialOperator interface {
|
||||
CreateCredentialInProject(projectId string, credential *v1.Secret) (string, error)
|
||||
|
||||
UpdateCredentialInProject(projectId string, credential *v1.Secret) (string, error)
|
||||
|
||||
GetCredentialInProject(projectId, id string) (*Credential, error)
|
||||
|
||||
DeleteCredentialInProject(projectId, id string) (string, error)
|
||||
}
|
||||
@@ -1,597 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
devopsv1alpha3 "kubesphere.io/api/devops/v1alpha3"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
type Devops struct {
|
||||
Data map[string]interface{}
|
||||
|
||||
Projects map[string]interface{}
|
||||
|
||||
Pipelines map[string]map[string]*devopsv1alpha3.Pipeline
|
||||
|
||||
Credentials map[string]map[string]*v1.Secret
|
||||
}
|
||||
|
||||
func New(projects ...string) *Devops {
|
||||
d := &Devops{
|
||||
Data: nil,
|
||||
Projects: map[string]interface{}{},
|
||||
Pipelines: map[string]map[string]*devopsv1alpha3.Pipeline{},
|
||||
Credentials: map[string]map[string]*v1.Secret{},
|
||||
}
|
||||
for _, p := range projects {
|
||||
d.Projects[p] = true
|
||||
}
|
||||
return d
|
||||
}
|
||||
func NewWithPipelines(project string, pipelines ...*devopsv1alpha3.Pipeline) *Devops {
|
||||
d := &Devops{
|
||||
Data: nil,
|
||||
Projects: map[string]interface{}{},
|
||||
Pipelines: map[string]map[string]*devopsv1alpha3.Pipeline{},
|
||||
Credentials: map[string]map[string]*v1.Secret{},
|
||||
}
|
||||
|
||||
d.Projects[project] = true
|
||||
d.Pipelines[project] = map[string]*devopsv1alpha3.Pipeline{}
|
||||
for _, f := range pipelines {
|
||||
d.Pipelines[project][f.Name] = f
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func NewWithCredentials(project string, credentials ...*v1.Secret) *Devops {
|
||||
d := &Devops{
|
||||
Data: nil,
|
||||
Projects: map[string]interface{}{},
|
||||
Credentials: map[string]map[string]*v1.Secret{},
|
||||
}
|
||||
|
||||
d.Projects[project] = true
|
||||
d.Credentials[project] = map[string]*v1.Secret{}
|
||||
for _, f := range credentials {
|
||||
d.Credentials[project][f.Name] = f
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *Devops) CreateDevOpsProject(projectId string) (string, error) {
|
||||
if _, ok := d.Projects[projectId]; ok {
|
||||
return projectId, nil
|
||||
}
|
||||
d.Projects[projectId] = true
|
||||
d.Pipelines[projectId] = map[string]*devopsv1alpha3.Pipeline{}
|
||||
d.Credentials[projectId] = map[string]*v1.Secret{}
|
||||
return projectId, nil
|
||||
}
|
||||
|
||||
func (d *Devops) DeleteDevOpsProject(projectId string) error {
|
||||
if _, ok := d.Projects[projectId]; ok {
|
||||
delete(d.Projects, projectId)
|
||||
delete(d.Pipelines, projectId)
|
||||
delete(d.Credentials, projectId)
|
||||
return nil
|
||||
} else {
|
||||
return &devops.ErrorResponse{
|
||||
Body: []byte{},
|
||||
Response: &http.Response{
|
||||
Status: "404 Not Found",
|
||||
StatusCode: 404,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
ContentLength: 50,
|
||||
Header: http.Header{
|
||||
"Foo": []string{"Bar"},
|
||||
},
|
||||
Body: io.NopCloser(strings.NewReader("foo")), // shouldn't be used
|
||||
},
|
||||
Message: "",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Devops) GetDevOpsProject(projectId string) (string, error) {
|
||||
if _, ok := d.Projects[projectId]; ok {
|
||||
return projectId, nil
|
||||
} else {
|
||||
return "", &devops.ErrorResponse{
|
||||
Body: []byte{},
|
||||
Response: &http.Response{
|
||||
Status: "404 Not Found",
|
||||
StatusCode: 404,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
ContentLength: 50,
|
||||
Header: http.Header{
|
||||
"Foo": []string{"Bar"},
|
||||
},
|
||||
Body: io.NopCloser(strings.NewReader("foo")), // shouldn't be used
|
||||
Request: &http.Request{
|
||||
Method: "",
|
||||
URL: &url.URL{
|
||||
Scheme: "",
|
||||
Opaque: "",
|
||||
User: nil,
|
||||
Host: "",
|
||||
Path: "",
|
||||
RawPath: "",
|
||||
ForceQuery: false,
|
||||
RawQuery: "",
|
||||
Fragment: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Message: "",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func NewFakeDevops(data map[string]interface{}) *Devops {
|
||||
var fakeData Devops
|
||||
fakeData.Data = data
|
||||
return &fakeData
|
||||
}
|
||||
|
||||
// Pipelinne operator interface
|
||||
func (d *Devops) GetPipeline(projectName, pipelineName string, httpParameters *devops.HttpParameters) (*devops.Pipeline, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (d *Devops) ListPipelines(httpParameters *devops.HttpParameters) (*devops.PipelineList, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) GetPipelineRun(projectName, pipelineName, runId string, httpParameters *devops.HttpParameters) (*devops.PipelineRun, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) ListPipelineRuns(projectName, pipelineName string, httpParameters *devops.HttpParameters) (*devops.PipelineRunList, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) StopPipeline(projectName, pipelineName, runId string, httpParameters *devops.HttpParameters) (*devops.StopPipeline, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) ReplayPipeline(projectName, pipelineName, runId string, httpParameters *devops.HttpParameters) (*devops.ReplayPipeline, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) RunPipeline(projectName, pipelineName string, httpParameters *devops.HttpParameters) (*devops.RunPipeline, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) GetArtifacts(projectName, pipelineName, runId string, httpParameters *devops.HttpParameters) ([]devops.Artifacts, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) GetRunLog(projectName, pipelineName, runId string, httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) GetStepLog(projectName, pipelineName, runId, nodeId, stepId string, httpParameters *devops.HttpParameters) ([]byte, http.Header, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (d *Devops) GetNodeSteps(projectName, pipelineName, runId, nodeId string, httpParameters *devops.HttpParameters) ([]devops.NodeSteps, error) {
|
||||
s := []string{projectName, pipelineName, runId, nodeId}
|
||||
key := strings.Join(s, "-")
|
||||
res := d.Data[key].([]devops.NodeSteps)
|
||||
return res, nil
|
||||
}
|
||||
func (d *Devops) GetPipelineRunNodes(projectName, pipelineName, runId string, httpParameters *devops.HttpParameters) ([]devops.PipelineRunNodes, error) {
|
||||
s := []string{projectName, pipelineName, runId}
|
||||
key := strings.Join(s, "-")
|
||||
res := d.Data[key].([]devops.PipelineRunNodes)
|
||||
return res, nil
|
||||
}
|
||||
func (d *Devops) SubmitInputStep(projectName, pipelineName, runId, nodeId, stepId string, httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// BranchPipelinne operator interface
|
||||
func (d *Devops) GetBranchPipeline(projectName, pipelineName, branchName string, httpParameters *devops.HttpParameters) (*devops.BranchPipeline, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) GetBranchPipelineRun(projectName, pipelineName, branchName, runId string, httpParameters *devops.HttpParameters) (*devops.PipelineRun, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) StopBranchPipeline(projectName, pipelineName, branchName, runId string, httpParameters *devops.HttpParameters) (*devops.StopPipeline, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) ReplayBranchPipeline(projectName, pipelineName, branchName, runId string, httpParameters *devops.HttpParameters) (*devops.ReplayPipeline, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) RunBranchPipeline(projectName, pipelineName, branchName string, httpParameters *devops.HttpParameters) (*devops.RunPipeline, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) GetBranchArtifacts(projectName, pipelineName, branchName, runId string, httpParameters *devops.HttpParameters) ([]devops.Artifacts, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) GetBranchRunLog(projectName, pipelineName, branchName, runId string, httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) GetBranchStepLog(projectName, pipelineName, branchName, runId, nodeId, stepId string, httpParameters *devops.HttpParameters) ([]byte, http.Header, error) {
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (d *Devops) GetBranchNodeSteps(projectName, pipelineName, branchName, runId, nodeId string, httpParameters *devops.HttpParameters) ([]devops.NodeSteps, error) {
|
||||
s := []string{projectName, pipelineName, branchName, runId, nodeId}
|
||||
key := strings.Join(s, "-")
|
||||
res := d.Data[key].([]devops.NodeSteps)
|
||||
return res, nil
|
||||
}
|
||||
func (d *Devops) GetBranchPipelineRunNodes(projectName, pipelineName, branchName, runId string, httpParameters *devops.HttpParameters) ([]devops.BranchPipelineRunNodes, error) {
|
||||
s := []string{projectName, pipelineName, branchName, runId}
|
||||
key := strings.Join(s, "-")
|
||||
res := d.Data[key].([]devops.BranchPipelineRunNodes)
|
||||
return res, nil
|
||||
}
|
||||
func (d *Devops) SubmitBranchInputStep(projectName, pipelineName, branchName, runId, nodeId, stepId string, httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) GetPipelineBranch(projectName, pipelineName string, httpParameters *devops.HttpParameters) (*devops.PipelineBranch, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) ScanBranch(projectName, pipelineName string, httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Common pipeline operator interface
|
||||
func (d *Devops) GetConsoleLog(projectName, pipelineName string, httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (d *Devops) GetCrumb(httpParameters *devops.HttpParameters) (*devops.Crumb, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// SCM operator interface

// GetSCMServers is a no-op stub that reports no SCM servers.
func (d *Devops) GetSCMServers(scmId string, httpParameters *devops.HttpParameters) ([]devops.SCMServer, error) {
	return nil, nil
}

// GetSCMOrg is a no-op stub that reports no SCM organizations.
func (d *Devops) GetSCMOrg(scmId string, httpParameters *devops.HttpParameters) ([]devops.SCMOrg, error) {
	return nil, nil
}

// GetOrgRepo is a no-op stub that returns an empty repository listing.
func (d *Devops) GetOrgRepo(scmId, organizationId string, httpParameters *devops.HttpParameters) (devops.OrgRepo, error) {
	return devops.OrgRepo{}, nil
}

// CreateSCMServers is a no-op stub; server creation always "succeeds" with nil.
func (d *Devops) CreateSCMServers(scmId string, httpParameters *devops.HttpParameters) (*devops.SCMServer, error) {
	return nil, nil
}

// Validate is a no-op stub; SCM credential validation always "succeeds" with nil.
func (d *Devops) Validate(scmId string, httpParameters *devops.HttpParameters) (*devops.Validates, error) {
	return nil, nil
}
|
||||
|
||||
// Webhook operator interface

// GetNotifyCommit is a no-op stub for the Jenkins notifyCommit webhook.
func (d *Devops) GetNotifyCommit(httpParameters *devops.HttpParameters) ([]byte, error) {
	return nil, nil
}

// GithubWebhook is a no-op stub for the GitHub webhook endpoint.
func (d *Devops) GithubWebhook(httpParameters *devops.HttpParameters) ([]byte, error) {
	return nil, nil
}

// CheckScriptCompile is a no-op stub; any pipeline script "compiles".
func (d *Devops) CheckScriptCompile(projectName, pipelineName string, httpParameters *devops.HttpParameters) (*devops.CheckScript, error) {
	return nil, nil
}

// CheckCron is a no-op stub; any cron expression is "valid".
func (d *Devops) CheckCron(projectName string, httpParameters *devops.HttpParameters) (*devops.CheckCronRes, error) {
	return nil, nil
}

// ToJenkinsfile is a no-op stub for the JSON-to-Jenkinsfile conversion endpoint.
func (d *Devops) ToJenkinsfile(httpParameters *devops.HttpParameters) (*devops.ResJenkinsfile, error) {
	return nil, nil
}

// ToJson is a no-op stub for the Jenkinsfile-to-JSON conversion endpoint.
func (d *Devops) ToJson(httpParameters *devops.HttpParameters) (map[string]interface{}, error) {
	return nil, nil
}
|
||||
|
||||
// CredentialOperator
|
||||
func (d *Devops) CreateCredentialInProject(projectId string, credential *v1.Secret) (string, error) {
|
||||
if _, ok := d.Credentials[projectId][credential.Name]; ok {
|
||||
err := fmt.Errorf("credential name [%s] has been used", credential.Name)
|
||||
return "", restful.NewError(http.StatusConflict, err.Error())
|
||||
}
|
||||
d.Credentials[projectId][credential.Name] = credential
|
||||
return credential.Name, nil
|
||||
}
|
||||
func (d *Devops) UpdateCredentialInProject(projectId string, credential *v1.Secret) (string, error) {
|
||||
if _, ok := d.Credentials[projectId][credential.Name]; !ok {
|
||||
err := &devops.ErrorResponse{
|
||||
Body: []byte{},
|
||||
Response: &http.Response{
|
||||
Status: "404 Not Found",
|
||||
StatusCode: 404,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
ContentLength: 50,
|
||||
Header: http.Header{
|
||||
"Foo": []string{"Bar"},
|
||||
},
|
||||
Body: io.NopCloser(strings.NewReader("foo")), // shouldn't be used
|
||||
Request: &http.Request{
|
||||
Method: "",
|
||||
URL: &url.URL{
|
||||
Scheme: "",
|
||||
Opaque: "",
|
||||
User: nil,
|
||||
Host: "",
|
||||
Path: "",
|
||||
RawPath: "",
|
||||
ForceQuery: false,
|
||||
RawQuery: "",
|
||||
Fragment: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Message: "",
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
d.Credentials[projectId][credential.Name] = credential
|
||||
return credential.Name, nil
|
||||
}
|
||||
|
||||
func (d *Devops) GetCredentialInProject(projectId, id string) (*devops.Credential, error) {
|
||||
if _, ok := d.Credentials[projectId][id]; !ok {
|
||||
err := &devops.ErrorResponse{
|
||||
Body: []byte{},
|
||||
Response: &http.Response{
|
||||
Status: "404 Not Found",
|
||||
StatusCode: 404,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
ContentLength: 50,
|
||||
Header: http.Header{
|
||||
"Foo": []string{"Bar"},
|
||||
},
|
||||
Body: io.NopCloser(strings.NewReader("foo")), // shouldn't be used
|
||||
Request: &http.Request{
|
||||
Method: "",
|
||||
URL: &url.URL{
|
||||
Scheme: "",
|
||||
Opaque: "",
|
||||
User: nil,
|
||||
Host: "",
|
||||
Path: "",
|
||||
RawPath: "",
|
||||
ForceQuery: false,
|
||||
RawQuery: "",
|
||||
Fragment: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Message: "",
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return &devops.Credential{Id: id}, nil
|
||||
}
|
||||
// GetCredentialsInProject is a no-op stub that reports no credentials.
func (d *Devops) GetCredentialsInProject(projectId string) ([]*devops.Credential, error) {
	return nil, nil
}
|
||||
func (d *Devops) DeleteCredentialInProject(projectId, id string) (string, error) {
|
||||
if _, ok := d.Credentials[projectId][id]; !ok {
|
||||
err := &devops.ErrorResponse{
|
||||
Body: []byte{},
|
||||
Response: &http.Response{
|
||||
Status: "404 Not Found",
|
||||
StatusCode: 404,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
ContentLength: 50,
|
||||
Header: http.Header{
|
||||
"Foo": []string{"Bar"},
|
||||
},
|
||||
Body: io.NopCloser(strings.NewReader("foo")), // shouldn't be used
|
||||
Request: &http.Request{
|
||||
Method: "",
|
||||
URL: &url.URL{
|
||||
Scheme: "",
|
||||
Opaque: "",
|
||||
User: nil,
|
||||
Host: "",
|
||||
Path: "",
|
||||
RawPath: "",
|
||||
ForceQuery: false,
|
||||
RawQuery: "",
|
||||
Fragment: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Message: "",
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
delete(d.Credentials[projectId], id)
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// BuildGetter

// GetProjectPipelineBuildByType is a no-op stub; the fake has no builds.
func (d *Devops) GetProjectPipelineBuildByType(projectId, pipelineId string, status string) (*devops.Build, error) {
	return nil, nil
}

// GetMultiBranchPipelineBuildByType is a no-op stub; the fake has no builds.
func (d *Devops) GetMultiBranchPipelineBuildByType(projectId, pipelineId, branch string, status string) (*devops.Build, error) {
	return nil, nil
}
|
||||
|
||||
// ProjectPipelineOperator
|
||||
func (d *Devops) CreateProjectPipeline(projectId string, pipeline *devopsv1alpha3.Pipeline) (string, error) {
|
||||
if _, ok := d.Pipelines[projectId][pipeline.Name]; ok {
|
||||
err := fmt.Errorf("pipeline name [%s] has been used", pipeline.Name)
|
||||
return "", restful.NewError(http.StatusConflict, err.Error())
|
||||
}
|
||||
d.Pipelines[projectId][pipeline.Name] = pipeline
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (d *Devops) DeleteProjectPipeline(projectId string, pipelineId string) (string, error) {
|
||||
if _, ok := d.Pipelines[projectId][pipelineId]; !ok {
|
||||
err := &devops.ErrorResponse{
|
||||
Body: []byte{},
|
||||
Response: &http.Response{
|
||||
Status: "404 Not Found",
|
||||
StatusCode: 404,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
ContentLength: 50,
|
||||
Header: http.Header{
|
||||
"Foo": []string{"Bar"},
|
||||
},
|
||||
Body: io.NopCloser(strings.NewReader("foo")), // shouldn't be used
|
||||
Request: &http.Request{
|
||||
Method: "",
|
||||
URL: &url.URL{
|
||||
Scheme: "",
|
||||
Opaque: "",
|
||||
User: nil,
|
||||
Host: "",
|
||||
Path: "",
|
||||
RawPath: "",
|
||||
ForceQuery: false,
|
||||
RawQuery: "",
|
||||
Fragment: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Message: "",
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
delete(d.Pipelines[projectId], pipelineId)
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (d *Devops) UpdateProjectPipeline(projectId string, pipeline *devopsv1alpha3.Pipeline) (string, error) {
|
||||
if _, ok := d.Pipelines[projectId][pipeline.Name]; !ok {
|
||||
err := &devops.ErrorResponse{
|
||||
Body: []byte{},
|
||||
Response: &http.Response{
|
||||
Status: "404 Not Found",
|
||||
StatusCode: 404,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
ContentLength: 50,
|
||||
Header: http.Header{
|
||||
"Foo": []string{"Bar"},
|
||||
},
|
||||
Body: io.NopCloser(strings.NewReader("foo")), // shouldn't be used
|
||||
Request: &http.Request{
|
||||
Method: "",
|
||||
URL: &url.URL{
|
||||
Scheme: "",
|
||||
Opaque: "",
|
||||
User: nil,
|
||||
Host: "",
|
||||
Path: "",
|
||||
RawPath: "",
|
||||
ForceQuery: false,
|
||||
RawQuery: "",
|
||||
Fragment: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Message: "",
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
d.Pipelines[projectId][pipeline.Name] = pipeline
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (d *Devops) GetProjectPipelineConfig(projectId, pipelineId string) (*devopsv1alpha3.Pipeline, error) {
|
||||
if _, ok := d.Pipelines[projectId][pipelineId]; !ok {
|
||||
err := &devops.ErrorResponse{
|
||||
Body: []byte{},
|
||||
Response: &http.Response{
|
||||
Status: "404 Not Found",
|
||||
StatusCode: 404,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
ContentLength: 50,
|
||||
Header: http.Header{
|
||||
"Foo": []string{"Bar"},
|
||||
},
|
||||
Body: io.NopCloser(strings.NewReader("foo")), // shouldn't be used
|
||||
Request: &http.Request{
|
||||
Method: "",
|
||||
URL: &url.URL{
|
||||
Scheme: "",
|
||||
Opaque: "",
|
||||
User: nil,
|
||||
Host: "",
|
||||
Path: "",
|
||||
RawPath: "",
|
||||
ForceQuery: false,
|
||||
RawQuery: "",
|
||||
Fragment: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
Message: "",
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return d.Pipelines[projectId][pipelineId], nil
|
||||
}
|
||||
|
||||
// AddGlobalRole is a no-op stub; the fake accepts any global role definition.
func (d *Devops) AddGlobalRole(roleName string, ids devops.GlobalPermissionIds, overwrite bool) error {
	return nil
}

// AddProjectRole is a no-op stub; the fake accepts any project role definition.
func (d *Devops) AddProjectRole(roleName string, pattern string, ids devops.ProjectPermissionIds, overwrite bool) error {
	return nil
}

// DeleteProjectRoles is a no-op stub; role deletion always "succeeds".
func (d *Devops) DeleteProjectRoles(roleName ...string) error {
	return nil
}

// AssignProjectRole is a no-op stub; role assignment always "succeeds".
func (d *Devops) AssignProjectRole(roleName string, sid string) error {
	return nil
}

// UnAssignProjectRole is a no-op stub; role unassignment always "succeeds".
func (d *Devops) UnAssignProjectRole(roleName string, sid string) error {
	return nil
}

// AssignGlobalRole is a no-op stub; global role assignment always "succeeds".
func (d *Devops) AssignGlobalRole(roleName string, sid string) error {
	return nil
}

// UnAssignGlobalRole is a no-op stub; global role unassignment always "succeeds".
func (d *Devops) UnAssignGlobalRole(roleName string, sid string) error {
	return nil
}

// DeleteUserInProject is a no-op stub; user removal always "succeeds".
func (d *Devops) DeleteUserInProject(sid string) error {
	return nil
}

// GetGlobalRole is a no-op stub that reports no global role.
func (d *Devops) GetGlobalRole(roleName string) (string, error) {
	return "", nil
}
|
||||
@@ -1,63 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/asaskevich/govalidator"
|
||||
)
|
||||
|
||||
// Interface aggregates every operator a DevOps backend (e.g. Jenkins) must
// implement: credential, build, pipeline, project and role management.
type Interface interface {
	CredentialOperator

	BuildGetter

	PipelineOperator

	ProjectPipelineOperator

	ProjectOperator

	RoleOperator
}
|
||||
|
||||
func GetDevOpsStatusCode(devopsErr error) int {
|
||||
if code, err := strconv.Atoi(devopsErr.Error()); err == nil {
|
||||
message := http.StatusText(code)
|
||||
if !govalidator.IsNull(message) {
|
||||
return code
|
||||
}
|
||||
}
|
||||
if jErr, ok := devopsErr.(*ErrorResponse); ok {
|
||||
return jErr.Response.StatusCode
|
||||
}
|
||||
return http.StatusInternalServerError
|
||||
}
|
||||
|
||||
// ErrorResponse is an error that carries the raw HTTP response which produced
// it, so callers can recover the status code via GetDevOpsStatusCode.
type ErrorResponse struct {
	Body     []byte         // raw response body
	Response *http.Response // response that caused the error; Request must be non-nil for Error()
	Message  string         // extra human-readable detail appended by Error()
}
|
||||
|
||||
func (e *ErrorResponse) Error() string {
|
||||
u := fmt.Sprintf("%s://%s%s", e.Response.Request.URL.Scheme, e.Response.Request.URL.Host, e.Response.Request.URL.RequestURI())
|
||||
return fmt.Sprintf("%s %s: %d %s", e.Response.Request.Method, u, e.Response.StatusCode, e.Message)
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
# Jenkins API Client for Go
|
||||
|
||||
|
||||
## About
|
||||
|
||||
Jenkins is the most popular open-source continuous integration system. This library helps you interact with Jenkins in a more developer-friendly way.
|
||||
|
||||
Forked from https://github.com/bndr/gojenkins
|
||||
|
||||
These are some of the features that are currently implemented:
|
||||
|
||||
* Get information on test-results of completed/failed build
|
||||
* Ability to query Nodes, and manipulate them. Start, Stop, set Offline.
|
||||
* Ability to query Jobs, and manipulate them.
|
||||
* Get Plugins, Builds, Artifacts, Fingerprints
|
||||
* Validate Fingerprints of Artifacts
|
||||
* Get Current Queue, Cancel Tasks
|
||||
* etc. For the full method list, see the GoDoc reference.
|
||||
|
||||
Add some features:
|
||||
|
||||
* Credentials Management
|
||||
* Pipeline Model Converter
|
||||
* RBAC control
|
||||
@@ -1,425 +0,0 @@
|
||||
// Copyright 2015 Vadim Kravcenko
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
// SCM kind identifiers as reported in a build's changeSet.Kind field.
const (
	Git = "git"
	Hg  = "hg"
	// NOTE(review): the value "svc" looks like a typo for "svn" — confirm
	// against the Jenkins API before changing, since callers may compare
	// against the literal string.
	Svn = "svc"
)
|
||||
|
||||
// Build wraps one Jenkins build: the decoded API payload plus the handles
// needed to issue follow-up requests for it.
type Build struct {
	Raw     *devops.Build // decoded build payload, refreshed by Poll
	Job     *Job          // job this build belongs to
	Jenkins *Jenkins      // client used for follow-up requests
	Base    string        // API base path of this build
	Depth   int           // default "depth" query parameter used by Poll
}

// Parameter is a single name/value build parameter.
type Parameter struct {
	Name  string
	Value string
}

// Branch is a branch reference inside a build revision.
type Branch struct {
	SHA1 string `json:",omitempty"`
	Name string `json:",omitempty"`
}

// BuildRevision identifies the SCM revision a build was made from.
type BuildRevision struct {
	SHA1   string   `json:"SHA1,omitempty"`
	Branch []Branch `json:"Branch,omitempty"`
}

// Builds pairs a build number with the revisions it was built and marked at.
type Builds struct {
	BuildNumber int64         `json:"buildNumber"`
	BuildResult interface{}   `json:"buildResult"`
	Marked      BuildRevision `json:"marked"`
	Revision    BuildRevision `json:"revision"`
}

// Culprit is a user Jenkins blames for a build result.
type Culprit struct {
	AbsoluteUrl string
	FullName    string
}

// GeneralObj is the loosely-typed "action" object Jenkins attaches to builds;
// only the fields relevant to a given action kind are populated.
type GeneralObj struct {
	Parameters              []Parameter              `json:"parameters,omitempty"`
	Causes                  []map[string]interface{} `json:"causes,omitempty"`
	BuildsByBranchName      map[string]Builds        `json:"buildsByBranchName,omitempty"`
	LastBuiltRevision       *BuildRevision           `json:"lastBuiltRevision,omitempty"`
	RemoteUrls              []string                 `json:"remoteUrls,omitempty"`
	ScmName                 string                   `json:"scmName,omitempty"`
	MercurialNodeName       string                   `json:"mercurialNodeName,omitempty"`
	MercurialRevisionNumber string                   `json:"mercurialRevisionNumber,omitempty"`
	Subdir                  interface{}              `json:"subdir,omitempty"`
	ClassName               string                   `json:"_class,omitempty"`
	SonarTaskId             string                   `json:"ceTaskId,omitempty"`
	SonarServerUrl          string                   `json:"serverUrl,omitempty"`
	SonarDashboardUrl       string                   `json:"sonarqubeDashboardUrl,omitempty"`
	TotalCount              int64                    `json:",omitempty"`
	UrlName                 string                   `json:",omitempty"`
}
|
||||
|
||||
// TestResult mirrors the Jenkins /testReport payload: aggregate counters plus
// per-suite, per-case details.
type TestResult struct {
	Duration  int64 `json:"duration"`
	Empty     bool  `json:"empty"`
	FailCount int64 `json:"failCount"`
	PassCount int64 `json:"passCount"`
	SkipCount int64 `json:"skipCount"`
	Suites    []struct {
		Cases []struct {
			Age             int64       `json:"age"`
			ClassName       string      `json:"className"`
			Duration        int64       `json:"duration"`
			ErrorDetails    interface{} `json:"errorDetails"`
			ErrorStackTrace interface{} `json:"errorStackTrace"`
			FailedSince     int64       `json:"failedSince"`
			Name            string      `json:"name"`
			Skipped         bool        `json:"skipped"`
			SkippedMessage  interface{} `json:"skippedMessage"`
			Status          string      `json:"status"`
			Stderr          interface{} `json:"stderr"`
			Stdout          interface{} `json:"stdout"`
		} `json:"cases"`
		Duration  int64       `json:"duration"`
		ID        interface{} `json:"id"`
		Name      string      `json:"name"`
		Stderr    interface{} `json:"stderr"`
		Stdout    interface{} `json:"stdout"`
		Timestamp interface{} `json:"timestamp"`
	} `json:"suites"`
}
|
||||
|
||||
// BuildResponse mirrors the Jenkins build JSON payload, including change set,
// artifacts, culprits and (for matrix/maven jobs) sub-runs.
type BuildResponse struct {
	Actions   []devops.GeneralAction
	Artifacts []struct {
		DisplayPath  string `json:"displayPath"`
		FileName     string `json:"fileName"`
		RelativePath string `json:"relativePath"`
	} `json:"artifacts"`
	Building  bool   `json:"building"`
	BuiltOn   string `json:"builtOn"`
	ChangeSet struct {
		Items []struct {
			AffectedPaths []string `json:"affectedPaths"`
			Author        struct {
				AbsoluteUrl string `json:"absoluteUrl"`
				FullName    string `json:"fullName"`
			} `json:"author"`
			Comment  string `json:"comment"`
			CommitID string `json:"commitId"`
			Date     string `json:"date"`
			ID       string `json:"id"`
			Msg      string `json:"msg"`
			Paths    []struct {
				EditType string `json:"editType"`
				File     string `json:"file"`
			} `json:"paths"`
			Timestamp int64 `json:"timestamp"`
		} `json:"items"`
		Kind      string `json:"kind"` // SCM kind, e.g. "git"
		Revisions []struct {
			Module   string
			Revision int
		} `json:"revision"`
	} `json:"changeSet"`
	Culprits          []devops.Culprit `json:"culprits"`
	Description       interface{}      `json:"description"`
	Duration          int64            `json:"duration"`
	EstimatedDuration int64            `json:"estimatedDuration"`
	Executor          interface{}      `json:"executor"`
	FullDisplayName   string           `json:"fullDisplayName"`
	ID                string           `json:"id"`
	KeepLog           bool             `json:"keepLog"`
	Number            int64            `json:"number"`
	QueueID           int64            `json:"queueId"`
	Result            string           `json:"result"`
	Timestamp         int64            `json:"timestamp"` // epoch milliseconds
	URL               string           `json:"url"`
	MavenArtifacts    interface{}      `json:"mavenArtifacts"`
	MavenVersionUsed  string           `json:"mavenVersionUsed"`
	Runs              []struct {
		Number int64
		URL    string
	} `json:"runs"`
}
|
||||
|
||||
// Builds

// Info returns the raw decoded build payload.
func (b *Build) Info() *devops.Build {
	return b.Raw
}

// GetUrl returns the build's URL as reported by Jenkins.
func (b *Build) GetUrl() string {
	return b.Raw.URL
}

// GetBuildNumber returns the build's number.
func (b *Build) GetBuildNumber() int64 {
	return b.Raw.Number
}

// GetResult returns the build result string (e.g. "SUCCESS").
func (b *Build) GetResult() string {
	return b.Raw.Result
}
|
||||
|
||||
func (b *Build) Stop() (bool, error) {
|
||||
if b.IsRunning() {
|
||||
response, err := b.Jenkins.Requester.Post(b.Base+"/stop", nil, nil, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return false, errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (b *Build) GetConsoleOutput() string {
|
||||
url := b.Base + "/consoleText"
|
||||
var content string
|
||||
rsp, _ := b.Jenkins.Requester.GetXML(url, &content, nil)
|
||||
rsp.Body.Close()
|
||||
return content
|
||||
}
|
||||
|
||||
func (b *Build) GetCauses() ([]map[string]interface{}, error) {
|
||||
_, err := b.Poll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, a := range b.Raw.Actions {
|
||||
if a.Causes != nil {
|
||||
return a.Causes, nil
|
||||
}
|
||||
}
|
||||
return nil, errors.New("No Causes")
|
||||
}
|
||||
|
||||
func (b *Build) GetInjectedEnvVars() (map[string]string, error) {
|
||||
var envVars struct {
|
||||
EnvMap map[string]string `json:"envMap"`
|
||||
}
|
||||
endpoint := b.Base + "/injectedEnvVars"
|
||||
rsp, err := b.Jenkins.Requester.GetJSON(endpoint, &envVars, nil)
|
||||
if err != nil {
|
||||
return envVars.EnvMap, err
|
||||
}
|
||||
rsp.Body.Close()
|
||||
return envVars.EnvMap, nil
|
||||
}
|
||||
|
||||
func (b *Build) GetDownstreamBuilds() ([]*Build, error) {
|
||||
result := make([]*Build, 0)
|
||||
downstreamJobs, err := b.Job.GetDownstreamJobs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, job := range downstreamJobs {
|
||||
allBuildIDs, err := job.GetAllBuildIds()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, buildID := range allBuildIDs {
|
||||
build, err := job.GetBuild(buildID.Number)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
upstreamBuild, _ := build.GetUpstreamBuild()
|
||||
// cannot compare only id, it can be from different job
|
||||
if b.GetUrl() == upstreamBuild.GetUrl() {
|
||||
result = append(result, build)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (b *Build) GetUpstreamJob() (*Job, error) {
|
||||
causes, err := b.GetCauses()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(causes) > 0 {
|
||||
if job, ok := causes[0]["upstreamProject"]; ok {
|
||||
return b.Jenkins.GetJob(job.(string))
|
||||
}
|
||||
}
|
||||
return nil, errors.New("Unable to get Upstream Job")
|
||||
}
|
||||
|
||||
func (b *Build) GetUpstreamBuildNumber() (int64, error) {
|
||||
causes, err := b.GetCauses()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(causes) > 0 {
|
||||
if build, ok := causes[0]["upstreamBuild"]; ok {
|
||||
switch t := build.(type) {
|
||||
default:
|
||||
return t.(int64), nil
|
||||
case float64:
|
||||
return int64(t), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (b *Build) GetUpstreamBuild() (*Build, error) {
|
||||
job, err := b.GetUpstreamJob()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if job != nil {
|
||||
buildNumber, err := b.GetUpstreamBuildNumber()
|
||||
if err == nil {
|
||||
return job.GetBuild(buildNumber)
|
||||
}
|
||||
}
|
||||
return nil, errors.New("Build not found")
|
||||
}
|
||||
|
||||
func (b *Build) GetResultSet() (*TestResult, error) {
|
||||
|
||||
url := b.Base + "/testReport"
|
||||
var report TestResult
|
||||
|
||||
rsp, err := b.Jenkins.Requester.GetJSON(url, &report, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rsp.Body.Close()
|
||||
return &report, nil
|
||||
}
|
||||
|
||||
// GetTimestamp converts the build's epoch-millisecond timestamp to time.Time.
func (b *Build) GetTimestamp() time.Time {
	msInt := int64(b.Raw.Timestamp)
	return time.Unix(0, msInt*int64(time.Millisecond))
}

// GetDuration returns the build duration as reported by Jenkins.
func (b *Build) GetDuration() int64 {
	return b.Raw.Duration
}
|
||||
|
||||
// GetRevisionBranch returns the SHA1 of the first branch recorded in the
// build's lastBuiltRevision action, for git change sets only.
// NOTE(review): non-git SCM kinds hit the panic below — callers must only
// invoke this for git builds; confirm before relaxing.
func (b *Build) GetRevisionBranch() string {
	vcs := b.Raw.ChangeSet.Kind
	if vcs == Git {
		for _, a := range b.Raw.Actions {
			if len(a.LastBuiltRevision.Branch) > 0 && a.LastBuiltRevision.Branch[0].SHA1 != "" {
				return a.LastBuiltRevision.Branch[0].SHA1
			}
		}
	} else {
		panic("Not implemented")
	}
	return ""
}
|
||||
|
||||
// IsGood reports whether the build has finished with result SUCCESS.
func (b *Build) IsGood() bool {
	return !b.IsRunning() && b.Raw.Result == STATUS_SUCCESS
}

// IsRunning refreshes the build and reports whether it is still building.
// A failed poll is treated as "not running".
func (b *Build) IsRunning() bool {
	_, err := b.Poll()
	if err != nil {
		return false
	}
	return b.Raw.Building
}
|
||||
|
||||
func (b *Build) SetDescription(description string) error {
|
||||
data := url.Values{}
|
||||
data.Set("description", description)
|
||||
resp, err := b.Jenkins.Requester.Post(b.Base+"/submitDescription", bytes.NewBufferString(data.Encode()), nil, nil)
|
||||
if err != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
func (b *Build) PauseToggle() error {
|
||||
response, err := b.Jenkins.Requester.Post(b.Base+"/pause/toggle", nil, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Poll for current data. Optional Parameter - depth.
|
||||
// More about depth here: https://wiki.jenkins-ci.org/display/JENKINS/Remote+access+API
|
||||
func (b *Build) Poll(options ...interface{}) (int, error) {
|
||||
depth := "-1"
|
||||
|
||||
for _, o := range options {
|
||||
switch v := o.(type) {
|
||||
case string:
|
||||
depth = v
|
||||
case int:
|
||||
depth = strconv.Itoa(v)
|
||||
case int64:
|
||||
depth = strconv.FormatInt(v, 10)
|
||||
}
|
||||
}
|
||||
if depth == "-1" {
|
||||
depth = strconv.Itoa(b.Depth)
|
||||
}
|
||||
|
||||
qr := map[string]string{
|
||||
"depth": depth,
|
||||
}
|
||||
response, err := b.Jenkins.Requester.GetJSON(b.Base, b.Raw, qr)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
response.Body.Close()
|
||||
return response.StatusCode, nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetProjectPipelineBuildByType(projectId, pipelineId string, status string) (*devops.Build, error) {
|
||||
job, err := j.GetJob(pipelineId, projectId)
|
||||
if err != nil {
|
||||
return nil, restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
//nolint:staticcheck
|
||||
build, err := job.getBuildByType(status)
|
||||
return build.Raw, nil
|
||||
}
|
||||
func (j *Jenkins) GetMultiBranchPipelineBuildByType(projectId, pipelineId, branch string, status string) (*devops.Build, error) {
|
||||
job, err := j.GetJob(branch, projectId, pipelineId)
|
||||
if err != nil {
|
||||
return nil, restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
//nolint:staticcheck
|
||||
build, err := job.getBuildByType(status)
|
||||
return build.Raw, nil
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
// Build and test-case status strings as reported by Jenkins.
const (
	STATUS_FAIL           = "FAIL"
	STATUS_ERROR          = "ERROR"
	STATUS_ABORTED        = "ABORTED"
	STATUS_REGRESSION     = "REGRESSION"
	STATUS_SUCCESS        = "SUCCESS"
	STATUS_FIXED          = "FIXED"
	STATUS_PASSED         = "PASSED"
	RESULT_STATUS_FAILURE = "FAILURE"
	RESULT_STATUS_FAILED  = "FAILED"
	RESULT_STATUS_SKIPPED = "SKIPPED"
	// Regexp splitting a Jenkins URL into its base and view components.
	STR_RE_SPLIT_VIEW = "(.*)/view/([^/]*)/?"
)
|
||||
|
||||
// Jenkins role-strategy scopes used by the role-management endpoints.
const (
	GLOBAL_ROLE  = "globalRoles"
	PROJECT_ROLE = "projectRoles"
)

// ParameterTypeMap maps Jenkins parameter-definition class names to the short
// parameter type names exposed by the API.
var ParameterTypeMap = map[string]string{
	"hudson.model.StringParameterDefinition":   "string",
	"hudson.model.ChoiceParameterDefinition":   "choice",
	"hudson.model.TextParameterDefinition":     "text",
	"hudson.model.BooleanParameterDefinition":  "boolean",
	"hudson.model.FileParameterDefinition":     "file",
	"hudson.model.PasswordParameterDefinition": "password",
}
|
||||
@@ -1,296 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The KubeSphere Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
devopsv1alpha3 "kubesphere.io/api/devops/v1alpha3"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
// Jenkins "stapler-class" identifiers for the credential kinds this package
// can create, plus the credential scope used for all of them.
const SSHCrenditalStaplerClass = "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey"
const DirectSSHCrenditalStaplerClass = "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey$DirectEntryPrivateKeySource"
const UsernamePassswordCredentialStaplerClass = "com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl"
const SecretTextCredentialStaplerClass = "org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl"
const KubeconfigCredentialStaplerClass = "com.microsoft.jenkins.kubernetes.credentials.KubeconfigCredentials"
const DirectKubeconfigCredentialStaperClass = "com.microsoft.jenkins.kubernetes.credentials.KubeconfigCredentials$DirectEntryKubeconfigSource"
// GLOBALScope marks a credential as visible to all of Jenkins.
const GLOBALScope = "GLOBAL"
|
||||
|
||||
// UsernamePasswordCredential is the JSON body for creating a Jenkins
// username/password credential.
type UsernamePasswordCredential struct {
	Scope        string `json:"scope"`
	Id           string `json:"id"`
	Username     string `json:"username"`
	Password     string `json:"password"`
	Description  string `json:"description"`
	StaplerClass string `json:"stapler-class"`
}

// SshCredential is the JSON body for creating a Jenkins SSH private-key
// credential.
type SshCredential struct {
	Scope        string           `json:"scope"`
	Id           string           `json:"id"`
	Username     string           `json:"username"`
	Passphrase   string           `json:"passphrase"`
	KeySource    PrivateKeySource `json:"privateKeySource"`
	Description  string           `json:"description"`
	StaplerClass string           `json:"stapler-class"`
}

// SecretTextCredential is the JSON body for creating a Jenkins secret-text
// credential.
type SecretTextCredential struct {
	Scope        string `json:"scope"`
	Id           string `json:"id"`
	Secret       string `json:"secret"`
	Description  string `json:"description"`
	StaplerClass string `json:"stapler-class"`
}

// KubeconfigCredential is the JSON body for creating a Jenkins kubeconfig
// credential.
type KubeconfigCredential struct {
	Scope            string           `json:"scope"`
	Id               string           `json:"id"`
	Description      string           `json:"description"`
	KubeconfigSource KubeconfigSource `json:"kubeconfigSource"`
	StaplerClass     string           `json:"stapler-class"`
}

// PrivateKeySource is the directly-entered private key of an SSH credential.
type PrivateKeySource struct {
	StaplerClass string `json:"stapler-class"`
	PrivateKey   string `json:"privateKey"`
}

// KubeconfigSource is the directly-entered content of a kubeconfig credential.
type KubeconfigSource struct {
	StaplerClass string `json:"stapler-class"`
	Content      string `json:"content"`
}
|
||||
|
||||
// CredentialResponse mirrors the Jenkins credential JSON payload, including
// the optional fingerprint describing where the credential is used.
type CredentialResponse struct {
	Id          string `json:"id"`
	TypeName    string `json:"typeName"`
	DisplayName string `json:"displayName"`
	Fingerprint *struct {
		FileName string `json:"file_name,omitempty" description:"Credential's display name and description"`
		Hash     string `json:"hash,omitempty" description:"Credential's hash"`
		Usage    []*struct {
			Name   string `json:"name,omitempty" description:"Jenkins pipeline full name"`
			Ranges struct {
				Ranges []*struct {
					Start int `json:"start,omitempty" description:"Start build number"`
					End   int `json:"end,omitempty" description:"End build number"`
				} `json:"ranges,omitempty"`
			} `json:"ranges,omitempty" description:"The build number of all pipelines that use this credential"`
		} `json:"usage,omitempty" description:"all usage of Credential"`
	} `json:"fingerprint,omitempty" description:"usage of the Credential"`
	Description string `json:"description,omitempty"`
	Domain      string `json:"domain"`
}
|
||||
|
||||
func NewSshCredential(secret *v1.Secret) *SshCredential {
|
||||
id := secret.Name
|
||||
username := string(secret.Data[devopsv1alpha3.SSHAuthUsernameKey])
|
||||
passphrase := string(secret.Data[devopsv1alpha3.SSHAuthPassphraseKey])
|
||||
privatekey := string(secret.Data[devopsv1alpha3.SSHAuthPrivateKey])
|
||||
|
||||
keySource := PrivateKeySource{
|
||||
StaplerClass: DirectSSHCrenditalStaplerClass,
|
||||
PrivateKey: privatekey,
|
||||
}
|
||||
|
||||
return &SshCredential{
|
||||
Scope: GLOBALScope,
|
||||
Id: id,
|
||||
Username: username,
|
||||
Passphrase: passphrase,
|
||||
KeySource: keySource,
|
||||
StaplerClass: SSHCrenditalStaplerClass,
|
||||
}
|
||||
}
|
||||
|
||||
func NewUsernamePasswordCredential(secret *v1.Secret) *UsernamePasswordCredential {
|
||||
id := secret.Name
|
||||
username := string(secret.Data[devopsv1alpha3.BasicAuthUsernameKey])
|
||||
password := string(secret.Data[devopsv1alpha3.BasicAuthPasswordKey])
|
||||
return &UsernamePasswordCredential{
|
||||
Scope: GLOBALScope,
|
||||
Id: id,
|
||||
Username: username,
|
||||
Password: password,
|
||||
StaplerClass: UsernamePassswordCredentialStaplerClass,
|
||||
}
|
||||
}
|
||||
|
||||
func NewSecretTextCredential(secret *v1.Secret) *SecretTextCredential {
|
||||
id := secret.Name
|
||||
secretContent := string(secret.Data[devopsv1alpha3.SecretTextSecretKey])
|
||||
return &SecretTextCredential{
|
||||
Scope: GLOBALScope,
|
||||
Id: id,
|
||||
Secret: secretContent,
|
||||
StaplerClass: SecretTextCredentialStaplerClass,
|
||||
}
|
||||
}
|
||||
|
||||
func NewKubeconfigCredential(secret *v1.Secret) *KubeconfigCredential {
|
||||
id := secret.Name
|
||||
secretContent := string(secret.Data[devopsv1alpha3.KubeConfigSecretKey])
|
||||
|
||||
credentialSource := KubeconfigSource{
|
||||
StaplerClass: DirectKubeconfigCredentialStaperClass,
|
||||
Content: secretContent,
|
||||
}
|
||||
|
||||
return &KubeconfigCredential{
|
||||
Scope: GLOBALScope,
|
||||
Id: id,
|
||||
KubeconfigSource: credentialSource,
|
||||
StaplerClass: KubeconfigCredentialStaplerClass,
|
||||
}
|
||||
}
|
||||
|
||||
// GetCredentialInProject fetches a single credential from the global
// domain ("_") of the given DevOps project folder in Jenkins.
// It returns an error carrying the numeric HTTP status when the response
// is not 200 OK.
func (j *Jenkins) GetCredentialInProject(projectId, id string) (*devops.Credential, error) {
	responseStruct := &devops.Credential{}

	// Jenkins does not include the domain in the response body, so it is
	// filled in manually below; only the global "_" domain is used here.
	domain := "_"

	response, err := j.Requester.GetJSON(
		fmt.Sprintf("/job/%s/credentials/store/folder/domain/_/credential/%s", projectId, id),
		responseStruct, map[string]string{
			// depth=2 expands nested objects (e.g. fingerprint/usage data).
			"depth": "2",
		})
	if err != nil {
		return nil, err
	}
	response.Body.Close()
	if response.StatusCode != http.StatusOK {
		// The status code itself is the error message, matching the
		// convention used throughout this client.
		return nil, errors.New(strconv.Itoa(response.StatusCode))
	}
	responseStruct.Domain = domain
	return responseStruct, nil
}
|
||||
|
||||
// GetCredentialsInProject lists every credential in the global domain
// ("_") of the given DevOps project folder in Jenkins.
// It returns an error carrying the numeric HTTP status when the response
// is not 200 OK.
func (j *Jenkins) GetCredentialsInProject(projectId string) ([]*devops.Credential, error) {
	// Jenkins does not include the domain in the response body, so it is
	// stamped onto each credential below.
	domain := "_"
	var responseStruct = &struct {
		Credentials []*devops.Credential `json:"credentials"`
	}{}
	response, err := j.Requester.GetJSON(
		fmt.Sprintf("/job/%s/credentials/store/folder/domain/_", projectId),
		responseStruct, map[string]string{
			// depth=2 expands nested objects (e.g. fingerprint/usage data).
			"depth": "2",
		})
	if err != nil {
		return nil, err
	}
	response.Body.Close()
	if response.StatusCode != http.StatusOK {
		return nil, errors.New(strconv.Itoa(response.StatusCode))
	}
	for _, credential := range responseStruct.Credentials {
		credential.Domain = domain
	}
	return responseStruct.Credentials, nil

}
|
||||
|
||||
func (j *Jenkins) CreateCredentialInProject(projectId string, credential *v1.Secret) (string, error) {
|
||||
|
||||
var request interface{}
|
||||
responseString := ""
|
||||
switch credential.Type {
|
||||
case devopsv1alpha3.SecretTypeBasicAuth:
|
||||
request = NewUsernamePasswordCredential(credential)
|
||||
case devopsv1alpha3.SecretTypeSSHAuth:
|
||||
request = NewSshCredential(credential)
|
||||
case devopsv1alpha3.SecretTypeSecretText:
|
||||
request = NewSecretTextCredential(credential)
|
||||
case devopsv1alpha3.SecretTypeKubeConfig:
|
||||
request = NewKubeconfigCredential(credential)
|
||||
default:
|
||||
err := fmt.Errorf("error unsupport credential type")
|
||||
klog.Errorf("%+v", err)
|
||||
return "", restful.NewError(http.StatusBadRequest, err.Error())
|
||||
}
|
||||
|
||||
response, err := j.Requester.Post(
|
||||
fmt.Sprintf("/job/%s/credentials/store/folder/domain/_/createCredentials", projectId),
|
||||
nil, &responseString, map[string]string{
|
||||
"json": makeJson(map[string]interface{}{
|
||||
"credentials": request,
|
||||
}),
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return "", errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return credential.Name, nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) UpdateCredentialInProject(projectId string, credential *v1.Secret) (string, error) {
|
||||
|
||||
requestContent := ""
|
||||
switch credential.Type {
|
||||
case devopsv1alpha3.SecretTypeBasicAuth:
|
||||
requestStruct := NewUsernamePasswordCredential(credential)
|
||||
requestContent = makeJson(requestStruct)
|
||||
case devopsv1alpha3.SecretTypeSSHAuth:
|
||||
requestStruct := NewSshCredential(credential)
|
||||
requestContent = makeJson(requestStruct)
|
||||
case devopsv1alpha3.SecretTypeSecretText:
|
||||
requestStruct := NewSecretTextCredential(credential)
|
||||
requestContent = makeJson(requestStruct)
|
||||
case devopsv1alpha3.SecretTypeKubeConfig:
|
||||
requestStruct := NewKubeconfigCredential(credential)
|
||||
requestContent = makeJson(requestStruct)
|
||||
default:
|
||||
err := fmt.Errorf("error unsupport credential type")
|
||||
klog.Errorf("%+v", err)
|
||||
return "", restful.NewError(http.StatusBadRequest, err.Error())
|
||||
}
|
||||
response, err := j.Requester.Post(
|
||||
fmt.Sprintf("/job/%s/credentials/store/folder/domain/_/credential/%s/updateSubmit", projectId, credential.Name),
|
||||
nil, nil, map[string]string{
|
||||
"json": requestContent,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return "", errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return credential.Name, nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) DeleteCredentialInProject(projectId, id string) (string, error) {
|
||||
response, err := j.Requester.Post(
|
||||
fmt.Sprintf("/job/%s/credentials/store/folder/domain/_/credential/%s/doDelete", projectId, id),
|
||||
nil, nil, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return "", errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
@@ -1,150 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// TestNewUsernamePasswordCredential verifies that a basic-auth Kubernetes
// secret converts into a Jenkins username/password credential with the
// expected stapler class, id, username and password.
func TestNewUsernamePasswordCredential(t *testing.T) {
	username := "test-user"
	password := "password"
	name := "test-secret"
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: "test",
		},
		Data: map[string][]byte{
			"username": []byte(username),
			"password": []byte(password),
		},
		Type: "credential.devops.kubesphere.io/basic-auth",
	}
	credential := NewUsernamePasswordCredential(secret)
	if credential.StaplerClass != "com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl" {
		t.Fatalf("credential's stapler class should be com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl"+
			"other than %s ", credential.StaplerClass)
	}
	// The credential id must mirror the secret name.
	if credential.Id != name {
		t.Fatalf("credential's id should be %s "+
			"other than %s ", name, credential.Id)
	}
	if credential.Username != username {
		t.Fatalf("credential's username should be %s "+
			"other than %s ", username, credential.Username)
	}
	if credential.Password != password {
		t.Fatalf("credential's password should be %s "+
			"other than %s ", password, credential.Password)
	}
}
|
||||
|
||||
// TestNewSshCredential verifies that an ssh-auth Kubernetes secret
// converts into a Jenkins SSH credential with the expected stapler class,
// id, username, passphrase and private key.
func TestNewSshCredential(t *testing.T) {
	username := "test-user"
	passphrase := "passphrase"
	privatekey := "pk"
	name := "test-secret"
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: "test",
		},
		Data: map[string][]byte{
			"username":    []byte(username),
			"passphrase":  []byte(passphrase),
			"private_key": []byte(privatekey),
		},
		Type: "credential.devops.kubesphere.io/ssh-auth",
	}
	credential := NewSshCredential(secret)
	if credential.StaplerClass != "com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey" {
		t.Fatalf("credential's stapler class should be com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey"+
			"other than %s ", credential.StaplerClass)
	}
	// The credential id must mirror the secret name.
	if credential.Id != name {
		t.Fatalf("credential's id should be %s "+
			"other than %s ", name, credential.Id)
	}
	if credential.Username != username {
		t.Fatalf("credential's username should be %s "+
			"other than %s ", username, credential.Username)
	}
	if credential.Passphrase != passphrase {
		t.Fatalf("credential's passphrase should be %s "+
			"other than %s ", passphrase, credential.Passphrase)
	}
	// The private key lands in the nested direct-entry key source.
	if credential.KeySource.PrivateKey != privatekey {
		t.Fatalf("credential's privatekey should be %s "+
			"other than %s ", privatekey, credential.KeySource.PrivateKey)
	}
}
|
||||
|
||||
// TestNewKubeconfigCredential verifies that a kubeconfig Kubernetes secret
// converts into a Jenkins kubeconfig credential with the expected stapler
// class, id and embedded kubeconfig content.
func TestNewKubeconfigCredential(t *testing.T) {
	content := []byte("test-content")
	name := "test-secret"
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: "test",
		},
		Type: "credential.devops.kubesphere.io/kubeconfig",
		Data: map[string][]byte{"content": content},
	}
	credential := NewKubeconfigCredential(secret)
	if credential.StaplerClass != "com.microsoft.jenkins.kubernetes.credentials.KubeconfigCredentials" {
		t.Fatalf("credential's stapler class should be com.microsoft.jenkins.kubernetes.credentials.KubeconfigCredentials"+
			"other than %s ", credential.StaplerClass)
	}
	// The credential id must mirror the secret name.
	if credential.Id != name {
		t.Fatalf("credential's id should be %s "+
			"other than %s ", name, credential.Id)
	}
	// The kubeconfig text lands in the nested direct-entry source.
	if credential.KubeconfigSource.Content != string(content) {
		t.Fatalf("credential's content should be %s "+
			"other than %s ", string(content), credential.KubeconfigSource.Content)
	}
}
|
||||
|
||||
// TestNewSecretTextCredential verifies that a secret-text Kubernetes
// secret converts into a Jenkins string credential with the expected
// stapler class, id and secret content.
func TestNewSecretTextCredential(t *testing.T) {
	content := []byte("test-content")
	name := "test-secret"
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: "test",
		},
		Type: "credential.devops.kubesphere.io/secret-text",
		Data: map[string][]byte{"secret": content},
	}
	credential := NewSecretTextCredential(secret)
	if credential.StaplerClass != "org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl" {
		t.Fatalf("credential's stapler class should be org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl"+
			"other than %s ", credential.StaplerClass)
	}
	// The credential id must mirror the secret name.
	if credential.Id != name {
		t.Fatalf("credential's id should be %s "+
			"other than %s ", name, credential.Id)
	}
	if credential.Secret != string(content) {
		t.Fatalf("credential's content should be %s "+
			"other than %s ", string(content), credential.Secret)
	}
}
|
||||
@@ -1,25 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The KubeSphere Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
func NewDevopsClient(options *Options) (devops.Interface, error) {
|
||||
|
||||
jenkins := CreateJenkins(nil, options.Host, options.MaxConnections, options.Username, options.Password)
|
||||
|
||||
return jenkins, nil
|
||||
}
|
||||
@@ -1,56 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Test_parseCronJobTime checks that parseCronJobTime extracts the last and
// next run times (as RFC 3339 UTC strings) from Jenkins cron descriptions,
// in both the Chinese-localized and English message formats.
func Test_parseCronJobTime(t *testing.T) {
	type Except struct {
		Last string
		Next string
	}

	Items := []struct {
		Input    string
		Expected Except
	}{
		{"上次运行的时间 Tuesday, September 10, 2019 8:59:09 AM UTC; 下次运行的时间 Tuesday, September 10, 2019 9:14:09 AM UTC.", Except{Last: "2019-09-10T08:59:09Z", Next: "2019-09-10T09:14:09Z"}},
		// Crosses a year boundary between the last and next run.
		{"上次运行的时间 Thursday, January 3, 2019 11:56:30 PM UTC; 下次运行的时间 Friday, January 3, 2020 12:11:30 AM UTC.", Except{Last: "2019-01-03T23:56:30Z", Next: "2020-01-03T00:11:30Z"}},
		{"上次运行的时间 Tuesday, September 10, 2019 8:41:34 AM UTC; 下次运行的时间 Tuesday, September 10, 2019 9:41:34 AM UTC.", Except{Last: "2019-09-10T08:41:34Z", Next: "2019-09-10T09:41:34Z"}},
		{"上次运行的时间 Tuesday, September 10, 2019 9:15:26 AM UTC; 下次运行的时间 Tuesday, September 10, 2019 10:03:26 AM UTC.", Except{Last: "2019-09-10T09:15:26Z", Next: "2019-09-10T10:03:26Z"}},
		// English-localized variants of the same messages.
		{"Would last have run at Tuesday, September 10, 2019 9:15:26 AM UTC; would next run at Tuesday, September 10, 2019 10:03:26 AM UTC.", Except{Last: "2019-09-10T09:15:26Z", Next: "2019-09-10T10:03:26Z"}},
		{"Would last have run at Tuesday, September 10, 2019 8:41:34 AM UTC; would next run at Tuesday, September 10, 2019 9:41:34 AM UTC.", Except{Last: "2019-09-10T08:41:34Z", Next: "2019-09-10T09:41:34Z"}},
	}

	for _, item := range Items {
		last, next, err := parseCronJobTime(item.Input)
		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}

		if last != item.Expected.Last {
			t.Errorf("got %#v, expected %#v", last, item.Expected.Last)
		}

		if next != item.Expected.Next {
			t.Errorf("got %#v, expected %#v", next, item.Expected.Next)
		}

	}
}
|
||||
@@ -1,77 +0,0 @@
|
||||
// Copyright 2015 Vadim Kravcenko
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Folder is a client-side handle on a Jenkins folder job
// (com.cloudbees.hudson.plugins.folder.Folder).
type Folder struct {
	// Raw holds the last JSON state fetched from Jenkins (see Poll).
	Raw *FolderResponse
	// Jenkins is the client used for all API requests.
	Jenkins *Jenkins
	// Base is the folder's URL path, e.g. "/job/parent/job/name".
	Base string
}
|
||||
|
||||
// FolderResponse mirrors the JSON object Jenkins returns for a folder job.
type FolderResponse struct {
	Actions     []GeneralObj
	Description string `json:"description"`
	DisplayName string `json:"displayName"`
	Name        string `json:"name"`
	URL         string `json:"url"`
	// Jobs lists the folder's direct children.
	Jobs []InnerJob `json:"jobs"`
}
|
||||
|
||||
func (f *Folder) parentBase() string {
|
||||
return f.Base[:strings.LastIndex(f.Base, "/job")]
|
||||
}
|
||||
|
||||
// GetName returns the folder's name as last fetched from Jenkins.
func (f *Folder) GetName() string {
	return f.Raw.Name
}
|
||||
|
||||
// Create creates a new Jenkins folder named name (with the given
// description) inside this folder's parent, then refreshes f.Raw via Poll.
// On a non-200 response it returns an error carrying the numeric status.
func (f *Folder) Create(name, description string) (*Folder, error) {
	mode := "com.cloudbees.hudson.plugins.folder.Folder"
	// Jenkins' createItem endpoint wants the fields both as plain form
	// values and duplicated inside the "json" form field.
	data := map[string]string{
		"name":   name,
		"mode":   mode,
		"Submit": "OK",
		"json": makeJson(map[string]string{
			"name":        name,
			"mode":        mode,
			"description": description,
		}),
	}
	r, err := f.Jenkins.Requester.Post(f.parentBase()+"/createItem", nil, f.Raw, data)
	if err != nil {
		return nil, err
	}
	r.Body.Close()
	// NOTE(review): literal 200 instead of http.StatusOK — this file does
	// not import net/http, so the literal is kept.
	if r.StatusCode == 200 {
		// NOTE(review): Poll's error is deliberately(?) ignored here —
		// creation already succeeded; confirm this best-effort refresh
		// is intended.
		f.Poll()
		return f, nil
	}
	return nil, errors.New(strconv.Itoa(r.StatusCode))
}
|
||||
|
||||
// Poll refreshes f.Raw from Jenkins by fetching the folder's JSON at its
// base path, and returns the HTTP status code of the request.
func (f *Folder) Poll() (int, error) {
	response, err := f.Jenkins.Requester.GetJSON(f.Base, f.Raw, nil)
	if err != nil {
		return 0, err
	}
	response.Body.Close()
	return response.StatusCode, nil
}
|
||||
@@ -1,146 +0,0 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/beevik/etree"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
devopsv1alpha3 "kubesphere.io/api/devops/v1alpha3"
|
||||
)
|
||||
|
||||
func AppendBitbucketServerSourceToEtree(source *etree.Element, gitSource *devopsv1alpha3.BitbucketServerSource) {
|
||||
if gitSource == nil {
|
||||
klog.Warning("please provide BitbucketServer source when the sourceType is BitbucketServer")
|
||||
return
|
||||
}
|
||||
source.CreateAttr("class", "com.cloudbees.jenkins.plugins.bitbucket.BitbucketSCMSource")
|
||||
source.CreateAttr("plugin", "cloudbees-bitbucket-branch-source")
|
||||
source.CreateElement("id").SetText(gitSource.ScmId)
|
||||
source.CreateElement("credentialsId").SetText(gitSource.CredentialId)
|
||||
source.CreateElement("repoOwner").SetText(gitSource.Owner)
|
||||
source.CreateElement("repository").SetText(gitSource.Repo)
|
||||
source.CreateElement("serverUrl").SetText(gitSource.ApiUri)
|
||||
|
||||
traits := source.CreateElement("traits")
|
||||
if gitSource.DiscoverBranches != 0 {
|
||||
traits.CreateElement("com.cloudbees.jenkins.plugins.bitbucket.BranchDiscoveryTrait>").
|
||||
CreateElement("strategyId").SetText(strconv.Itoa(gitSource.DiscoverBranches))
|
||||
}
|
||||
if gitSource.DiscoverPRFromOrigin != 0 {
|
||||
traits.CreateElement("com.cloudbees.jenkins.plugins.bitbucket.OriginPullRequestDiscoveryTrait").
|
||||
CreateElement("strategyId").SetText(strconv.Itoa(gitSource.DiscoverPRFromOrigin))
|
||||
}
|
||||
if gitSource.DiscoverPRFromForks != nil {
|
||||
forkTrait := traits.CreateElement("com.cloudbees.jenkins.plugins.bitbucket.ForkPullRequestDiscoveryTrait")
|
||||
forkTrait.CreateElement("strategyId").SetText(strconv.Itoa(gitSource.DiscoverPRFromForks.Strategy))
|
||||
trustClass := "com.cloudbees.jenkins.plugins.bitbucket.ForkPullRequestDiscoveryTrait$"
|
||||
|
||||
if prTrust := PRDiscoverTrust(gitSource.DiscoverPRFromForks.Trust); prTrust.IsValid() {
|
||||
trustClass += prTrust.String()
|
||||
} else {
|
||||
klog.Warningf("invalid Bitbucket discover PR trust value: %d", prTrust.Value())
|
||||
}
|
||||
|
||||
forkTrait.CreateElement("trust").CreateAttr("class", trustClass)
|
||||
}
|
||||
if gitSource.DiscoverTags {
|
||||
traits.CreateElement("com.cloudbees.jenkins.plugins.bitbucket.TagDiscoveryTrait")
|
||||
}
|
||||
if gitSource.CloneOption != nil {
|
||||
cloneExtension := traits.CreateElement("jenkins.plugins.git.traits.CloneOptionTrait").CreateElement("extension")
|
||||
cloneExtension.CreateAttr("class", "hudson.plugins.git.extensions.impl.CloneOption")
|
||||
cloneExtension.CreateElement("shallow").SetText(strconv.FormatBool(gitSource.CloneOption.Shallow))
|
||||
cloneExtension.CreateElement("noTags").SetText(strconv.FormatBool(false))
|
||||
cloneExtension.CreateElement("honorRefspec").SetText(strconv.FormatBool(true))
|
||||
cloneExtension.CreateElement("reference")
|
||||
if gitSource.CloneOption.Timeout >= 0 {
|
||||
cloneExtension.CreateElement("timeout").SetText(strconv.Itoa(gitSource.CloneOption.Timeout))
|
||||
} else {
|
||||
cloneExtension.CreateElement("timeout").SetText(strconv.Itoa(10))
|
||||
}
|
||||
|
||||
if gitSource.CloneOption.Depth >= 0 {
|
||||
cloneExtension.CreateElement("depth").SetText(strconv.Itoa(gitSource.CloneOption.Depth))
|
||||
} else {
|
||||
cloneExtension.CreateElement("depth").SetText(strconv.Itoa(1))
|
||||
}
|
||||
}
|
||||
if gitSource.RegexFilter != "" {
|
||||
regexTraits := traits.CreateElement("jenkins.scm.impl.trait.RegexSCMHeadFilterTrait")
|
||||
regexTraits.CreateAttr("plugin", "scm-api")
|
||||
regexTraits.CreateElement("regex").SetText(gitSource.RegexFilter)
|
||||
}
|
||||
}
|
||||
|
||||
func GetBitbucketServerSourceFromEtree(source *etree.Element) *devopsv1alpha3.BitbucketServerSource {
|
||||
var s devopsv1alpha3.BitbucketServerSource
|
||||
if credential := source.SelectElement("credentialsId"); credential != nil {
|
||||
s.CredentialId = credential.Text()
|
||||
}
|
||||
if repoOwner := source.SelectElement("repoOwner"); repoOwner != nil {
|
||||
s.Owner = repoOwner.Text()
|
||||
}
|
||||
if repository := source.SelectElement("repository"); repository != nil {
|
||||
s.Repo = repository.Text()
|
||||
}
|
||||
if apiUri := source.SelectElement("serverUrl"); apiUri != nil {
|
||||
s.ApiUri = apiUri.Text()
|
||||
}
|
||||
traits := source.SelectElement("traits")
|
||||
if branchDiscoverTrait := traits.SelectElement(
|
||||
"com.cloudbees.jenkins.plugins.bitbucket.BranchDiscoveryTrait"); branchDiscoverTrait != nil {
|
||||
strategyId, _ := strconv.Atoi(branchDiscoverTrait.SelectElement("strategyId").Text())
|
||||
s.DiscoverBranches = strategyId
|
||||
}
|
||||
if tagDiscoverTrait := traits.SelectElement(
|
||||
"com.cloudbees.jenkins.plugins.bitbucket.TagDiscoveryTrait"); tagDiscoverTrait != nil {
|
||||
s.DiscoverTags = true
|
||||
}
|
||||
if originPRDiscoverTrait := traits.SelectElement(
|
||||
"com.cloudbees.jenkins.plugins.bitbucket.OriginPullRequestDiscoveryTrait"); originPRDiscoverTrait != nil {
|
||||
strategyId, _ := strconv.Atoi(originPRDiscoverTrait.SelectElement("strategyId").Text())
|
||||
s.DiscoverPRFromOrigin = strategyId
|
||||
}
|
||||
if forkPRDiscoverTrait := traits.SelectElement(
|
||||
"com.cloudbees.jenkins.plugins.bitbucket.ForkPullRequestDiscoveryTrait"); forkPRDiscoverTrait != nil {
|
||||
strategyId, _ := strconv.Atoi(forkPRDiscoverTrait.SelectElement("strategyId").Text())
|
||||
trustClass := forkPRDiscoverTrait.SelectElement("trust").SelectAttr("class").Value
|
||||
trust := strings.Split(trustClass, "$")
|
||||
|
||||
if prTrust := BitbucketPRDiscoverTrust(1).ParseFromString(trust[1]); prTrust.IsValid() {
|
||||
s.DiscoverPRFromForks = &devopsv1alpha3.DiscoverPRFromForks{
|
||||
Strategy: strategyId,
|
||||
Trust: prTrust.Value(),
|
||||
}
|
||||
} else {
|
||||
klog.Warningf("invalid Bitbucket discover PR trust value: %s", trust[1])
|
||||
}
|
||||
|
||||
if cloneTrait := traits.SelectElement(
|
||||
"jenkins.plugins.git.traits.CloneOptionTrait"); cloneTrait != nil {
|
||||
if cloneExtension := cloneTrait.SelectElement(
|
||||
"extension"); cloneExtension != nil {
|
||||
s.CloneOption = &devopsv1alpha3.GitCloneOption{}
|
||||
if value, err := strconv.ParseBool(cloneExtension.SelectElement("shallow").Text()); err == nil {
|
||||
s.CloneOption.Shallow = value
|
||||
}
|
||||
if value, err := strconv.ParseInt(cloneExtension.SelectElement("timeout").Text(), 10, 32); err == nil {
|
||||
s.CloneOption.Timeout = int(value)
|
||||
}
|
||||
if value, err := strconv.ParseInt(cloneExtension.SelectElement("depth").Text(), 10, 32); err == nil {
|
||||
s.CloneOption.Depth = int(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if regexTrait := traits.SelectElement(
|
||||
"jenkins.scm.impl.trait.RegexSCMHeadFilterTrait"); regexTrait != nil {
|
||||
if regex := regexTrait.SelectElement("regex"); regex != nil {
|
||||
s.RegexFilter = regex.Text()
|
||||
}
|
||||
}
|
||||
}
|
||||
return &s
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
package internal
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestCommonSituation checks that every Append*SourceToEtree helper
// tolerates nil arguments without panicking (each is expected to log a
// warning and return early when given a nil source).
func TestCommonSituation(t *testing.T) {
	// make sure these functions do not panic
	// I add these test cases because it's possible that users just do give the git source
	AppendGitlabSourceToEtree(nil, nil)
	AppendGithubSourceToEtree(nil, nil)
	AppendBitbucketServerSourceToEtree(nil, nil)
	AppendGitSourceToEtree(nil, nil)
	AppendSingleSvnSourceToEtree(nil, nil)
	AppendSvnSourceToEtree(nil, nil)
}
|
||||
@@ -1,99 +0,0 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/beevik/etree"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
devopsv1alpha3 "kubesphere.io/api/devops/v1alpha3"
|
||||
)
|
||||
|
||||
// AppendGitSourceToEtree renders a GitSource into the given XML element as
// a plain git-plugin SCM source (jenkins.plugins.git.GitSCMSource), with
// branch/tag discovery traits, clone options and an optional regex filter.
// It is a no-op (with a warning) when gitSource is nil.
func AppendGitSourceToEtree(source *etree.Element, gitSource *devopsv1alpha3.GitSource) {
	if gitSource == nil {
		klog.Warning("please provide Git source when the sourceType is Git")
		return
	}
	source.CreateAttr("class", "jenkins.plugins.git.GitSCMSource")
	source.CreateAttr("plugin", "git")
	source.CreateElement("id").SetText(gitSource.ScmId)
	source.CreateElement("remote").SetText(gitSource.Url)
	// credentialsId is omitted entirely for anonymous access.
	if gitSource.CredentialId != "" {
		source.CreateElement("credentialsId").SetText(gitSource.CredentialId)
	}
	traits := source.CreateElement("traits")
	if gitSource.DiscoverBranches {
		traits.CreateElement("jenkins.plugins.git.traits.BranchDiscoveryTrait")
	}
	if gitSource.DiscoverTags {
		traits.CreateElement("jenkins.plugins.git.traits.TagDiscoveryTrait")
	}
	if gitSource.CloneOption != nil {
		cloneExtension := traits.CreateElement("jenkins.plugins.git.traits.CloneOptionTrait").CreateElement("extension")
		cloneExtension.CreateAttr("class", "hudson.plugins.git.extensions.impl.CloneOption")
		cloneExtension.CreateElement("shallow").SetText(strconv.FormatBool(gitSource.CloneOption.Shallow))
		cloneExtension.CreateElement("noTags").SetText(strconv.FormatBool(false))
		cloneExtension.CreateElement("honorRefspec").SetText(strconv.FormatBool(true))
		cloneExtension.CreateElement("reference")
		// Negative values fall back to the plugin defaults (10 min / depth 1).
		if gitSource.CloneOption.Timeout >= 0 {
			cloneExtension.CreateElement("timeout").SetText(strconv.Itoa(gitSource.CloneOption.Timeout))
		} else {
			cloneExtension.CreateElement("timeout").SetText(strconv.Itoa(10))
		}

		if gitSource.CloneOption.Depth >= 0 {
			cloneExtension.CreateElement("depth").SetText(strconv.Itoa(gitSource.CloneOption.Depth))
		} else {
			cloneExtension.CreateElement("depth").SetText(strconv.Itoa(1))
		}
	}

	if gitSource.RegexFilter != "" {
		regexTraits := traits.CreateElement("jenkins.scm.impl.trait.RegexSCMHeadFilterTrait")
		regexTraits.CreateAttr("plugin", "scm-api")
		regexTraits.CreateElement("regex").SetText(gitSource.RegexFilter)
	}
}
|
||||
|
||||
// GetGitSourcefromEtree parses a git-plugin SCM source XML element back
// into a GitSource — the inverse of AppendGitSourceToEtree.
// NOTE(review): it assumes a <traits> child exists; a source without one
// would nil-panic on the SelectElement calls below — confirm callers only
// pass elements produced by the Append helper.
func GetGitSourcefromEtree(source *etree.Element) *devopsv1alpha3.GitSource {
	var gitSource devopsv1alpha3.GitSource
	if credential := source.SelectElement("credentialsId"); credential != nil {
		gitSource.CredentialId = credential.Text()
	}
	if remote := source.SelectElement("remote"); remote != nil {
		gitSource.Url = remote.Text()
	}

	traits := source.SelectElement("traits")
	// Presence of a discovery trait element enables the corresponding flag.
	if branchDiscoverTrait := traits.SelectElement(
		"jenkins.plugins.git.traits.BranchDiscoveryTrait"); branchDiscoverTrait != nil {
		gitSource.DiscoverBranches = true
	}
	if tagDiscoverTrait := traits.SelectElement(
		"jenkins.plugins.git.traits.TagDiscoveryTrait"); tagDiscoverTrait != nil {
		gitSource.DiscoverTags = true
	}
	if cloneTrait := traits.SelectElement(
		"jenkins.plugins.git.traits.CloneOptionTrait"); cloneTrait != nil {
		if cloneExtension := cloneTrait.SelectElement(
			"extension"); cloneExtension != nil {
			gitSource.CloneOption = &devopsv1alpha3.GitCloneOption{}
			// Unparsable values leave the zero value in place.
			if value, err := strconv.ParseBool(cloneExtension.SelectElement("shallow").Text()); err == nil {
				gitSource.CloneOption.Shallow = value
			}
			if value, err := strconv.ParseInt(cloneExtension.SelectElement("timeout").Text(), 10, 32); err == nil {
				gitSource.CloneOption.Timeout = int(value)
			}
			if value, err := strconv.ParseInt(cloneExtension.SelectElement("depth").Text(), 10, 32); err == nil {
				gitSource.CloneOption.Depth = int(value)
			}
		}
	}
	if regexTrait := traits.SelectElement(
		"jenkins.scm.impl.trait.RegexSCMHeadFilterTrait"); regexTrait != nil {
		if regex := regexTrait.SelectElement("regex"); regex != nil {
			gitSource.RegexFilter = regex.Text()
		}
	}
	return &gitSource
}
|
||||
@@ -1,143 +0,0 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/beevik/etree"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
devopsv1alpha3 "kubesphere.io/api/devops/v1alpha3"
|
||||
)
|
||||
|
||||
func AppendGithubSourceToEtree(source *etree.Element, githubSource *devopsv1alpha3.GithubSource) {
|
||||
if githubSource == nil {
|
||||
klog.Warning("please provide GitHub source when the sourceType is GitHub")
|
||||
return
|
||||
}
|
||||
source.CreateAttr("class", "org.jenkinsci.plugins.github_branch_source.GitHubSCMSource")
|
||||
source.CreateAttr("plugin", "github-branch-source")
|
||||
source.CreateElement("id").SetText(githubSource.ScmId)
|
||||
source.CreateElement("credentialsId").SetText(githubSource.CredentialId)
|
||||
source.CreateElement("repoOwner").SetText(githubSource.Owner)
|
||||
source.CreateElement("repository").SetText(githubSource.Repo)
|
||||
if githubSource.ApiUri != "" {
|
||||
source.CreateElement("apiUri").SetText(githubSource.ApiUri)
|
||||
}
|
||||
traits := source.CreateElement("traits")
|
||||
if githubSource.DiscoverBranches != 0 {
|
||||
traits.CreateElement("org.jenkinsci.plugins.github__branch__source.BranchDiscoveryTrait").
|
||||
CreateElement("strategyId").SetText(strconv.Itoa(githubSource.DiscoverBranches))
|
||||
}
|
||||
if githubSource.DiscoverPRFromOrigin != 0 {
|
||||
traits.CreateElement("org.jenkinsci.plugins.github__branch__source.OriginPullRequestDiscoveryTrait").
|
||||
CreateElement("strategyId").SetText(strconv.Itoa(githubSource.DiscoverPRFromOrigin))
|
||||
}
|
||||
if githubSource.DiscoverPRFromForks != nil {
|
||||
forkTrait := traits.CreateElement("org.jenkinsci.plugins.github__branch__source.ForkPullRequestDiscoveryTrait")
|
||||
forkTrait.CreateElement("strategyId").SetText(strconv.Itoa(githubSource.DiscoverPRFromForks.Strategy))
|
||||
trustClass := "org.jenkinsci.plugins.github_branch_source.ForkPullRequestDiscoveryTrait$"
|
||||
if prTrust := GitHubPRDiscoverTrust(githubSource.DiscoverPRFromForks.Trust); prTrust.IsValid() {
|
||||
trustClass += prTrust.String()
|
||||
} else {
|
||||
klog.Warningf("invalid GitHub discover PR trust value: %d", prTrust.Value())
|
||||
}
|
||||
forkTrait.CreateElement("trust").CreateAttr("class", trustClass)
|
||||
}
|
||||
if githubSource.DiscoverTags {
|
||||
traits.CreateElement("org.jenkinsci.plugins.github__branch__source.TagDiscoveryTrait")
|
||||
}
|
||||
if githubSource.CloneOption != nil {
|
||||
cloneExtension := traits.CreateElement("jenkins.plugins.git.traits.CloneOptionTrait").CreateElement("extension")
|
||||
cloneExtension.CreateAttr("class", "hudson.plugins.git.extensions.impl.CloneOption")
|
||||
cloneExtension.CreateElement("shallow").SetText(strconv.FormatBool(githubSource.CloneOption.Shallow))
|
||||
cloneExtension.CreateElement("noTags").SetText(strconv.FormatBool(false))
|
||||
cloneExtension.CreateElement("honorRefspec").SetText(strconv.FormatBool(true))
|
||||
cloneExtension.CreateElement("reference")
|
||||
if githubSource.CloneOption.Timeout >= 0 {
|
||||
cloneExtension.CreateElement("timeout").SetText(strconv.Itoa(githubSource.CloneOption.Timeout))
|
||||
} else {
|
||||
cloneExtension.CreateElement("timeout").SetText(strconv.Itoa(10))
|
||||
}
|
||||
|
||||
if githubSource.CloneOption.Depth >= 0 {
|
||||
cloneExtension.CreateElement("depth").SetText(strconv.Itoa(githubSource.CloneOption.Depth))
|
||||
} else {
|
||||
cloneExtension.CreateElement("depth").SetText(strconv.Itoa(1))
|
||||
}
|
||||
}
|
||||
if githubSource.RegexFilter != "" {
|
||||
regexTraits := traits.CreateElement("jenkins.scm.impl.trait.RegexSCMHeadFilterTrait")
|
||||
regexTraits.CreateAttr("plugin", "scm-api")
|
||||
regexTraits.CreateElement("regex").SetText(githubSource.RegexFilter)
|
||||
}
|
||||
}
|
||||
|
||||
func GetGithubSourcefromEtree(source *etree.Element) *devopsv1alpha3.GithubSource {
|
||||
var githubSource devopsv1alpha3.GithubSource
|
||||
if credential := source.SelectElement("credentialsId"); credential != nil {
|
||||
githubSource.CredentialId = credential.Text()
|
||||
}
|
||||
if repoOwner := source.SelectElement("repoOwner"); repoOwner != nil {
|
||||
githubSource.Owner = repoOwner.Text()
|
||||
}
|
||||
if repository := source.SelectElement("repository"); repository != nil {
|
||||
githubSource.Repo = repository.Text()
|
||||
}
|
||||
if apiUri := source.SelectElement("apiUri"); apiUri != nil {
|
||||
githubSource.ApiUri = apiUri.Text()
|
||||
}
|
||||
traits := source.SelectElement("traits")
|
||||
if branchDiscoverTrait := traits.SelectElement(
|
||||
"org.jenkinsci.plugins.github__branch__source.BranchDiscoveryTrait"); branchDiscoverTrait != nil {
|
||||
strategyId, _ := strconv.Atoi(branchDiscoverTrait.SelectElement("strategyId").Text())
|
||||
githubSource.DiscoverBranches = strategyId
|
||||
}
|
||||
if tagDiscoverTrait := traits.SelectElement(
|
||||
"org.jenkinsci.plugins.github__branch__source.TagDiscoveryTrait"); tagDiscoverTrait != nil {
|
||||
githubSource.DiscoverTags = true
|
||||
}
|
||||
if originPRDiscoverTrait := traits.SelectElement(
|
||||
"org.jenkinsci.plugins.github__branch__source.OriginPullRequestDiscoveryTrait"); originPRDiscoverTrait != nil {
|
||||
strategyId, _ := strconv.Atoi(originPRDiscoverTrait.SelectElement("strategyId").Text())
|
||||
githubSource.DiscoverPRFromOrigin = strategyId
|
||||
}
|
||||
if forkPRDiscoverTrait := traits.SelectElement(
|
||||
"org.jenkinsci.plugins.github__branch__source.ForkPullRequestDiscoveryTrait"); forkPRDiscoverTrait != nil {
|
||||
strategyId, _ := strconv.Atoi(forkPRDiscoverTrait.SelectElement("strategyId").Text())
|
||||
trustClass := forkPRDiscoverTrait.SelectElement("trust").SelectAttr("class").Value
|
||||
trust := strings.Split(trustClass, "$")
|
||||
if prTrust := GitHubPRDiscoverTrust(1).ParseFromString(trust[1]); prTrust.IsValid() {
|
||||
githubSource.DiscoverPRFromForks = &devopsv1alpha3.DiscoverPRFromForks{
|
||||
Strategy: strategyId,
|
||||
Trust: prTrust.Value(),
|
||||
}
|
||||
} else {
|
||||
klog.Warningf("invalid Gitlab discover PR trust value: %s", trust[1])
|
||||
}
|
||||
if cloneTrait := traits.SelectElement(
|
||||
"jenkins.plugins.git.traits.CloneOptionTrait"); cloneTrait != nil {
|
||||
if cloneExtension := cloneTrait.SelectElement(
|
||||
"extension"); cloneExtension != nil {
|
||||
githubSource.CloneOption = &devopsv1alpha3.GitCloneOption{}
|
||||
if value, err := strconv.ParseBool(cloneExtension.SelectElement("shallow").Text()); err == nil {
|
||||
githubSource.CloneOption.Shallow = value
|
||||
}
|
||||
if value, err := strconv.ParseInt(cloneExtension.SelectElement("timeout").Text(), 10, 32); err == nil {
|
||||
githubSource.CloneOption.Timeout = int(value)
|
||||
}
|
||||
if value, err := strconv.ParseInt(cloneExtension.SelectElement("depth").Text(), 10, 32); err == nil {
|
||||
githubSource.CloneOption.Depth = int(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if regexTrait := traits.SelectElement(
|
||||
"jenkins.scm.impl.trait.RegexSCMHeadFilterTrait"); regexTrait != nil {
|
||||
if regex := regexTrait.SelectElement("regex"); regex != nil {
|
||||
githubSource.RegexFilter = regex.Text()
|
||||
}
|
||||
}
|
||||
}
|
||||
return &githubSource
|
||||
}
|
||||
@@ -1,144 +0,0 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/beevik/etree"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
devopsv1alpha3 "kubesphere.io/api/devops/v1alpha3"
|
||||
)
|
||||
|
||||
func AppendGitlabSourceToEtree(source *etree.Element, gitSource *devopsv1alpha3.GitlabSource) {
|
||||
if gitSource == nil {
|
||||
klog.Warning("please provide Gitlab source when the sourceType is Gitlab")
|
||||
return
|
||||
}
|
||||
source.CreateAttr("class", "io.jenkins.plugins.gitlabbranchsource.GitLabSCMSource")
|
||||
source.CreateAttr("plugin", "gitlab-branch-source")
|
||||
source.CreateElement("id").SetText(gitSource.ScmId)
|
||||
source.CreateElement("serverName").SetText(gitSource.ServerName)
|
||||
source.CreateElement("credentialsId").SetText(gitSource.CredentialId)
|
||||
source.CreateElement("projectOwner").SetText(gitSource.Owner)
|
||||
source.CreateElement("projectPath").SetText(gitSource.Repo)
|
||||
traits := source.CreateElement("traits")
|
||||
if gitSource.DiscoverBranches != 0 {
|
||||
traits.CreateElement("io.jenkins.plugins.gitlabbranchsource.BranchDiscoveryTrait").
|
||||
CreateElement("strategyId").SetText(strconv.Itoa(gitSource.DiscoverBranches))
|
||||
}
|
||||
if gitSource.DiscoverTags {
|
||||
traits.CreateElement("io.jenkins.plugins.gitlabbranchsource.TagDiscoveryTrait")
|
||||
}
|
||||
if gitSource.DiscoverPRFromOrigin != 0 {
|
||||
traits.CreateElement("io.jenkins.plugins.gitlabbranchsource.OriginMergeRequestDiscoveryTrait").
|
||||
CreateElement("strategyId").SetText(strconv.Itoa(gitSource.DiscoverPRFromOrigin))
|
||||
}
|
||||
if gitSource.DiscoverPRFromForks != nil {
|
||||
forkTrait := traits.CreateElement("io.jenkins.plugins.gitlabbranchsource.ForkMergeRequestDiscoveryTrait")
|
||||
forkTrait.CreateElement("strategyId").SetText(strconv.Itoa(gitSource.DiscoverPRFromForks.Strategy))
|
||||
trustClass := "io.jenkins.plugins.gitlabbranchsource.ForkMergeRequestDiscoveryTrait$"
|
||||
|
||||
if prTrust := PRDiscoverTrust(gitSource.DiscoverPRFromForks.Trust); prTrust.IsValid() {
|
||||
trustClass += prTrust.String()
|
||||
} else {
|
||||
klog.Warningf("invalid Gitlab discover PR trust value: %d", prTrust.Value())
|
||||
}
|
||||
forkTrait.CreateElement("trust").CreateAttr("class", trustClass)
|
||||
}
|
||||
if gitSource.CloneOption != nil {
|
||||
cloneExtension := traits.CreateElement("jenkins.plugins.git.traits.CloneOptionTrait").CreateElement("extension")
|
||||
cloneExtension.CreateAttr("class", "hudson.plugins.git.extensions.impl.CloneOption")
|
||||
cloneExtension.CreateElement("shallow").SetText(strconv.FormatBool(gitSource.CloneOption.Shallow))
|
||||
cloneExtension.CreateElement("noTags").SetText(strconv.FormatBool(false))
|
||||
cloneExtension.CreateElement("honorRefspec").SetText(strconv.FormatBool(true))
|
||||
cloneExtension.CreateElement("reference")
|
||||
if gitSource.CloneOption.Timeout >= 0 {
|
||||
cloneExtension.CreateElement("timeout").SetText(strconv.Itoa(gitSource.CloneOption.Timeout))
|
||||
} else {
|
||||
cloneExtension.CreateElement("timeout").SetText(strconv.Itoa(10))
|
||||
}
|
||||
|
||||
if gitSource.CloneOption.Depth >= 0 {
|
||||
cloneExtension.CreateElement("depth").SetText(strconv.Itoa(gitSource.CloneOption.Depth))
|
||||
} else {
|
||||
cloneExtension.CreateElement("depth").SetText(strconv.Itoa(1))
|
||||
}
|
||||
}
|
||||
if gitSource.RegexFilter != "" {
|
||||
regexTraits := traits.CreateElement("jenkins.scm.impl.trait.RegexSCMHeadFilterTrait")
|
||||
regexTraits.CreateAttr("plugin", "scm-api")
|
||||
regexTraits.CreateElement("regex").SetText(gitSource.RegexFilter)
|
||||
}
|
||||
//nolint:gosimple
|
||||
return
|
||||
}
|
||||
|
||||
func GetGitlabSourceFromEtree(source *etree.Element) (gitSource *devopsv1alpha3.GitlabSource) {
|
||||
gitSource = &devopsv1alpha3.GitlabSource{}
|
||||
if credential := source.SelectElement("credentialsId"); credential != nil {
|
||||
gitSource.CredentialId = credential.Text()
|
||||
}
|
||||
if serverName := source.SelectElement("serverName"); serverName != nil {
|
||||
gitSource.ServerName = serverName.Text()
|
||||
}
|
||||
if repoOwner := source.SelectElement("projectOwner"); repoOwner != nil {
|
||||
gitSource.Owner = repoOwner.Text()
|
||||
}
|
||||
if repository := source.SelectElement("projectPath"); repository != nil {
|
||||
gitSource.Repo = repository.Text()
|
||||
}
|
||||
traits := source.SelectElement("traits")
|
||||
if branchDiscoverTrait := traits.SelectElement(
|
||||
"io.jenkins.plugins.gitlabbranchsource.BranchDiscoveryTrait"); branchDiscoverTrait != nil {
|
||||
strategyId, _ := strconv.Atoi(branchDiscoverTrait.SelectElement("strategyId").Text())
|
||||
gitSource.DiscoverBranches = strategyId
|
||||
}
|
||||
if tagDiscoverTrait := traits.SelectElement(
|
||||
"io.jenkins.plugins.gitlabbranchsource.TagDiscoveryTrait"); tagDiscoverTrait != nil {
|
||||
gitSource.DiscoverTags = true
|
||||
}
|
||||
if originPRDiscoverTrait := traits.SelectElement(
|
||||
"io.jenkins.plugins.gitlabbranchsource.OriginMergeRequestDiscoveryTrait"); originPRDiscoverTrait != nil {
|
||||
strategyId, _ := strconv.Atoi(originPRDiscoverTrait.SelectElement("strategyId").Text())
|
||||
gitSource.DiscoverPRFromOrigin = strategyId
|
||||
}
|
||||
if forkPRDiscoverTrait := traits.SelectElement(
|
||||
"io.jenkins.plugins.gitlabbranchsource.ForkMergeRequestDiscoveryTrait"); forkPRDiscoverTrait != nil {
|
||||
strategyId, _ := strconv.Atoi(forkPRDiscoverTrait.SelectElement("strategyId").Text())
|
||||
trustClass := forkPRDiscoverTrait.SelectElement("trust").SelectAttr("class").Value
|
||||
trust := strings.Split(trustClass, "$")
|
||||
if prTrust := PRDiscoverTrust(1).ParseFromString(trust[1]); prTrust.IsValid() {
|
||||
gitSource.DiscoverPRFromForks = &devopsv1alpha3.DiscoverPRFromForks{
|
||||
Strategy: strategyId,
|
||||
Trust: prTrust.Value(),
|
||||
}
|
||||
} else {
|
||||
klog.Warningf("invalid Gitlab discover PR trust value: %s", trust[1])
|
||||
}
|
||||
if cloneTrait := traits.SelectElement(
|
||||
"jenkins.plugins.git.traits.CloneOptionTrait"); cloneTrait != nil {
|
||||
if cloneExtension := cloneTrait.SelectElement(
|
||||
"extension"); cloneExtension != nil {
|
||||
gitSource.CloneOption = &devopsv1alpha3.GitCloneOption{}
|
||||
if value, err := strconv.ParseBool(cloneExtension.SelectElement("shallow").Text()); err == nil {
|
||||
gitSource.CloneOption.Shallow = value
|
||||
}
|
||||
if value, err := strconv.ParseInt(cloneExtension.SelectElement("timeout").Text(), 10, 32); err == nil {
|
||||
gitSource.CloneOption.Timeout = int(value)
|
||||
}
|
||||
if value, err := strconv.ParseInt(cloneExtension.SelectElement("depth").Text(), 10, 32); err == nil {
|
||||
gitSource.CloneOption.Depth = int(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if regexTrait := traits.SelectElement(
|
||||
"jenkins.scm.impl.trait.RegexSCMHeadFilterTrait"); regexTrait != nil {
|
||||
if regex := regexTrait.SelectElement("regex"); regex != nil {
|
||||
gitSource.RegexFilter = regex.Text()
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -1,124 +0,0 @@
|
||||
package internal
|
||||
|
||||
// PRDiscoverTrust is the merge/pull-request discovery trust strategy shared
// by the Git-based SCM sources. The zero value is not a valid strategy.
type PRDiscoverTrust int

const (
	PRDiscoverTrustMember     PRDiscoverTrust = 1
	PRDiscoverTrustEveryone   PRDiscoverTrust = 2
	PRDiscoverTrustPermission PRDiscoverTrust = 3
	PRDiscoverTrustNobody     PRDiscoverTrust = 4
	PRDiscoverUnknown         PRDiscoverTrust = -1
)

// prDiscoverTrustNames maps each known strategy to the Jenkins trait
// class-name suffix it serializes to.
var prDiscoverTrustNames = map[PRDiscoverTrust]string{
	PRDiscoverTrustMember:     "TrustMembers",
	PRDiscoverTrustEveryone:   "TrustEveryone",
	PRDiscoverTrustPermission: "TrustPermission",
	PRDiscoverTrustNobody:     "TrustNobody",
}

// prDiscoverTrustValues is the inverse of prDiscoverTrustNames.
var prDiscoverTrustValues = map[string]PRDiscoverTrust{
	"TrustMembers":    PRDiscoverTrustMember,
	"TrustEveryone":   PRDiscoverTrustEveryone,
	"TrustPermission": PRDiscoverTrustPermission,
	"TrustNobody":     PRDiscoverTrustNobody,
}

// Value returns the numeric form of the strategy.
func (p PRDiscoverTrust) Value() int { return int(p) }

// String returns the Jenkins trait suffix, or "" for an unknown strategy.
func (p PRDiscoverTrust) String() string { return prDiscoverTrustNames[p] }

// IsValid reports whether the strategy has a known Jenkins trait suffix.
func (p PRDiscoverTrust) IsValid() bool { return p.String() != "" }

// ParseFromString converts a Jenkins trait suffix back into a strategy,
// yielding PRDiscoverUnknown for unrecognized input. The receiver is unused.
func (p PRDiscoverTrust) ParseFromString(prTrust string) PRDiscoverTrust {
	if v, ok := prDiscoverTrustValues[prTrust]; ok {
		return v
	}
	return PRDiscoverUnknown
}

// GitHubPRDiscoverTrust extends PRDiscoverTrust with the GitHub-specific
// "TrustContributors" strategy, which occupies numeric value 1.
type GitHubPRDiscoverTrust int

const (
	GitHubPRDiscoverTrustContributors GitHubPRDiscoverTrust = 1
)

// Value returns the numeric form of the strategy.
func (p GitHubPRDiscoverTrust) Value() int { return int(p) }

// String returns the GitHub-specific suffix for value 1 and otherwise defers
// to the generic PRDiscoverTrust names.
func (p GitHubPRDiscoverTrust) String() string {
	if p == GitHubPRDiscoverTrustContributors {
		return "TrustContributors"
	}
	return PRDiscoverTrust(p).String()
}

// ParseFromString parses a trait suffix, recognizing "TrustContributors"
// before falling back to the generic names. The receiver is unused.
func (p GitHubPRDiscoverTrust) ParseFromString(prTrust string) GitHubPRDiscoverTrust {
	if prTrust == "TrustContributors" {
		return GitHubPRDiscoverTrustContributors
	}
	return GitHubPRDiscoverTrust(PRDiscoverTrust(p).ParseFromString(prTrust))
}

// IsValid reports whether the strategy has a known trait suffix.
func (p GitHubPRDiscoverTrust) IsValid() bool {
	return PRDiscoverTrust(p).IsValid()
}

// BitbucketPRDiscoverTrust is the Bitbucket-specific trust strategy. Any
// unrecognized value deliberately falls back to "TrustEveryone", so IsValid
// never reports false.
type BitbucketPRDiscoverTrust int

const (
	BitbucketPRDiscoverTrustEveryone  BitbucketPRDiscoverTrust = 1
	BitbucketPRDiscoverTrustTeamForks BitbucketPRDiscoverTrust = 2
	BitbucketPRDiscoverTrustNobody    BitbucketPRDiscoverTrust = 3
)

// Value returns the numeric form of the strategy.
func (p BitbucketPRDiscoverTrust) Value() int { return int(p) }

// IsValid reports whether the strategy has a trait suffix; always true given
// String's "TrustEveryone" fallback.
func (p BitbucketPRDiscoverTrust) IsValid() bool { return p.String() != "" }

// String returns the Bitbucket trait suffix; unknown values map to
// "TrustEveryone".
func (p BitbucketPRDiscoverTrust) String() string {
	switch p {
	case BitbucketPRDiscoverTrustTeamForks:
		return "TrustTeamForks"
	case BitbucketPRDiscoverTrustNobody:
		return "TrustNobody"
	default:
		return "TrustEveryone"
	}
}

// ParseFromString parses a Bitbucket trait suffix; unknown input maps to
// BitbucketPRDiscoverTrustEveryone. The receiver is unused.
func (p BitbucketPRDiscoverTrust) ParseFromString(prTrust string) BitbucketPRDiscoverTrust {
	switch prTrust {
	case "TrustTeamForks":
		return BitbucketPRDiscoverTrustTeamForks
	case "TrustNobody":
		return BitbucketPRDiscoverTrustNobody
	default:
		return BitbucketPRDiscoverTrustEveryone
	}
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPRDiscoverTrust(t *testing.T) {
|
||||
assert.Equal(t, PRDiscoverTrust(1).String(), "TrustMembers")
|
||||
assert.Equal(t, PRDiscoverTrust(2).String(), "TrustEveryone")
|
||||
assert.Equal(t, PRDiscoverTrust(3).String(), "TrustPermission")
|
||||
assert.Equal(t, PRDiscoverTrust(4).String(), "TrustNobody")
|
||||
assert.Equal(t, PRDiscoverTrust(-1).IsValid(), false)
|
||||
assert.Equal(t, PRDiscoverTrust(1).Value(), 1)
|
||||
|
||||
assert.Equal(t, PRDiscoverTrust(1).ParseFromString("TrustMembers"), PRDiscoverTrustMember)
|
||||
assert.Equal(t, PRDiscoverTrust(1).ParseFromString("TrustEveryone"), PRDiscoverTrustEveryone)
|
||||
assert.Equal(t, PRDiscoverTrust(1).ParseFromString("TrustPermission"), PRDiscoverTrustPermission)
|
||||
assert.Equal(t, PRDiscoverTrust(1).ParseFromString("TrustNobody"), PRDiscoverTrustNobody)
|
||||
assert.Equal(t, PRDiscoverTrust(1).ParseFromString("fake").IsValid(), false)
|
||||
|
||||
// GitHub
|
||||
assert.Equal(t, GitHubPRDiscoverTrust(1).String(), "TrustContributors")
|
||||
assert.Equal(t, GitHubPRDiscoverTrust(2).String(), PRDiscoverTrust(2).String())
|
||||
assert.Equal(t, GitHubPRDiscoverTrust(1).Value(), 1)
|
||||
assert.Equal(t, GitHubPRDiscoverTrust(1).ParseFromString("TrustContributors"), GitHubPRDiscoverTrustContributors)
|
||||
assert.Equal(t, GitHubPRDiscoverTrust(1).ParseFromString("TrustEveryone").String(), "TrustEveryone")
|
||||
assert.Equal(t, GitHubPRDiscoverTrust(1).ParseFromString("fake").IsValid(), false)
|
||||
|
||||
// Bithucket
|
||||
assert.Equal(t, BitbucketPRDiscoverTrust(1).String(), "TrustEveryone")
|
||||
assert.Equal(t, BitbucketPRDiscoverTrust(2).String(), "TrustTeamForks")
|
||||
assert.Equal(t, BitbucketPRDiscoverTrust(3).String(), "TrustNobody")
|
||||
assert.Equal(t, BitbucketPRDiscoverTrust(3).Value(), 3)
|
||||
assert.Equal(t, BitbucketPRDiscoverTrust(-1).String(), "TrustEveryone")
|
||||
assert.Equal(t, BitbucketPRDiscoverTrust(1).ParseFromString("TrustEveryone"), BitbucketPRDiscoverTrustEveryone)
|
||||
assert.Equal(t, BitbucketPRDiscoverTrust(1).ParseFromString("TrustTeamForks"), BitbucketPRDiscoverTrustTeamForks)
|
||||
assert.Equal(t, BitbucketPRDiscoverTrust(1).ParseFromString("TrustNobody"), BitbucketPRDiscoverTrustNobody)
|
||||
assert.Equal(t, BitbucketPRDiscoverTrust(1).ParseFromString("fake"), BitbucketPRDiscoverTrustEveryone)
|
||||
assert.Equal(t, BitbucketPRDiscoverTrust(1).ParseFromString("TrustNobody").IsValid(), true)
|
||||
}
|
||||
@@ -1,108 +0,0 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"github.com/beevik/etree"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
devopsv1alpha3 "kubesphere.io/api/devops/v1alpha3"
|
||||
)
|
||||
|
||||
func AppendSvnSourceToEtree(source *etree.Element, svnSource *devopsv1alpha3.SvnSource) {
|
||||
if svnSource == nil {
|
||||
klog.Warning("please provide SVN source when the sourceType is SVN")
|
||||
return
|
||||
}
|
||||
source.CreateAttr("class", "jenkins.scm.impl.subversion.SubversionSCMSource")
|
||||
source.CreateAttr("plugin", "subversion")
|
||||
source.CreateElement("id").SetText(svnSource.ScmId)
|
||||
if svnSource.CredentialId != "" {
|
||||
source.CreateElement("credentialsId").SetText(svnSource.CredentialId)
|
||||
}
|
||||
if svnSource.Remote != "" {
|
||||
source.CreateElement("remoteBase").SetText(svnSource.Remote)
|
||||
}
|
||||
if svnSource.Includes != "" {
|
||||
source.CreateElement("includes").SetText(svnSource.Includes)
|
||||
}
|
||||
if svnSource.Excludes != "" {
|
||||
source.CreateElement("excludes").SetText(svnSource.Excludes)
|
||||
}
|
||||
//nolint:gosimple
|
||||
return
|
||||
}
|
||||
|
||||
func AppendSingleSvnSourceToEtree(source *etree.Element, svnSource *devopsv1alpha3.SingleSvnSource) {
|
||||
if svnSource == nil {
|
||||
klog.Warning("please provide SingleSvn source when the sourceType is SingleSvn")
|
||||
return
|
||||
}
|
||||
source.CreateAttr("class", "jenkins.scm.impl.SingleSCMSource")
|
||||
source.CreateAttr("plugin", "scm-api")
|
||||
source.CreateElement("id").SetText(svnSource.ScmId)
|
||||
source.CreateElement("name").SetText("master")
|
||||
|
||||
scm := source.CreateElement("scm")
|
||||
scm.CreateAttr("class", "hudson.scm.SubversionSCM")
|
||||
scm.CreateAttr("plugin", "subversion")
|
||||
|
||||
location := scm.CreateElement("locations").CreateElement("hudson.scm.SubversionSCM_-ModuleLocation")
|
||||
if svnSource.Remote != "" {
|
||||
location.CreateElement("remote").SetText(svnSource.Remote)
|
||||
}
|
||||
if svnSource.CredentialId != "" {
|
||||
location.CreateElement("credentialsId").SetText(svnSource.CredentialId)
|
||||
}
|
||||
location.CreateElement("local").SetText(".")
|
||||
location.CreateElement("depthOption").SetText("infinity")
|
||||
location.CreateElement("ignoreExternalsOption").SetText("true")
|
||||
location.CreateElement("cancelProcessOnExternalsFail").SetText("true")
|
||||
|
||||
source.CreateElement("excludedRegions")
|
||||
source.CreateElement("includedRegions")
|
||||
source.CreateElement("excludedUsers")
|
||||
source.CreateElement("excludedRevprop")
|
||||
source.CreateElement("excludedCommitMessages")
|
||||
source.CreateElement("workspaceUpdater").CreateAttr("class", "hudson.scm.subversion.UpdateUpdater")
|
||||
source.CreateElement("ignoreDirPropChanges").SetText("false")
|
||||
source.CreateElement("filterChangelog").SetText("false")
|
||||
source.CreateElement("quietOperation").SetText("true")
|
||||
//nolint:gosimple
|
||||
return
|
||||
}
|
||||
|
||||
func GetSingleSvnSourceFromEtree(source *etree.Element) *devopsv1alpha3.SingleSvnSource {
|
||||
var s devopsv1alpha3.SingleSvnSource
|
||||
if scm := source.SelectElement("scm"); scm != nil {
|
||||
if locations := scm.SelectElement("locations"); locations != nil {
|
||||
if moduleLocations := locations.SelectElement("hudson.scm.SubversionSCM_-ModuleLocation"); moduleLocations != nil {
|
||||
if remote := moduleLocations.SelectElement("remote"); remote != nil {
|
||||
s.Remote = remote.Text()
|
||||
}
|
||||
if credentialId := moduleLocations.SelectElement("credentialsId"); credentialId != nil {
|
||||
s.CredentialId = credentialId.Text()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return &s
|
||||
}
|
||||
|
||||
func GetSvnSourcefromEtree(source *etree.Element) *devopsv1alpha3.SvnSource {
|
||||
var s devopsv1alpha3.SvnSource
|
||||
if remote := source.SelectElement("remoteBase"); remote != nil {
|
||||
s.Remote = remote.Text()
|
||||
}
|
||||
|
||||
if credentialsId := source.SelectElement("credentialsId"); credentialsId != nil {
|
||||
s.CredentialId = credentialsId.Text()
|
||||
}
|
||||
|
||||
if includes := source.SelectElement("includes"); includes != nil {
|
||||
s.Includes = includes.Text()
|
||||
}
|
||||
|
||||
if excludes := source.SelectElement("excludes"); excludes != nil {
|
||||
s.Excludes = excludes.Text()
|
||||
}
|
||||
return &s
|
||||
}
|
||||
@@ -1,916 +0,0 @@
|
||||
// Copyright 2015 Vadim Kravcenko
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
// Gojenkins is a Jenkins Client in Go, that exposes the jenkins REST api in a more developer friendly way.
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
// Basic Authentication
|
||||
type BasicAuth struct {
|
||||
Username string
|
||||
Password string
|
||||
}
|
||||
|
||||
type Jenkins struct {
|
||||
Server string
|
||||
Version string
|
||||
Requester *Requester
|
||||
}
|
||||
|
||||
// Loggers
|
||||
var (
|
||||
Info *log.Logger
|
||||
Warning *log.Logger
|
||||
Error *log.Logger
|
||||
)
|
||||
|
||||
// Init Method. Should be called after creating a Jenkins Instance.
|
||||
// e.g jenkins := CreateJenkins("url").Init()
|
||||
// HTTP Client is set here, Connection to jenkins is tested here.
|
||||
func (j *Jenkins) Init() (*Jenkins, error) {
|
||||
j.initLoggers()
|
||||
|
||||
rsp, err := j.Requester.GetJSON("/", nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rsp.Body.Close()
|
||||
j.Version = rsp.Header.Get("X-Jenkins")
|
||||
//if j.Raw == nil {
|
||||
// return nil, errors.New("Connection Failed, Please verify that the host and credentials are correct.")
|
||||
//}
|
||||
|
||||
return j, nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) initLoggers() {
|
||||
Info = log.New(os.Stdout,
|
||||
"INFO: ",
|
||||
log.Ldate|log.Ltime|log.Lshortfile)
|
||||
|
||||
Warning = log.New(os.Stdout,
|
||||
"WARNING: ",
|
||||
log.Ldate|log.Ltime|log.Lshortfile)
|
||||
|
||||
Error = log.New(os.Stderr,
|
||||
"ERROR: ",
|
||||
log.Ldate|log.Ltime|log.Lshortfile)
|
||||
}
|
||||
|
||||
// Create a new folder
|
||||
// This folder can be nested in other parent folders
|
||||
// Example: jenkins.CreateFolder("newFolder", "grandparentFolder", "parentFolder")
|
||||
func (j *Jenkins) CreateFolder(name, description string, parents ...string) (*Folder, error) {
|
||||
folderObj := &Folder{Jenkins: j, Raw: new(FolderResponse), Base: "/job/" + strings.Join(append(parents, name), "/job/")}
|
||||
folder, err := folderObj.Create(name, description)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return folder, nil
|
||||
}
|
||||
|
||||
// Create a new job in the folder
|
||||
// Example: jenkins.CreateJobInFolder("<config></config>", "newJobName", "myFolder", "parentFolder")
|
||||
func (j *Jenkins) CreateJobInFolder(config string, jobName string, parentIDs ...string) (*Job, error) {
|
||||
jobObj := Job{Jenkins: j, Raw: new(JobResponse), Base: "/job/" + strings.Join(append(parentIDs, jobName), "/job/")}
|
||||
qr := map[string]string{
|
||||
"name": jobName,
|
||||
}
|
||||
job, err := jobObj.Create(config, qr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return job, nil
|
||||
}
|
||||
|
||||
// Create a new job from config File
|
||||
// Method takes XML string as first Parameter, and if the name is not specified in the config file
|
||||
// takes name as string as second Parameter
|
||||
// e.g jenkins.CreateJob("<config></config>","newJobName")
|
||||
func (j *Jenkins) CreateJob(config string, options ...interface{}) (*Job, error) {
|
||||
qr := make(map[string]string)
|
||||
if len(options) > 0 {
|
||||
qr["name"] = options[0].(string)
|
||||
} else {
|
||||
return nil, errors.New("Error Creating Job, job name is missing")
|
||||
}
|
||||
jobObj := Job{Jenkins: j, Raw: new(JobResponse), Base: "/job/" + qr["name"]}
|
||||
job, err := jobObj.Create(config, qr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return job, nil
|
||||
}
|
||||
|
||||
// Rename a job.
|
||||
// First Parameter job old name, Second Parameter job new name.
|
||||
func (j *Jenkins) RenameJob(job string, name string) *Job {
|
||||
jobObj := Job{Jenkins: j, Raw: new(JobResponse), Base: "/job/" + job}
|
||||
jobObj.Rename(name)
|
||||
return &jobObj
|
||||
}
|
||||
|
||||
// Create a copy of a job.
|
||||
// First Parameter Name of the job to copy from, Second Parameter new job name.
|
||||
func (j *Jenkins) CopyJob(copyFrom string, newName string) (*Job, error) {
|
||||
job := Job{Jenkins: j, Raw: new(JobResponse), Base: "/job/" + copyFrom}
|
||||
_, err := job.Poll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return job.Copy(newName)
|
||||
}
|
||||
|
||||
// Delete a job.
|
||||
func (j *Jenkins) DeleteJob(name string, parentIDs ...string) (bool, error) {
|
||||
job := Job{Jenkins: j, Raw: new(JobResponse), Base: "/job/" + strings.Join(append(parentIDs, name), "/job/")}
|
||||
return job.Delete()
|
||||
}
|
||||
|
||||
// Invoke a job.
|
||||
// First Parameter job name, second Parameter is optional Build parameters.
|
||||
func (j *Jenkins) BuildJob(name string, options ...interface{}) (int64, error) {
|
||||
job := Job{Jenkins: j, Raw: new(JobResponse), Base: "/job/" + name}
|
||||
var params map[string]string
|
||||
if len(options) > 0 {
|
||||
params, _ = options[0].(map[string]string)
|
||||
}
|
||||
return job.InvokeSimple(params)
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetBuild(jobName string, number int64) (*Build, error) {
|
||||
job, err := j.GetJob(jobName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
build, err := job.GetBuild(number)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return build, nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetJob(id string, parentIDs ...string) (*Job, error) {
|
||||
job := Job{Jenkins: j, Raw: new(JobResponse), Base: "/job/" + strings.Join(append(parentIDs, id), "/job/")}
|
||||
status, err := job.Poll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if status == 200 {
|
||||
return &job, nil
|
||||
}
|
||||
return nil, errors.New(strconv.Itoa(status))
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetFolder(id string, parents ...string) (*Folder, error) {
|
||||
folder := Folder{Jenkins: j, Raw: new(FolderResponse), Base: "/job/" + strings.Join(append(parents, id), "/job/")}
|
||||
status, err := folder.Poll()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("trouble polling folder: %v", err)
|
||||
}
|
||||
if status == 200 {
|
||||
return &folder, nil
|
||||
}
|
||||
return nil, errors.New(strconv.Itoa(status))
|
||||
}
|
||||
|
||||
// Get all builds Numbers and URLS for a specific job.
|
||||
// There are only build IDs here,
|
||||
// To get all the other info of the build use jenkins.GetBuild(job,buildNumber)
|
||||
// or job.GetBuild(buildNumber)
|
||||
|
||||
func (j *Jenkins) Poll() (int, error) {
|
||||
resp, err := j.Requester.GetJSON("/", nil, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
return resp.StatusCode, nil
|
||||
}
|
||||
|
||||
// query roleName exist or not
|
||||
// if return roleName means exist
|
||||
func (j *Jenkins) GetGlobalRole(roleName string) (string, error) {
|
||||
roleResponse := &GlobalRoleResponse{
|
||||
RoleName: roleName,
|
||||
}
|
||||
stringResponse := ""
|
||||
response, err := j.Requester.Get("/role-strategy/strategy/getRole",
|
||||
&stringResponse,
|
||||
map[string]string{
|
||||
"roleName": roleName,
|
||||
"type": GLOBAL_ROLE,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return "", errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
if stringResponse == "{}" {
|
||||
return "", nil
|
||||
}
|
||||
err = json.Unmarshal([]byte(stringResponse), roleResponse)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return roleResponse.RoleName, nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetGlobalRoleHandler(roleName string) (*GlobalRole, error) {
|
||||
name, err := j.GetGlobalRole(roleName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
roleResponse := &GlobalRoleResponse{
|
||||
RoleName: name,
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &GlobalRole{
|
||||
Jenkins: j,
|
||||
Raw: *roleResponse,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// assign a global roleName to username(sid)
|
||||
func (j *Jenkins) AssignGlobalRole(roleName string, sid string) error {
|
||||
globalRole, err := j.GetGlobalRoleHandler(roleName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
param := map[string]string{
|
||||
"type": GLOBAL_ROLE,
|
||||
"roleName": globalRole.Raw.RoleName,
|
||||
"sid": sid,
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Requester.Post("/role-strategy/strategy/assignRole", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// unassign a global roleName to username(sid)
|
||||
func (j *Jenkins) UnAssignGlobalRole(roleName string, sid string) error {
|
||||
globalRole, err := j.GetGlobalRoleHandler(roleName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
param := map[string]string{
|
||||
"type": GLOBAL_ROLE,
|
||||
"roleName": globalRole.Raw.RoleName,
|
||||
"sid": sid,
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Requester.Post("/role-strategy/strategy/unassignRole", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetProjectRole(roleName string) (*ProjectRole, error) {
|
||||
roleResponse := &ProjectRoleResponse{
|
||||
RoleName: roleName,
|
||||
}
|
||||
stringResponse := ""
|
||||
response, err := j.Requester.Get("/role-strategy/strategy/getRole",
|
||||
&stringResponse,
|
||||
map[string]string{
|
||||
"roleName": roleName,
|
||||
"type": PROJECT_ROLE,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return nil, errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
if stringResponse == "{}" {
|
||||
return nil, nil
|
||||
}
|
||||
err = json.Unmarshal([]byte(stringResponse), roleResponse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &ProjectRole{
|
||||
Jenkins: j,
|
||||
Raw: *roleResponse,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// assign a project roleName to username(sid)
|
||||
func (j *Jenkins) AssignProjectRole(roleName string, sid string) error {
|
||||
projectRole, err := j.GetProjectRole(roleName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
param := map[string]string{
|
||||
"type": PROJECT_ROLE,
|
||||
"roleName": projectRole.Raw.RoleName,
|
||||
"sid": sid,
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Requester.Post("/role-strategy/strategy/assignRole", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// unassign a project roleName to username(sid)
|
||||
func (j *Jenkins) UnAssignProjectRole(roleName string, sid string) error {
|
||||
projectRole, err := j.GetProjectRole(roleName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
param := map[string]string{
|
||||
"type": PROJECT_ROLE,
|
||||
"roleName": projectRole.Raw.RoleName,
|
||||
"sid": sid,
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Requester.Post("/role-strategy/strategy/unassignRole", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// add a global roleName
|
||||
func (j *Jenkins) AddGlobalRole(roleName string, ids devops.GlobalPermissionIds, overwrite bool) error {
|
||||
var idArray []string
|
||||
values := reflect.ValueOf(ids)
|
||||
for i := 0; i < values.NumField(); i++ {
|
||||
field := values.Field(i)
|
||||
if field.Bool() {
|
||||
idArray = append(idArray, values.Type().Field(i).Tag.Get("json"))
|
||||
}
|
||||
}
|
||||
param := map[string]string{
|
||||
"roleName": roleName,
|
||||
"type": GLOBAL_ROLE,
|
||||
"permissionIds": strings.Join(idArray, ","),
|
||||
"overwrite": strconv.FormatBool(overwrite),
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Requester.Post("/role-strategy/strategy/addRole", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// delete roleName from the project
|
||||
func (j *Jenkins) DeleteProjectRoles(roleName ...string) error {
|
||||
responseString := ""
|
||||
|
||||
response, err := j.Requester.Post("/role-strategy/strategy/removeRoles", nil, &responseString, map[string]string{
|
||||
"type": PROJECT_ROLE,
|
||||
"roleNames": strings.Join(roleName, ","),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
fmt.Println(responseString)
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// add roleName for project
|
||||
func (j *Jenkins) AddProjectRole(roleName string, pattern string, ids devops.ProjectPermissionIds, overwrite bool) error {
|
||||
var idArray []string
|
||||
values := reflect.ValueOf(ids)
|
||||
for i := 0; i < values.NumField(); i++ {
|
||||
field := values.Field(i)
|
||||
if field.Bool() {
|
||||
idArray = append(idArray, values.Type().Field(i).Tag.Get("json"))
|
||||
}
|
||||
}
|
||||
param := map[string]string{
|
||||
"roleName": roleName,
|
||||
"type": PROJECT_ROLE,
|
||||
"permissionIds": strings.Join(idArray, ","),
|
||||
"overwrite": strconv.FormatBool(overwrite),
|
||||
"pattern": pattern,
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Requester.Post("/role-strategy/strategy/addRole", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) DeleteUserInProject(username string) error {
|
||||
param := map[string]string{
|
||||
"type": PROJECT_ROLE,
|
||||
"sid": username,
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Requester.Post("/role-strategy/strategy/deleteSid", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetPipeline(projectName, pipelineName string, httpParameters *devops.HttpParameters) (*devops.Pipeline, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetPipelineUrl, projectName, pipelineName),
|
||||
}
|
||||
res, err := PipelineOjb.GetPipeline()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) ListPipelines(httpParameters *devops.HttpParameters) (*devops.PipelineList, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: ListPipelinesUrl + httpParameters.Url.RawQuery,
|
||||
}
|
||||
res, err := PipelineOjb.ListPipelines()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetPipelineRun(projectName, pipelineName, runId string, httpParameters *devops.HttpParameters) (*devops.PipelineRun, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetPipelineRunUrl, projectName, pipelineName, runId),
|
||||
}
|
||||
res, err := PipelineOjb.GetPipelineRun()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) ListPipelineRuns(projectName, pipelineName string, httpParameters *devops.HttpParameters) (*devops.PipelineRunList, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(ListPipelineRunUrl, projectName, pipelineName) + httpParameters.Url.RawQuery,
|
||||
}
|
||||
res, err := PipelineOjb.ListPipelineRuns()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) StopPipeline(projectName, pipelineName, runId string, httpParameters *devops.HttpParameters) (*devops.StopPipeline, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(StopPipelineUrl, projectName, pipelineName, runId),
|
||||
}
|
||||
res, err := PipelineOjb.StopPipeline()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) ReplayPipeline(projectName, pipelineName, runId string, httpParameters *devops.HttpParameters) (*devops.ReplayPipeline, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(ReplayPipelineUrl+httpParameters.Url.RawQuery, projectName, pipelineName, runId),
|
||||
}
|
||||
res, err := PipelineOjb.ReplayPipeline()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) RunPipeline(projectName, pipelineName string, httpParameters *devops.HttpParameters) (*devops.RunPipeline, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(RunPipelineUrl+httpParameters.Url.RawQuery, projectName, pipelineName),
|
||||
}
|
||||
res, err := PipelineOjb.RunPipeline()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetArtifacts(projectName, pipelineName, runId string, httpParameters *devops.HttpParameters) ([]devops.Artifacts, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetArtifactsUrl+httpParameters.Url.RawQuery, projectName, pipelineName, runId),
|
||||
}
|
||||
res, err := PipelineOjb.GetArtifacts()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetRunLog(projectName, pipelineName, runId string, httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetRunLogUrl+httpParameters.Url.RawQuery, projectName, pipelineName, runId),
|
||||
}
|
||||
res, err := PipelineOjb.GetRunLog()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetStepLog(projectName, pipelineName, runId, nodeId, stepId string, httpParameters *devops.HttpParameters) ([]byte, http.Header, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetStepLogUrl+httpParameters.Url.RawQuery, projectName, pipelineName, runId, nodeId, stepId),
|
||||
}
|
||||
res, header, err := PipelineOjb.GetStepLog()
|
||||
return res, header, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetNodeSteps(projectName, pipelineName, runId, nodeId string, httpParameters *devops.HttpParameters) ([]devops.NodeSteps, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetNodeStepsUrl+httpParameters.Url.RawQuery, projectName, pipelineName, runId, nodeId),
|
||||
}
|
||||
res, err := PipelineOjb.GetNodeSteps()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetPipelineRunNodes(projectName, pipelineName, runId string, httpParameters *devops.HttpParameters) ([]devops.PipelineRunNodes, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetPipelineRunNodesUrl+httpParameters.Url.RawQuery, projectName, pipelineName, runId),
|
||||
}
|
||||
res, err := PipelineOjb.GetPipelineRunNodes()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) SubmitInputStep(projectName, pipelineName, runId, nodeId, stepId string, httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(SubmitInputStepUrl+httpParameters.Url.RawQuery, projectName, pipelineName, runId, nodeId, stepId),
|
||||
}
|
||||
res, err := PipelineOjb.SubmitInputStep()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetBranchPipeline(projectName, pipelineName, branchName string, httpParameters *devops.HttpParameters) (*devops.BranchPipeline, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetBranchPipelineUrl, projectName, pipelineName, branchName),
|
||||
}
|
||||
res, err := PipelineOjb.GetBranchPipeline()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetBranchPipelineRun(projectName, pipelineName, branchName, runId string, httpParameters *devops.HttpParameters) (*devops.PipelineRun, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetBranchPipelineRunUrl, projectName, pipelineName, branchName, runId),
|
||||
}
|
||||
res, err := PipelineOjb.GetBranchPipelineRun()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) StopBranchPipeline(projectName, pipelineName, branchName, runId string, httpParameters *devops.HttpParameters) (*devops.StopPipeline, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(StopBranchPipelineUrl+httpParameters.Url.RawQuery, projectName, pipelineName, branchName, runId),
|
||||
}
|
||||
res, err := PipelineOjb.StopBranchPipeline()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) ReplayBranchPipeline(projectName, pipelineName, branchName, runId string, httpParameters *devops.HttpParameters) (*devops.ReplayPipeline, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(ReplayBranchPipelineUrl+httpParameters.Url.RawQuery, projectName, pipelineName, branchName, runId),
|
||||
}
|
||||
res, err := PipelineOjb.ReplayBranchPipeline()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) RunBranchPipeline(projectName, pipelineName, branchName string, httpParameters *devops.HttpParameters) (*devops.RunPipeline, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(RunBranchPipelineUrl+httpParameters.Url.RawQuery, projectName, pipelineName, branchName),
|
||||
}
|
||||
res, err := PipelineOjb.RunBranchPipeline()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetBranchArtifacts(projectName, pipelineName, branchName, runId string, httpParameters *devops.HttpParameters) ([]devops.Artifacts, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetBranchArtifactsUrl+httpParameters.Url.RawQuery, projectName, pipelineName, branchName, runId),
|
||||
}
|
||||
res, err := PipelineOjb.GetBranchArtifacts()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetBranchRunLog(projectName, pipelineName, branchName, runId string, httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetBranchRunLogUrl+httpParameters.Url.RawQuery, projectName, pipelineName, branchName, runId),
|
||||
}
|
||||
res, err := PipelineOjb.GetBranchRunLog()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetBranchStepLog(projectName, pipelineName, branchName, runId, nodeId, stepId string, httpParameters *devops.HttpParameters) ([]byte, http.Header, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetBranchStepLogUrl+httpParameters.Url.RawQuery, projectName, pipelineName, branchName, runId, nodeId, stepId),
|
||||
}
|
||||
res, header, err := PipelineOjb.GetBranchStepLog()
|
||||
return res, header, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetBranchNodeSteps(projectName, pipelineName, branchName, runId, nodeId string, httpParameters *devops.HttpParameters) ([]devops.NodeSteps, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetBranchNodeStepsUrl+httpParameters.Url.RawQuery, projectName, pipelineName, branchName, runId, nodeId),
|
||||
}
|
||||
res, err := PipelineOjb.GetBranchNodeSteps()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetBranchPipelineRunNodes(projectName, pipelineName, branchName, runId string, httpParameters *devops.HttpParameters) ([]devops.BranchPipelineRunNodes, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetBranchPipeRunNodesUrl+httpParameters.Url.RawQuery, projectName, pipelineName, branchName, runId),
|
||||
}
|
||||
res, err := PipelineOjb.GetBranchPipelineRunNodes()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) SubmitBranchInputStep(projectName, pipelineName, branchName, runId, nodeId, stepId string, httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(CheckBranchPipelineUrl+httpParameters.Url.RawQuery, projectName, pipelineName, branchName, runId, nodeId, stepId),
|
||||
}
|
||||
res, err := PipelineOjb.SubmitBranchInputStep()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetPipelineBranch(projectName, pipelineName string, httpParameters *devops.HttpParameters) (*devops.PipelineBranch, error) {
|
||||
path := fmt.Sprintf(GetPipeBranchUrl, projectName, pipelineName) + httpParameters.Url.RawQuery
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: path,
|
||||
}
|
||||
res, err := PipelineOjb.GetPipelineBranch()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) ScanBranch(projectName, pipelineName string, httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(ScanBranchUrl+httpParameters.Url.RawQuery, projectName, pipelineName),
|
||||
}
|
||||
res, err := PipelineOjb.ScanBranch()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetConsoleLog(projectName, pipelineName string, httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetConsoleLogUrl+httpParameters.Url.RawQuery, projectName, pipelineName),
|
||||
}
|
||||
res, err := PipelineOjb.GetConsoleLog()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetCrumb(httpParameters *devops.HttpParameters) (*devops.Crumb, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: GetCrumbUrl,
|
||||
}
|
||||
res, err := PipelineOjb.GetCrumb()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetSCMServers(scmId string, httpParameters *devops.HttpParameters) ([]devops.SCMServer, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetSCMServersUrl, scmId),
|
||||
}
|
||||
res, err := PipelineOjb.GetSCMServers()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetSCMOrg(scmId string, httpParameters *devops.HttpParameters) ([]devops.SCMOrg, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetSCMOrgUrl+httpParameters.Url.RawQuery, scmId),
|
||||
}
|
||||
res, err := PipelineOjb.GetSCMOrg()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetOrgRepo(scmId, organizationId string, httpParameters *devops.HttpParameters) (devops.OrgRepo, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(GetOrgRepoUrl+httpParameters.Url.RawQuery, scmId, organizationId),
|
||||
}
|
||||
res, err := PipelineOjb.GetOrgRepo()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) CreateSCMServers(scmId string, httpParameters *devops.HttpParameters) (*devops.SCMServer, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(CreateSCMServersUrl, scmId),
|
||||
}
|
||||
res, err := PipelineOjb.CreateSCMServers()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetNotifyCommit(httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: GetNotifyCommitUrl + httpParameters.Url.RawQuery,
|
||||
}
|
||||
res, err := PipelineOjb.GetNotifyCommit()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) GithubWebhook(httpParameters *devops.HttpParameters) ([]byte, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: GithubWebhookUrl + httpParameters.Url.RawQuery,
|
||||
}
|
||||
res, err := PipelineOjb.GithubWebhook()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) Validate(scmId string, httpParameters *devops.HttpParameters) (*devops.Validates, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(ValidateUrl, scmId),
|
||||
}
|
||||
res, err := PipelineOjb.Validate()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) CheckScriptCompile(projectName, pipelineName string, httpParameters *devops.HttpParameters) (*devops.CheckScript, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: fmt.Sprintf(CheckScriptCompileUrl, projectName, pipelineName),
|
||||
}
|
||||
res, err := PipelineOjb.CheckScriptCompile()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) CheckCron(projectName string, httpParameters *devops.HttpParameters) (*devops.CheckCronRes, error) {
|
||||
var cron = new(devops.CronData)
|
||||
var reader io.ReadCloser
|
||||
var path string
|
||||
|
||||
reader = httpParameters.Body
|
||||
//nolint:ineffassign,staticcheck
|
||||
cronData, err := io.ReadAll(reader)
|
||||
err = json.Unmarshal(cronData, cron)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
query := url.Values{
|
||||
"value": []string{cron.Cron},
|
||||
}
|
||||
|
||||
if cron.PipelineName != "" {
|
||||
path = fmt.Sprintf(CheckPipelienCronUrl, projectName, cron.PipelineName, query.Encode())
|
||||
} else {
|
||||
path = fmt.Sprintf(CheckCronUrl, projectName, query.Encode())
|
||||
}
|
||||
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: path,
|
||||
}
|
||||
|
||||
res, err := PipelineOjb.CheckCron()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) ToJenkinsfile(httpParameters *devops.HttpParameters) (*devops.ResJenkinsfile, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: ToJenkinsfileUrl,
|
||||
}
|
||||
res, err := PipelineOjb.ToJenkinsfile()
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (j *Jenkins) ToJson(httpParameters *devops.HttpParameters) (map[string]interface{}, error) {
|
||||
PipelineOjb := &Pipeline{
|
||||
HttpParameters: httpParameters,
|
||||
Jenkins: j,
|
||||
Path: ToJsonUrl,
|
||||
}
|
||||
res, err := PipelineOjb.ToJson()
|
||||
return res, err
|
||||
}
|
||||
|
||||
// Creates a new Jenkins Instance
|
||||
// Optional parameters are: client, username, password
|
||||
// After creating an instance call init method.
|
||||
func CreateJenkins(client *http.Client, base string, maxConnection int, auth ...interface{}) *Jenkins {
|
||||
j := &Jenkins{}
|
||||
//nolint:gosimple
|
||||
if strings.HasSuffix(base, "/") {
|
||||
base = base[:len(base)-1]
|
||||
}
|
||||
j.Server = base
|
||||
j.Requester = &Requester{Base: base, SslVerify: true, Client: client, connControl: make(chan struct{}, maxConnection)}
|
||||
if j.Requester.Client == nil {
|
||||
j.Requester.Client = http.DefaultClient
|
||||
}
|
||||
if len(auth) == 2 {
|
||||
j.Requester.BasicAuth = &BasicAuth{Username: auth[0].(string), Password: auth[1].(string)}
|
||||
}
|
||||
return j
|
||||
}
|
||||
@@ -1,517 +0,0 @@
|
||||
// Copyright 2015 Vadim Kravcenko
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
// Job wraps a single Jenkins job: its most recently fetched API payload
// (Raw), the client used to reach Jenkins, and the job's URL path prefix
// (Base), e.g. "/job/folder/job/name".
type Job struct {
	Raw     *JobResponse
	Jenkins *Jenkins
	Base    string
}
|
||||
|
||||
// JobBuild identifies one build of a job by its number and API URL.
type JobBuild struct {
	Number int64
	URL    string
}
|
||||
|
||||
// JobBuildStatus is the build-status projection returned by
// Job.GetAllBuildStatus: build number, whether it is still building, and its
// result string.
type JobBuildStatus struct {
	Number   int64
	Building bool
	Result   string
}
|
||||
|
||||
// InnerJob is the summary entry Jenkins returns for a related or nested job:
// name, API URL, and status color.
type InnerJob struct {
	Name  string `json:"name"`
	Url   string `json:"url"`
	Color string `json:"color"`
}
|
||||
|
||||
// ParameterDefinition describes one build parameter of a job, including the
// default value Jenkins reports for it.
type ParameterDefinition struct {
	DefaultParameterValue struct {
		Name  string      `json:"name"`
		Value interface{} `json:"value"`
	} `json:"defaultParameterValue"`
	Description string `json:"description"`
	Name        string `json:"name"`
	Type        string `json:"type"`
}
|
||||
|
||||
// JobResponse mirrors the JSON payload of a Jenkins job API endpoint:
// identity and display fields, status color, references to well-known builds
// (last/first/stable/...), health reports, parameter definitions, and
// related (upstream/downstream/nested) jobs.
type JobResponse struct {
	Class              string `json:"_class"`
	Actions            []devops.GeneralAction
	Buildable          bool `json:"buildable"`
	Builds             []JobBuild
	Color              string      `json:"color"`
	ConcurrentBuild    bool        `json:"concurrentBuild"`
	Description        string      `json:"description"`
	DisplayName        string      `json:"displayName"`
	DisplayNameOrNull  interface{} `json:"displayNameOrNull"`
	DownstreamProjects []InnerJob  `json:"downstreamProjects"`
	FirstBuild         JobBuild
	HealthReport       []struct {
		Description   string `json:"description"`
		IconClassName string `json:"iconClassName"`
		IconUrl       string `json:"iconUrl"`
		Score         int64  `json:"score"`
	} `json:"healthReport"`
	InQueue               bool       `json:"inQueue"`
	KeepDependencies      bool       `json:"keepDependencies"`
	LastBuild             JobBuild   `json:"lastBuild"`
	LastCompletedBuild    JobBuild   `json:"lastCompletedBuild"`
	LastFailedBuild       JobBuild   `json:"lastFailedBuild"`
	LastStableBuild       JobBuild   `json:"lastStableBuild"`
	LastSuccessfulBuild   JobBuild   `json:"lastSuccessfulBuild"`
	LastUnstableBuild     JobBuild   `json:"lastUnstableBuild"`
	LastUnsuccessfulBuild JobBuild   `json:"lastUnsuccessfulBuild"`
	Name                  string     `json:"name"`
	SubJobs               []InnerJob `json:"subJobs"`
	NextBuildNumber       int64      `json:"nextBuildNumber"`
	Property              []struct {
		ParameterDefinitions []ParameterDefinition `json:"parameterDefinitions"`
	} `json:"property"`
	QueueItem        interface{} `json:"queueItem"`
	Scm              struct{}    `json:"scm"`
	UpstreamProjects []InnerJob  `json:"upstreamProjects"`
	URL              string      `json:"url"`
	Jobs             []InnerJob  `json:"jobs"`
}
|
||||
|
||||
func (j *Job) parentBase() string {
|
||||
return j.Base[:strings.LastIndex(j.Base, "/job/")]
|
||||
}
|
||||
|
||||
// History is a condensed record of one build: its number, status string, and
// timestamp.
type History struct {
	BuildNumber    int
	BuildStatus    string
	BuildTimestamp int64
}
|
||||
|
||||
// GetName returns the job's name from the cached API response.
func (j *Job) GetName() string {
	return j.Raw.Name
}

// GetDescription returns the job's description from the cached API response.
func (j *Job) GetDescription() string {
	return j.Raw.Description
}

// GetDetails returns the full cached API response for the job.
func (j *Job) GetDetails() *JobResponse {
	return j.Raw
}
|
||||
|
||||
func (j *Job) GetBuild(id int64) (*Build, error) {
|
||||
build := Build{Jenkins: j.Jenkins, Job: j, Raw: new(devops.Build), Depth: 1, Base: "/job/" + j.GetName() + "/" + strconv.FormatInt(id, 10)}
|
||||
status, err := build.Poll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if status == 200 {
|
||||
return &build, nil
|
||||
}
|
||||
return nil, errors.New(strconv.Itoa(status))
|
||||
}
|
||||
|
||||
func (j *Job) getBuildByType(buildType string) (*Build, error) {
|
||||
allowed := map[string]JobBuild{
|
||||
"lastStableBuild": j.Raw.LastStableBuild,
|
||||
"lastSuccessfulBuild": j.Raw.LastSuccessfulBuild,
|
||||
"lastBuild": j.Raw.LastBuild,
|
||||
"lastCompletedBuild": j.Raw.LastCompletedBuild,
|
||||
"firstBuild": j.Raw.FirstBuild,
|
||||
"lastFailedBuild": j.Raw.LastFailedBuild,
|
||||
}
|
||||
number := ""
|
||||
if val, ok := allowed[buildType]; ok {
|
||||
number = strconv.FormatInt(val.Number, 10)
|
||||
} else {
|
||||
panic("No Such Build")
|
||||
}
|
||||
build := Build{
|
||||
Jenkins: j.Jenkins,
|
||||
Depth: 1,
|
||||
Job: j,
|
||||
Raw: new(devops.Build),
|
||||
Base: j.Base + "/" + number}
|
||||
status, err := build.Poll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if status == 200 {
|
||||
return &build, nil
|
||||
}
|
||||
return nil, errors.New(strconv.Itoa(status))
|
||||
}
|
||||
|
||||
// GetLastSuccessfulBuild returns the job's most recent successful build.
func (j *Job) GetLastSuccessfulBuild() (*Build, error) {
	return j.getBuildByType("lastSuccessfulBuild")
}

// GetFirstBuild returns the job's first recorded build.
func (j *Job) GetFirstBuild() (*Build, error) {
	return j.getBuildByType("firstBuild")
}

// GetLastBuild returns the job's most recent build of any status.
func (j *Job) GetLastBuild() (*Build, error) {
	return j.getBuildByType("lastBuild")
}

// GetLastStableBuild returns the job's most recent stable build.
func (j *Job) GetLastStableBuild() (*Build, error) {
	return j.getBuildByType("lastStableBuild")
}

// GetLastFailedBuild returns the job's most recent failed build.
func (j *Job) GetLastFailedBuild() (*Build, error) {
	return j.getBuildByType("lastFailedBuild")
}

// GetLastCompletedBuild returns the job's most recent completed build.
func (j *Job) GetLastCompletedBuild() (*Build, error) {
	return j.getBuildByType("lastCompletedBuild")
}
|
||||
|
||||
// Returns All Builds with Number and URL
|
||||
func (j *Job) GetAllBuildIds() ([]JobBuild, error) {
|
||||
var buildsResp struct {
|
||||
Builds []JobBuild `json:"allBuilds"`
|
||||
}
|
||||
rsp, err := j.Jenkins.Requester.GetJSON(j.Base, &buildsResp, map[string]string{"tree": "allBuilds[number,url]"})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rsp.Body.Close()
|
||||
return buildsResp.Builds, nil
|
||||
}
|
||||
|
||||
func (j *Job) GetAllBuildStatus() ([]JobBuildStatus, error) {
|
||||
var buildsResp struct {
|
||||
Builds []JobBuildStatus `json:"allBuilds"`
|
||||
}
|
||||
resp, err := j.Jenkins.Requester.GetJSON(j.Base, &buildsResp, map[string]string{"tree": "allBuilds[number,building,result]"})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
return buildsResp.Builds, nil
|
||||
}
|
||||
|
||||
// GetUpstreamJobsMetadata returns the upstream projects as reported by
// Jenkins, without resolving them into full Job objects.
func (j *Job) GetUpstreamJobsMetadata() []InnerJob {
	return j.Raw.UpstreamProjects
}

// GetDownstreamJobsMetadata returns the downstream projects as reported
// by Jenkins, without resolving them into full Job objects.
func (j *Job) GetDownstreamJobsMetadata() []InnerJob {
	return j.Raw.DownstreamProjects
}

// GetInnerJobsMetadata returns the child jobs of a folder job as
// reported by Jenkins, without resolving them.
func (j *Job) GetInnerJobsMetadata() []InnerJob {
	return j.Raw.Jobs
}
|
||||
|
||||
func (j *Job) GetUpstreamJobs() ([]*Job, error) {
|
||||
jobs := make([]*Job, len(j.Raw.UpstreamProjects))
|
||||
for i, job := range j.Raw.UpstreamProjects {
|
||||
ji, err := j.Jenkins.GetJob(job.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jobs[i] = ji
|
||||
}
|
||||
return jobs, nil
|
||||
}
|
||||
|
||||
func (j *Job) GetDownstreamJobs() ([]*Job, error) {
|
||||
jobs := make([]*Job, len(j.Raw.DownstreamProjects))
|
||||
for i, job := range j.Raw.DownstreamProjects {
|
||||
ji, err := j.Jenkins.GetJob(job.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jobs[i] = ji
|
||||
}
|
||||
return jobs, nil
|
||||
}
|
||||
|
||||
func (j *Job) GetInnerJob(id string) (*Job, error) {
|
||||
job := Job{Jenkins: j.Jenkins, Raw: new(JobResponse), Base: j.Base + "/job/" + id}
|
||||
status, err := job.Poll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if status == 200 {
|
||||
return &job, nil
|
||||
}
|
||||
return nil, errors.New(strconv.Itoa(status))
|
||||
}
|
||||
|
||||
func (j *Job) GetInnerJobs() ([]*Job, error) {
|
||||
jobs := make([]*Job, len(j.Raw.Jobs))
|
||||
for i, job := range j.Raw.Jobs {
|
||||
ji, err := j.GetInnerJob(job.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
jobs[i] = ji
|
||||
}
|
||||
return jobs, nil
|
||||
}
|
||||
|
||||
func (j *Job) Enable() (bool, error) {
|
||||
resp, err := j.Jenkins.Requester.Post(j.Base+"/enable", nil, nil, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
return false, errors.New(strconv.Itoa(resp.StatusCode))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (j *Job) Disable() (bool, error) {
|
||||
resp, err := j.Jenkins.Requester.Post(j.Base+"/disable", nil, nil, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
return false, errors.New(strconv.Itoa(resp.StatusCode))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (j *Job) Delete() (bool, error) {
|
||||
resp, err := j.Jenkins.Requester.Post(j.Base+"/doDelete", nil, nil, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
return false, errors.New(strconv.Itoa(resp.StatusCode))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (j *Job) Rename(name string) (bool, error) {
|
||||
data := url.Values{}
|
||||
data.Set("newName", name)
|
||||
resp, err := j.Jenkins.Requester.Post(j.Base+"/doRename", bytes.NewBufferString(data.Encode()), nil, nil)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
j.Base = "/job/" + name
|
||||
j.Poll()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (j *Job) Create(config string, qr ...interface{}) (*Job, error) {
|
||||
var querystring map[string]string
|
||||
if len(qr) > 0 {
|
||||
querystring = qr[0].(map[string]string)
|
||||
}
|
||||
resp, err := j.Jenkins.Requester.PostXML(j.parentBase()+"/createItem", config, j.Raw, querystring)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode == 200 {
|
||||
j.Poll()
|
||||
return j, nil
|
||||
}
|
||||
return nil, errors.New(strconv.Itoa(resp.StatusCode))
|
||||
}
|
||||
|
||||
func (j *Job) Copy(destinationName string) (*Job, error) {
|
||||
qr := map[string]string{"name": destinationName, "from": j.GetName(), "mode": "copy"}
|
||||
resp, err := j.Jenkins.Requester.Post(j.parentBase()+"/createItem", nil, nil, qr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode == 200 {
|
||||
newJob := &Job{Jenkins: j.Jenkins, Raw: new(JobResponse), Base: "/job/" + destinationName}
|
||||
_, err := newJob.Poll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newJob, nil
|
||||
}
|
||||
return nil, errors.New(strconv.Itoa(resp.StatusCode))
|
||||
}
|
||||
|
||||
func (j *Job) UpdateConfig(config string) error {
|
||||
|
||||
var querystring map[string]string
|
||||
|
||||
resp, err := j.Jenkins.Requester.PostXML(j.Base+"/config.xml", config, nil, querystring)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode == 200 {
|
||||
j.Poll()
|
||||
return nil
|
||||
}
|
||||
return errors.New(strconv.Itoa(resp.StatusCode))
|
||||
|
||||
}
|
||||
|
||||
func (j *Job) GetConfig() (string, error) {
|
||||
var data string
|
||||
resp, err := j.Jenkins.Requester.GetXML(j.Base+"/config.xml", &data, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
resp.Body.Close()
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (j *Job) GetParameters() ([]ParameterDefinition, error) {
|
||||
_, err := j.Poll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var parameters []ParameterDefinition
|
||||
for _, property := range j.Raw.Property {
|
||||
parameters = append(parameters, property.ParameterDefinitions...)
|
||||
}
|
||||
return parameters, nil
|
||||
}
|
||||
|
||||
// IsQueued refreshes the job and reports whether it currently sits in
// the build queue.
func (j *Job) IsQueued() (bool, error) {
	if _, err := j.Poll(); err != nil {
		return false, err
	}
	return j.Raw.InQueue, nil
}

// IsRunning refreshes the job and reports whether its most recent build
// is still executing.
func (j *Job) IsRunning() (bool, error) {
	if _, err := j.Poll(); err != nil {
		return false, err
	}
	lastBuild, err := j.GetLastBuild()
	if err != nil {
		return false, err
	}
	return lastBuild.IsRunning(), nil
}

// IsEnabled refreshes the job and reports whether it is not disabled
// (Jenkins marks disabled jobs with the color "disabled").
func (j *Job) IsEnabled() (bool, error) {
	if _, err := j.Poll(); err != nil {
		return false, err
	}
	return j.Raw.Color != "disabled", nil
}
|
||||
|
||||
// HasQueuedBuild is not implemented and always panics.
func (j *Job) HasQueuedBuild() {
	panic("Not Implemented yet")
}
|
||||
|
||||
func (j *Job) InvokeSimple(params map[string]string) (int64, error) {
|
||||
endpoint := "/build"
|
||||
parameters, err := j.GetParameters()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(parameters) > 0 {
|
||||
endpoint = "/buildWithParameters"
|
||||
}
|
||||
data := url.Values{}
|
||||
for k, v := range params {
|
||||
data.Set(k, v)
|
||||
}
|
||||
resp, err := j.Jenkins.Requester.Post(j.Base+endpoint, bytes.NewBufferString(data.Encode()), nil, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != 200 && resp.StatusCode != 201 {
|
||||
return 0, errors.New("Could not invoke job " + j.GetName())
|
||||
}
|
||||
|
||||
location := resp.Header.Get("Location")
|
||||
if location == "" {
|
||||
return 0, errors.New("Don't have key \"Location\" in response of header")
|
||||
}
|
||||
|
||||
u, err := url.Parse(location)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
number, err := strconv.ParseInt(path.Base(u.Path), 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return number, nil
|
||||
}
|
||||
|
||||
func (j *Job) Invoke(files []string, skipIfRunning bool, params map[string]string, cause string, securityToken string) (bool, error) {
|
||||
isRunning, err := j.IsRunning()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if isRunning && skipIfRunning {
|
||||
return false, fmt.Errorf("Will not request new build because %s is already running", j.GetName())
|
||||
}
|
||||
|
||||
base := "/build"
|
||||
|
||||
// If parameters are specified - url is /builWithParameters
|
||||
if params != nil {
|
||||
base = "/buildWithParameters"
|
||||
} else {
|
||||
params = make(map[string]string)
|
||||
}
|
||||
|
||||
// If files are specified - url is /build
|
||||
if files != nil {
|
||||
base = "/build"
|
||||
}
|
||||
reqParams := map[string]string{}
|
||||
buildParams := map[string]string{}
|
||||
if securityToken != "" {
|
||||
reqParams["token"] = securityToken
|
||||
}
|
||||
|
||||
buildParams["json"] = string(makeJson(params))
|
||||
b, _ := json.Marshal(buildParams)
|
||||
resp, err := j.Jenkins.Requester.PostFiles(j.Base+base, bytes.NewBuffer(b), nil, reqParams, files)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode == 200 || resp.StatusCode == 201 {
|
||||
return true, nil
|
||||
}
|
||||
return false, errors.New(strconv.Itoa(resp.StatusCode))
|
||||
}
|
||||
|
||||
func (j *Job) Poll() (int, error) {
|
||||
response, err := j.Jenkins.Requester.GetJSON(j.Base, j.Raw, nil)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
response.Body.Close()
|
||||
return response.StatusCode, nil
|
||||
}
|
||||
@@ -1,86 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reflectutils"
|
||||
)
|
||||
|
||||
// Options holds the Jenkins/DevOps client configuration.
type Options struct {
	// Host is the Jenkins service address; empty means DevOps is disabled.
	// NOTE(review): the `json:",omitempty"` tags on Host/Username/Password
	// keep the default (capitalized) field names in JSON — presumably
	// intentional for config compatibility; confirm before changing.
	Host           string `json:",omitempty" yaml:"host,omitempty" description:"Jenkins service host address"`
	Username       string `json:",omitempty" yaml:"username,omitempty" description:"Jenkins admin username"`
	Password       string `json:",omitempty" yaml:"password,omitempty" description:"Jenkins admin password"`
	MaxConnections int    `json:"maxConnections,omitempty" yaml:"maxConnections,omitempty" description:"Maximum connections allowed to connect to Jenkins"`
	Endpoint       string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" description:"The endpoint of the ks-devops apiserver"`
}
|
||||
|
||||
// NewDevopsOptions returns a `zero` instance
// (Host/Username/Password empty, MaxConnections defaulted to 100;
// Endpoint is left at its zero value).
func NewDevopsOptions() *Options {
	return &Options{
		Host:           "",
		Username:       "",
		Password:       "",
		MaxConnections: 100,
	}
}
|
||||
|
||||
// ApplyTo apply configuration to another options
func (s *Options) ApplyTo(options *Options) {
	// Only override when a host is configured: an empty Host means the
	// DevOps component is disabled and must not clobber the target.
	if s.Host != "" {
		reflectutils.Override(options, s)
	}
}
|
||||
|
||||
// Validate check if there is misconfiguration in options
|
||||
func (s *Options) Validate() []error {
|
||||
var errors []error
|
||||
|
||||
// devops is not needed, ignore rest options
|
||||
if s.Host == "" {
|
||||
return errors
|
||||
}
|
||||
|
||||
if s.Username == "" || s.Password == "" {
|
||||
errors = append(errors, fmt.Errorf("jenkins's username or password is empty"))
|
||||
}
|
||||
|
||||
if s.MaxConnections <= 0 {
|
||||
errors = append(errors, fmt.Errorf("jenkins's maximum connections should be greater than 0"))
|
||||
}
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
// AddFlags registers the jenkins-* command line flags on fs, binding
// them to s and taking default values from c.
// NOTE(review): no flag is registered for Endpoint — presumably it is
// only settable via the configuration file; confirm before adding one.
func (s *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
	fs.StringVar(&s.Host, "jenkins-host", c.Host, ""+
		"Jenkins service host address. If left blank, means Jenkins "+
		"is unnecessary.")

	fs.StringVar(&s.Username, "jenkins-username", c.Username, ""+
		"Username for access to Jenkins service. Leave it blank if there isn't any.")

	fs.StringVar(&s.Password, "jenkins-password", c.Password, ""+
		"Password for access to Jenkins service, used pair with username.")

	fs.IntVar(&s.MaxConnections, "jenkins-max-connections", c.MaxConnections, ""+
		"Maximum allowed connections to Jenkins. ")
}
|
||||
@@ -1,870 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/PuerkitoBio/goquery"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
// Pipeline proxies a single BlueOcean pipeline API call to a Jenkins
// instance: Path is the pre-formatted API path, HttpParameters carries
// the original request data to forward.
type Pipeline struct {
	HttpParameters *devops.HttpParameters
	Jenkins        *Jenkins
	Path           string
}
|
||||
|
||||
const (
	// Single-pipeline (non-multibranch) BlueOcean endpoints.
	GetPipelineUrl         = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/"
	ListPipelinesUrl       = "/blue/rest/search/?"
	GetPipelineRunUrl      = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/runs/%s/"
	ListPipelineRunUrl     = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/runs/?"
	StopPipelineUrl        = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/runs/%s/stop/?"
	ReplayPipelineUrl      = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/runs/%s/replay/"
	RunPipelineUrl         = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/runs/"
	GetArtifactsUrl        = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/runs/%s/artifacts/?"
	GetRunLogUrl           = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/runs/%s/log/?"
	GetStepLogUrl          = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/runs/%s/nodes/%s/steps/%s/log/?"
	GetPipelineRunNodesUrl = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/runs/%s/nodes/?"
	SubmitInputStepUrl     = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/runs/%s/nodes/%s/steps/%s/"
	GetNodeStepsUrl        = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/runs/%s/nodes/%s/steps/?"

	// Multi-branch pipeline endpoints.
	GetBranchPipelineUrl     = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/branches/%s/"
	GetBranchPipelineRunUrl  = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/branches/%s/runs/%s/"
	StopBranchPipelineUrl    = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/branches/%s/runs/%s/stop/?"
	ReplayBranchPipelineUrl  = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/branches/%s/runs/%s/replay/"
	RunBranchPipelineUrl     = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/branches/%s/runs/"
	GetBranchArtifactsUrl    = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/branches/%s/runs/%s/artifacts/?"
	GetBranchRunLogUrl       = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/branches/%s/runs/%s/log/?"
	GetBranchStepLogUrl      = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/branches/%s/runs/%s/nodes/%s/steps/%s/log/?"
	GetBranchNodeStepsUrl    = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/branches/%s/runs/%s/nodes/%s/steps/?"
	GetBranchPipeRunNodesUrl = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/branches/%s/runs/%s/nodes/?"
	CheckBranchPipelineUrl   = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/branches/%s/runs/%s/nodes/%s/steps/%s/"
	GetPipeBranchUrl         = "/blue/rest/organizations/jenkins/pipelines/%s/pipelines/%s/branches/?"
	ScanBranchUrl            = "/job/%s/job/%s/build?"
	GetConsoleLogUrl         = "/job/%s/job/%s/indexing/consoleText"
	GetCrumbUrl              = "/crumbIssuer/api/json/"

	// SCM (source control) endpoints.
	GetSCMServersUrl    = "/blue/rest/organizations/jenkins/scm/%s/servers/"
	GetSCMOrgUrl        = "/blue/rest/organizations/jenkins/scm/%s/organizations/?"
	GetOrgRepoUrl       = "/blue/rest/organizations/jenkins/scm/%s/organizations/%s/repositories/?"
	CreateSCMServersUrl = "/blue/rest/organizations/jenkins/scm/%s/servers/"
	ValidateUrl         = "/blue/rest/organizations/jenkins/scm/%s/validate"

	// Webhook and converter/check endpoints.
	GetNotifyCommitUrl    = "/git/notifyCommit/?"
	GithubWebhookUrl      = "/github-webhook/"
	CheckScriptCompileUrl = "/job/%s/job/%s/descriptorByName/org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition/checkScriptCompile"

	// NOTE(review): "Pipelien" is a long-standing typo, but the name is
	// exported — renaming it would break callers.
	CheckPipelienCronUrl = "/job/%s/job/%s/descriptorByName/hudson.triggers.TimerTrigger/checkSpec?%s"
	CheckCronUrl         = "/job/%s/descriptorByName/hudson.triggers.TimerTrigger/checkSpec?%s"
	ToJenkinsfileUrl     = "/pipeline-model-converter/toJenkinsfile"
	ToJsonUrl            = "/pipeline-model-converter/toJson"

	// cronJobLayout is a time reference layout — presumably used to render
	// cron check results; confirm at the call sites.
	cronJobLayout = "Monday, January 2, 2006 15:04:05 PM"
)
|
||||
|
||||
func (p *Pipeline) GetPipeline() (*devops.Pipeline, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var pipeline devops.Pipeline
|
||||
|
||||
err = json.Unmarshal(res, &pipeline)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
return &pipeline, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) ListPipelines() (*devops.PipelineList, error) {
|
||||
res, _, err := p.Jenkins.SendPureRequestWithHeaderResp(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
if jErr, ok := err.(*JkError); ok {
|
||||
switch jErr.Code {
|
||||
case 404:
|
||||
err = fmt.Errorf("please check if there're any Jenkins plugins issues exist")
|
||||
default:
|
||||
err = fmt.Errorf("please check if Jenkins is running well")
|
||||
}
|
||||
klog.Errorf("API '%s' request response code is '%d'", p.Path, jErr.Code)
|
||||
} else {
|
||||
err = fmt.Errorf("unknow errors happened when communicate with Jenkins")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
count, err := p.searchPipelineCount()
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pipelienList, err := devops.UnmarshalPipeline(count, res)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
return pipelienList, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) searchPipelineCount() (int, error) {
|
||||
query, _ := ParseJenkinsQuery(p.HttpParameters.Url.RawQuery)
|
||||
query.Set("start", "0")
|
||||
query.Set("limit", "1000")
|
||||
query.Set("depth", "-1")
|
||||
|
||||
formatUrl := ListPipelinesUrl + query.Encode()
|
||||
|
||||
res, err := p.Jenkins.SendPureRequest(formatUrl, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return 0, err
|
||||
}
|
||||
var pipelines []devops.Pipeline
|
||||
err = json.Unmarshal(res, &pipelines)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return 0, err
|
||||
}
|
||||
return len(pipelines), nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) GetPipelineRun() (*devops.PipelineRun, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pipelineRun devops.PipelineRun
|
||||
err = json.Unmarshal(res, &pipelineRun)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
return &pipelineRun, err
|
||||
}
|
||||
|
||||
// ListPipelineRuns returns the pipeline's runs with pagination applied
// on the local side.
func (p *Pipeline) ListPipelineRuns() (*devops.PipelineRunList, error) {
	// prefer to use listPipelineRunsByRemotePaging once the corresponding issues from BlueOcean fixed
	return p.listPipelineRunsByLocalPaging()
}
|
||||
|
||||
// listPipelineRunsByRemotePaging get the pipeline runs with pagination by remote (Jenkins BlueOcean plugin)
|
||||
// get the pagination information from the server side is better than the local side, but the API has some issues
|
||||
// see also https://github.com/kubesphere/kubesphere/issues/3507
|
||||
//
|
||||
//nolint:unused
|
||||
func (p *Pipeline) listPipelineRunsByRemotePaging() (*devops.PipelineRunList, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pipelineRunList devops.PipelineRunList
|
||||
err = json.Unmarshal(res, &pipelineRunList.Items)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
total, err := p.searchPipelineRunsCount()
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
pipelineRunList.Total = total
|
||||
return &pipelineRunList, err
|
||||
}
|
||||
|
||||
// listPipelineRunsByLocalPaging should be a temporary solution
|
||||
// see also https://github.com/kubesphere/kubesphere/issues/3507
|
||||
func (p *Pipeline) listPipelineRunsByLocalPaging() (runList *devops.PipelineRunList, err error) {
|
||||
desiredStart, desiredLimit := p.parsePaging()
|
||||
|
||||
var pageUrl *url.URL // get all Pipeline runs
|
||||
if pageUrl, err = p.resetPaging(0, 10000); err != nil {
|
||||
return
|
||||
}
|
||||
res, err := p.Jenkins.SendPureRequest(pageUrl.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
runList = &devops.PipelineRunList{
|
||||
Items: make([]devops.PipelineRun, 0),
|
||||
}
|
||||
if err = json.Unmarshal(res, &runList.Items); err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// set the total count number
|
||||
runList.Total = len(runList.Items)
|
||||
|
||||
// keep the desired data/
|
||||
if desiredStart+1 >= runList.Total {
|
||||
// beyond the boundary, return an empty
|
||||
return
|
||||
}
|
||||
|
||||
endIndex := runList.Total
|
||||
if desiredStart+desiredLimit < endIndex {
|
||||
endIndex = desiredStart + desiredLimit
|
||||
}
|
||||
runList.Items = runList.Items[desiredStart:endIndex]
|
||||
return
|
||||
}
|
||||
|
||||
// resetPaging reset the paging setting from request, support start, limit
// NOTE(review): this mutates p.HttpParameters.Url.RawQuery in place, and
// the returned URL is parsed from p.Path — callers appear to use only its
// .Path component; confirm before relying on its query.
func (p *Pipeline) resetPaging(start, limit int) (path *url.URL, err error) {
	query, _ := ParseJenkinsQuery(p.HttpParameters.Url.RawQuery)
	query.Set("start", strconv.Itoa(start))
	query.Set("limit", strconv.Itoa(limit))
	p.HttpParameters.Url.RawQuery = query.Encode()
	path, err = url.Parse(p.Path)
	return
}
|
||||
|
||||
// parsePaging extracts the start/limit paging values from the request
// query; missing or malformed values fall back to zero.
func (p *Pipeline) parsePaging() (start, limit int) {
	query, _ := ParseJenkinsQuery(p.HttpParameters.Url.RawQuery)
	start, _ = strconv.Atoi(query.Get("start"))
	limit, _ = strconv.Atoi(query.Get("limit"))
	return
}
|
||||
|
||||
//nolint:unused
|
||||
func (p *Pipeline) searchPipelineRunsCount() (int, error) {
|
||||
u, err := p.resetPaging(0, 1000)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
res, err := p.Jenkins.SendPureRequest(u.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return 0, err
|
||||
}
|
||||
var runs []devops.PipelineRun
|
||||
err = json.Unmarshal(res, &runs)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return 0, err
|
||||
}
|
||||
return len(runs), nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) StopPipeline() (*devops.StopPipeline, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var stopPipeline devops.StopPipeline
|
||||
err = json.Unmarshal(res, &stopPipeline)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &stopPipeline, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) ReplayPipeline() (*devops.ReplayPipeline, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var replayPipeline devops.ReplayPipeline
|
||||
err = json.Unmarshal(res, &replayPipeline)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &replayPipeline, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) RunPipeline() (*devops.RunPipeline, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var runPipeline devops.RunPipeline
|
||||
err = json.Unmarshal(res, &runPipeline)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &runPipeline, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) GetArtifacts() ([]devops.Artifacts, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var artifacts []devops.Artifacts
|
||||
err = json.Unmarshal(res, &artifacts)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return artifacts, err
|
||||
}
|
||||
|
||||
// GetRunLog returns the raw full log of a pipeline run.
// NOTE(review): a transport error is logged but res is still returned
// as-is, so callers receive both a possibly-nil body and the error.
func (p *Pipeline) GetRunLog() ([]byte, error) {
	res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
	if err != nil {
		klog.Error(err)
	}

	return res, err
}

// GetStepLog returns a single step's log together with the response
// headers; errors are logged and forwarded unchanged, like GetRunLog.
func (p *Pipeline) GetStepLog() ([]byte, http.Header, error) {
	res, header, err := p.Jenkins.SendPureRequestWithHeaderResp(p.Path, p.HttpParameters)
	if err != nil {
		klog.Error(err)
	}

	return res, header, err
}
|
||||
|
||||
// GetNodeSteps lists the steps of one node in a pipeline run.
// NOTE(review): a transport error is only logged here — there is no early
// return, so the subsequent Unmarshal of the nil payload produces the
// error the caller actually sees.
func (p *Pipeline) GetNodeSteps() ([]devops.NodeSteps, error) {
	res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
	if err != nil {
		klog.Error(err)
	}

	var nodeSteps []devops.NodeSteps
	err = json.Unmarshal(res, &nodeSteps)
	if err != nil {
		klog.Error(err)
		return nil, err
	}

	return nodeSteps, err
}
|
||||
|
||||
func (p *Pipeline) GetPipelineRunNodes() ([]devops.PipelineRunNodes, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pipelineRunNodes []devops.PipelineRunNodes
|
||||
err = json.Unmarshal(res, &pipelineRunNodes)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return pipelineRunNodes, err
|
||||
}
|
||||
|
||||
// SubmitInputStep posts the user's proceed/abort decision for an input
// step and returns the raw response body.
// NOTE(review): errors are logged but the (possibly nil) body and error
// are both returned unchanged.
func (p *Pipeline) SubmitInputStep() ([]byte, error) {
	res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
	if err != nil {
		klog.Error(err)
	}

	return res, err
}
|
||||
|
||||
func (p *Pipeline) GetBranchPipeline() (*devops.BranchPipeline, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var branchPipeline devops.BranchPipeline
|
||||
err = json.Unmarshal(res, &branchPipeline)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &branchPipeline, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) GetBranchPipelineRun() (*devops.PipelineRun, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var branchPipelineRun devops.PipelineRun
|
||||
err = json.Unmarshal(res, &branchPipelineRun)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &branchPipelineRun, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) StopBranchPipeline() (*devops.StopPipeline, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var branchStopPipeline devops.StopPipeline
|
||||
err = json.Unmarshal(res, &branchStopPipeline)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &branchStopPipeline, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) ReplayBranchPipeline() (*devops.ReplayPipeline, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var branchReplayPipeline devops.ReplayPipeline
|
||||
err = json.Unmarshal(res, &branchReplayPipeline)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &branchReplayPipeline, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) RunBranchPipeline() (*devops.RunPipeline, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var branchRunPipeline devops.RunPipeline
|
||||
err = json.Unmarshal(res, &branchRunPipeline)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &branchRunPipeline, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) GetBranchArtifacts() ([]devops.Artifacts, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var artifacts []devops.Artifacts
|
||||
err = json.Unmarshal(res, &artifacts)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return artifacts, err
|
||||
}
|
||||
|
||||
// GetBranchRunLog returns the raw full log of a branch run.
// NOTE(review): errors are logged but the (possibly nil) body and error
// are both returned unchanged.
func (p *Pipeline) GetBranchRunLog() ([]byte, error) {
	res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
	if err != nil {
		klog.Error(err)
	}

	return res, err
}

// GetBranchStepLog returns a branch step's log together with the
// response headers; errors are logged and forwarded unchanged.
func (p *Pipeline) GetBranchStepLog() ([]byte, http.Header, error) {
	res, header, err := p.Jenkins.SendPureRequestWithHeaderResp(p.Path, p.HttpParameters)
	if err != nil {
		klog.Error(err)
	}

	return res, header, err
}
|
||||
|
||||
func (p *Pipeline) GetBranchNodeSteps() ([]devops.NodeSteps, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var branchNodeSteps []devops.NodeSteps
|
||||
err = json.Unmarshal(res, &branchNodeSteps)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return branchNodeSteps, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) GetBranchPipelineRunNodes() ([]devops.BranchPipelineRunNodes, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var branchPipelineRunNodes []devops.BranchPipelineRunNodes
|
||||
err = json.Unmarshal(res, &branchPipelineRunNodes)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return branchPipelineRunNodes, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) SubmitBranchInputStep() ([]byte, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) GetPipelineBranch() (*devops.PipelineBranch, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var pipelineBranch devops.PipelineBranch
|
||||
err = json.Unmarshal(res, &pipelineBranch)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &pipelineBranch, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) ScanBranch() ([]byte, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) GetConsoleLog() ([]byte, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) GetCrumb() (*devops.Crumb, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var crumb devops.Crumb
|
||||
err = json.Unmarshal(res, &crumb)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &crumb, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) GetSCMServers() ([]devops.SCMServer, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var SCMServer []devops.SCMServer
|
||||
err = json.Unmarshal(res, &SCMServer)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return SCMServer, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) GetSCMOrg() ([]devops.SCMOrg, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var SCMOrg []devops.SCMOrg
|
||||
err = json.Unmarshal(res, &SCMOrg)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return SCMOrg, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) GetOrgRepo() (devops.OrgRepo, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return devops.OrgRepo{}, err
|
||||
}
|
||||
var OrgRepo devops.OrgRepo
|
||||
err = json.Unmarshal(res, &OrgRepo)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return devops.OrgRepo{}, err
|
||||
}
|
||||
|
||||
return OrgRepo, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) CreateSCMServers() (*devops.SCMServer, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
var SCMServer devops.SCMServer
|
||||
err = json.Unmarshal(res, &SCMServer)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &SCMServer, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) GetNotifyCommit() ([]byte, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) GithubWebhook() ([]byte, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) Validate() (*devops.Validates, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var validates devops.Validates
|
||||
err = json.Unmarshal(res, &validates)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &validates, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) CheckScriptCompile() (*devops.CheckScript, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Jenkins will return different struct according to different results.
|
||||
var checkScript devops.CheckScript
|
||||
ok := json.Unmarshal(res, &checkScript)
|
||||
if ok != nil {
|
||||
var resJson []*devops.CheckScript
|
||||
err := json.Unmarshal(res, &resJson)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resJson[0], nil
|
||||
}
|
||||
|
||||
return &checkScript, err
|
||||
|
||||
}
|
||||
|
||||
func (p *Pipeline) CheckCron() (*devops.CheckCronRes, error) {
|
||||
var res = new(devops.CheckCronRes)
|
||||
|
||||
reqJenkins := &http.Request{
|
||||
Method: http.MethodGet,
|
||||
Header: p.HttpParameters.Header,
|
||||
}
|
||||
if cronServiceURL, err := url.Parse(p.Jenkins.Server + p.Path); err != nil {
|
||||
klog.Errorf(fmt.Sprintf("cannot parse Jenkins cronService URL, error: %#v", err))
|
||||
return interanlErrorMessage(), err
|
||||
} else {
|
||||
reqJenkins.URL = cronServiceURL
|
||||
}
|
||||
|
||||
client := &http.Client{Timeout: 30 * time.Second}
|
||||
reqJenkins.SetBasicAuth(p.Jenkins.Requester.BasicAuth.Username, p.Jenkins.Requester.BasicAuth.Password)
|
||||
resp, err := client.Do(reqJenkins)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return interanlErrorMessage(), err
|
||||
}
|
||||
|
||||
var responseText string
|
||||
if resp != nil {
|
||||
if responseData, err := getRespBody(resp); err == nil {
|
||||
responseText = string(responseData)
|
||||
} else {
|
||||
klog.Error(err)
|
||||
return interanlErrorMessage(), fmt.Errorf("cannot get the response body from the Jenkins cron service request, %#v", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = resp.Body.Close()
|
||||
}()
|
||||
|
||||
statusCode := resp.StatusCode
|
||||
if statusCode != http.StatusOK && statusCode != http.StatusBadRequest {
|
||||
// the response body is meaningless for the users, but it's useful for a contributor
|
||||
klog.Errorf("cron service from Jenkins is unavailable, error response: %v, status code: %d", responseText, statusCode)
|
||||
return interanlErrorMessage(), err
|
||||
}
|
||||
}
|
||||
klog.V(8).Infof("response text: %s", responseText)
|
||||
|
||||
doc, err := goquery.NewDocumentFromReader(bytes.NewReader([]byte(responseText)))
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return interanlErrorMessage(), err
|
||||
}
|
||||
// it gives a message which located in <div>...</div> when the status code is 200
|
||||
doc.Find("div").Each(func(i int, selection *goquery.Selection) {
|
||||
res.Message = selection.Text()
|
||||
res.Result, _ = selection.Attr("class")
|
||||
})
|
||||
// it gives a message which located in <pre>...</pre> when the status code is 400
|
||||
doc.Find("pre").Each(func(i int, selection *goquery.Selection) {
|
||||
res.Message = selection.Text()
|
||||
res.Result = "error"
|
||||
})
|
||||
if res.Result == "ok" {
|
||||
res.LastTime, res.NextTime, err = parseCronJobTime(res.Message)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return interanlErrorMessage(), err
|
||||
}
|
||||
}
|
||||
|
||||
return res, err
|
||||
}
|
||||
|
||||
// interanlErrorMessage returns the generic error payload handed back to API
// callers when the Jenkins cron check fails for internal reasons; details
// are only written to the ks-apiserver log.
// NOTE(review): the function name contains a typo ("interanl" -> "internal");
// renaming it would require touching every call site, so it is kept as-is.
func interanlErrorMessage() *devops.CheckCronRes {
	return &devops.CheckCronRes{
		Result:  "error",
		Message: "internal errors happened, get more details by checking ks-apiserver log output",
	}
}
|
||||
|
||||
func parseCronJobTime(msg string) (string, string, error) {
|
||||
|
||||
times := strings.Split(msg, ";")
|
||||
|
||||
lastTmp := strings.Split(times[0], " ")
|
||||
lastCount := len(lastTmp)
|
||||
lastTmp = lastTmp[lastCount-7 : lastCount-1]
|
||||
lastTime := strings.Join(lastTmp, " ")
|
||||
lastUinx, err := time.Parse(cronJobLayout, lastTime)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return "", "", err
|
||||
}
|
||||
last := lastUinx.Format(time.RFC3339)
|
||||
|
||||
nextTmp := strings.Split(times[1], " ")
|
||||
nextCount := len(nextTmp)
|
||||
nextTmp = nextTmp[nextCount-7 : nextCount-1]
|
||||
nextTime := strings.Join(nextTmp, " ")
|
||||
nextUinx, err := time.Parse(cronJobLayout, nextTime)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return "", "", err
|
||||
}
|
||||
next := nextUinx.Format(time.RFC3339)
|
||||
|
||||
return last, next, nil
|
||||
}
|
||||
|
||||
func (p *Pipeline) ToJenkinsfile() (*devops.ResJenkinsfile, error) {
|
||||
res, err := p.Jenkins.SendPureRequest(p.Path, p.HttpParameters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var jenkinsfile devops.ResJenkinsfile
|
||||
err = json.Unmarshal(res, &jenkinsfile)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &jenkinsfile, err
|
||||
}
|
||||
|
||||
func (p *Pipeline) ToJson() (result map[string]interface{}, err error) {
|
||||
var data []byte
|
||||
if data, err = p.Jenkins.SendPureRequest(p.Path, p.HttpParameters); err == nil {
|
||||
err = json.Unmarshal(data, &result)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -1,486 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/beevik/etree"
|
||||
|
||||
devopsv1alpha3 "kubesphere.io/api/devops/v1alpha3"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins/internal"
|
||||
)
|
||||
|
||||
// replaceXmlVersion rewrites the XML declaration by substituting oldVersion
// with targetVersion on the first line of config only; the rest of the
// document is left untouched.
func replaceXmlVersion(config, oldVersion, targetVersion string) string {
	parts := strings.SplitN(config, "\n", 2)
	parts[0] = strings.ReplaceAll(parts[0], oldVersion, targetVersion)
	return strings.Join(parts, "\n")
}
|
||||
|
||||
func createPipelineConfigXml(pipeline *devopsv1alpha3.NoScmPipeline) (string, error) {
|
||||
doc := etree.NewDocument()
|
||||
xmlString := `<?xml version='1.0' encoding='UTF-8'?>
|
||||
<flow-definition plugin="workflow-job">
|
||||
<actions>
|
||||
<org.jenkinsci.plugins.pipeline.modeldefinition.actions.DeclarativeJobAction plugin="pipeline-model-definition"/>
|
||||
<org.jenkinsci.plugins.pipeline.modeldefinition.actions.DeclarativeJobPropertyTrackerAction plugin="pipeline-model-definition">
|
||||
<jobProperties/>
|
||||
<triggers/>
|
||||
<parameters/>
|
||||
<options/>
|
||||
</org.jenkinsci.plugins.pipeline.modeldefinition.actions.DeclarativeJobPropertyTrackerAction>
|
||||
</actions>
|
||||
</flow-definition>
|
||||
`
|
||||
doc.ReadFromString(xmlString)
|
||||
flow := doc.SelectElement("flow-definition")
|
||||
flow.CreateElement("description").SetText(pipeline.Description)
|
||||
properties := flow.CreateElement("properties")
|
||||
|
||||
if pipeline.DisableConcurrent {
|
||||
properties.CreateElement("org.jenkinsci.plugins.workflow.job.properties.DisableConcurrentBuildsJobProperty")
|
||||
}
|
||||
|
||||
if pipeline.Discarder != nil {
|
||||
discarder := properties.CreateElement("jenkins.model.BuildDiscarderProperty")
|
||||
strategy := discarder.CreateElement("strategy")
|
||||
strategy.CreateAttr("class", "hudson.tasks.LogRotator")
|
||||
strategy.CreateElement("daysToKeep").SetText(pipeline.Discarder.DaysToKeep)
|
||||
strategy.CreateElement("numToKeep").SetText(pipeline.Discarder.NumToKeep)
|
||||
strategy.CreateElement("artifactDaysToKeep").SetText("-1")
|
||||
strategy.CreateElement("artifactNumToKeep").SetText("-1")
|
||||
}
|
||||
if pipeline.Parameters != nil {
|
||||
appendParametersToEtree(properties, pipeline.Parameters)
|
||||
}
|
||||
|
||||
if pipeline.TimerTrigger != nil {
|
||||
triggers := properties.
|
||||
CreateElement("org.jenkinsci.plugins.workflow.job.properties.PipelineTriggersJobProperty").
|
||||
CreateElement("triggers")
|
||||
triggers.CreateElement("hudson.triggers.TimerTrigger").CreateElement("spec").SetText(pipeline.TimerTrigger.Cron)
|
||||
}
|
||||
|
||||
pipelineDefine := flow.CreateElement("definition")
|
||||
pipelineDefine.CreateAttr("class", "org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition")
|
||||
pipelineDefine.CreateAttr("plugin", "workflow-cps")
|
||||
pipelineDefine.CreateElement("script").SetText(pipeline.Jenkinsfile)
|
||||
|
||||
pipelineDefine.CreateElement("sandbox").SetText("true")
|
||||
|
||||
flow.CreateElement("triggers")
|
||||
|
||||
if pipeline.RemoteTrigger != nil {
|
||||
flow.CreateElement("authToken").SetText(pipeline.RemoteTrigger.Token)
|
||||
}
|
||||
flow.CreateElement("disabled").SetText("false")
|
||||
|
||||
doc.Indent(2)
|
||||
stringXml, err := doc.WriteToString()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return replaceXmlVersion(stringXml, "1.0", "1.1"), err
|
||||
}
|
||||
|
||||
// parsePipelineConfigXml converts a Jenkins job config.xml document back into
// a NoScmPipeline spec; it is the inverse of createPipelineConfigXml.
// NOTE(review): the "description" and "properties" elements are assumed to be
// present; a config.xml missing either would panic here — confirm whether
// Jenkins ever produces such documents.
func parsePipelineConfigXml(config string) (*devopsv1alpha3.NoScmPipeline, error) {
	pipeline := &devopsv1alpha3.NoScmPipeline{}
	// the etree reader expects an XML 1.0 declaration
	config = replaceXmlVersion(config, "1.1", "1.0")
	doc := etree.NewDocument()
	err := doc.ReadFromString(config)
	if err != nil {
		return nil, err
	}
	flow := doc.SelectElement("flow-definition")
	if flow == nil {
		return nil, fmt.Errorf("can not find pipeline definition")
	}
	pipeline.Description = flow.SelectElement("description").Text()

	properties := flow.SelectElement("properties")
	// the mere presence of the marker element disables concurrent builds
	if properties.
		SelectElement(
			"org.jenkinsci.plugins.workflow.job.properties.DisableConcurrentBuildsJobProperty") != nil {
		pipeline.DisableConcurrent = true
	}
	if properties.SelectElement("jenkins.model.BuildDiscarderProperty") != nil {
		strategy := properties.
			SelectElement("jenkins.model.BuildDiscarderProperty").
			SelectElement("strategy")
		pipeline.Discarder = &devopsv1alpha3.DiscarderProperty{
			DaysToKeep: strategy.SelectElement("daysToKeep").Text(),
			NumToKeep:  strategy.SelectElement("numToKeep").Text(),
		}
	}

	// an empty parameter list is normalized to nil so it round-trips cleanly
	pipeline.Parameters = getParametersfromEtree(properties)
	if len(pipeline.Parameters) == 0 {
		pipeline.Parameters = nil
	}

	if triggerProperty := properties.
		SelectElement(
			"org.jenkinsci.plugins.workflow.job.properties.PipelineTriggersJobProperty"); triggerProperty != nil {
		triggers := triggerProperty.SelectElement("triggers")
		if timerTrigger := triggers.SelectElement("hudson.triggers.TimerTrigger"); timerTrigger != nil {
			pipeline.TimerTrigger = &devopsv1alpha3.TimerTrigger{
				Cron: timerTrigger.SelectElement("spec").Text(),
			}
		}
	}
	if authToken := flow.SelectElement("authToken"); authToken != nil {
		pipeline.RemoteTrigger = &devopsv1alpha3.RemoteTrigger{
			Token: authToken.Text(),
		}
	}
	if definition := flow.SelectElement("definition"); definition != nil {
		if script := definition.SelectElement("script"); script != nil {
			pipeline.Jenkinsfile = script.Text()
		}
	}
	return pipeline, nil
}
|
||||
|
||||
// appendParametersToEtree renders the pipeline parameter specs into the
// hudson.model.ParametersDefinitionProperty element of a job config.xml.
// The Jenkins class name for each parameter is found by reverse-looking-up
// the human-readable type in ParameterTypeMap.
func appendParametersToEtree(properties *etree.Element, parameters []devopsv1alpha3.Parameter) {
	parameterDefinitions := properties.CreateElement("hudson.model.ParametersDefinitionProperty").
		CreateElement("parameterDefinitions")
	for _, parameter := range parameters {
		// reverse lookup: map is className -> typeName
		for className, typeName := range ParameterTypeMap {
			if typeName == parameter.Type {
				paramDefine := parameterDefinitions.CreateElement(className)
				paramDefine.CreateElement("name").SetText(parameter.Name)
				paramDefine.CreateElement("description").SetText(parameter.Description)
				switch parameter.Type {
				case "choice":
					choices := paramDefine.CreateElement("choices")
					choices.CreateAttr("class", "java.util.Arrays$ArrayList")
					// choice values are newline-separated in DefaultValue
					// see also https://github.com/kubesphere/kubesphere/issues/3430
					a := choices.CreateElement("a")
					a.CreateAttr("class", "string-array")
					choiceValues := strings.Split(parameter.DefaultValue, "\n")
					for _, choiceValue := range choiceValues {
						a.CreateElement("string").SetText(choiceValue)
					}
				case "file":
					// file parameters carry no default value
					break
				default:
					paramDefine.CreateElement("defaultValue").SetText(parameter.DefaultValue)
				}
			}
		}
	}
}
|
||||
|
||||
// getParametersfromEtree reads the parameter definitions out of a job
// config.xml properties element; it is the inverse of appendParametersToEtree.
// Unknown parameter classes are preserved with DefaultValue "unknown" and the
// raw XML tag as the type so they survive a round trip without data loss.
func getParametersfromEtree(properties *etree.Element) []devopsv1alpha3.Parameter {
	var parameters []devopsv1alpha3.Parameter
	if parametersProperty := properties.SelectElement("hudson.model.ParametersDefinitionProperty"); parametersProperty != nil {
		params := parametersProperty.SelectElement("parameterDefinitions").ChildElements()
		for _, param := range params {
			switch param.Tag {
			case "hudson.model.StringParameterDefinition":
				parameters = append(parameters, devopsv1alpha3.Parameter{
					Name:         param.SelectElement("name").Text(),
					Description:  param.SelectElement("description").Text(),
					DefaultValue: param.SelectElement("defaultValue").Text(),
					Type:         ParameterTypeMap["hudson.model.StringParameterDefinition"],
				})
			case "hudson.model.BooleanParameterDefinition":
				parameters = append(parameters, devopsv1alpha3.Parameter{
					Name:         param.SelectElement("name").Text(),
					Description:  param.SelectElement("description").Text(),
					DefaultValue: param.SelectElement("defaultValue").Text(),
					Type:         ParameterTypeMap["hudson.model.BooleanParameterDefinition"],
				})
			case "hudson.model.TextParameterDefinition":
				parameters = append(parameters, devopsv1alpha3.Parameter{
					Name:         param.SelectElement("name").Text(),
					Description:  param.SelectElement("description").Text(),
					DefaultValue: param.SelectElement("defaultValue").Text(),
					Type:         ParameterTypeMap["hudson.model.TextParameterDefinition"],
				})
			case "hudson.model.FileParameterDefinition":
				// file parameters have no default value element
				parameters = append(parameters, devopsv1alpha3.Parameter{
					Name:        param.SelectElement("name").Text(),
					Description: param.SelectElement("description").Text(),
					Type:        ParameterTypeMap["hudson.model.FileParameterDefinition"],
				})
			case "hudson.model.PasswordParameterDefinition":
				// NOTE(review): DefaultValue is read from the "name" element
				// here, not "defaultValue" — presumably to avoid exposing the
				// stored secret, but confirm this is intentional.
				parameters = append(parameters, devopsv1alpha3.Parameter{
					Name:         param.SelectElement("name").Text(),
					Description:  param.SelectElement("description").Text(),
					DefaultValue: param.SelectElement("name").Text(),
					Type:         ParameterTypeMap["hudson.model.PasswordParameterDefinition"],
				})
			case "hudson.model.ChoiceParameterDefinition":
				choiceParameter := devopsv1alpha3.Parameter{
					Name:        param.SelectElement("name").Text(),
					Description: param.SelectElement("description").Text(),
					Type:        ParameterTypeMap["hudson.model.ChoiceParameterDefinition"],
				}
				choicesEle := param.SelectElement("choices")
				var choices []*etree.Element
				// the child element is a in the simple pipeline, the child is string list in the multi-branch pipeline
				// see also https://github.com/kubesphere/kubesphere/issues/3430
				choiceAnchor := choicesEle.SelectElement("a")
				if choiceAnchor == nil {
					choices = choicesEle.SelectElements("string")
				} else {
					choices = choiceAnchor.SelectElements("string")
				}
				// choices are flattened into a newline-separated DefaultValue
				for _, choice := range choices {
					choiceParameter.DefaultValue += fmt.Sprintf("%s\n", choice.Text())
				}
				choiceParameter.DefaultValue = strings.TrimSpace(choiceParameter.DefaultValue)
				parameters = append(parameters, choiceParameter)
			default:
				parameters = append(parameters, devopsv1alpha3.Parameter{
					Name:         param.SelectElement("name").Text(),
					Description:  param.SelectElement("description").Text(),
					DefaultValue: "unknown",
					Type:         param.Tag,
				})
			}
		}
	}
	return parameters
}
|
||||
|
||||
func appendMultiBranchJobTriggerToEtree(properties *etree.Element, s *devopsv1alpha3.MultiBranchJobTrigger) {
|
||||
triggerProperty := properties.CreateElement("org.jenkinsci.plugins.workflow.multibranch.PipelineTriggerProperty")
|
||||
triggerProperty.CreateAttr("plugin", "multibranch-action-triggers")
|
||||
triggerProperty.CreateElement("createActionJobsToTrigger").SetText(s.CreateActionJobsToTrigger)
|
||||
triggerProperty.CreateElement("deleteActionJobsToTrigger").SetText(s.DeleteActionJobsToTrigger)
|
||||
//nolint:gosimple
|
||||
return
|
||||
}
|
||||
|
||||
func getMultiBranchJobTriggerfromEtree(properties *etree.Element) *devopsv1alpha3.MultiBranchJobTrigger {
|
||||
var s devopsv1alpha3.MultiBranchJobTrigger
|
||||
triggerProperty := properties.SelectElement("org.jenkinsci.plugins.workflow.multibranch.PipelineTriggerProperty")
|
||||
if triggerProperty != nil {
|
||||
s.CreateActionJobsToTrigger = triggerProperty.SelectElement("createActionJobsToTrigger").Text()
|
||||
s.DeleteActionJobsToTrigger = triggerProperty.SelectElement("deleteActionJobsToTrigger").Text()
|
||||
}
|
||||
return &s
|
||||
}
|
||||
// createMultiBranchPipelineConfigXml renders a MultiBranchPipeline spec into
// a Jenkins WorkflowMultiBranchProject config.xml document: description,
// optional job triggers, build discarder, periodic scan trigger, the SCM
// branch source (git/github/gitlab/svn/bitbucket) and the Jenkinsfile path.
// NOTE(review): the projectName parameter is not used anywhere in the body —
// confirm whether it can be dropped at the call sites.
func createMultiBranchPipelineConfigXml(projectName string, pipeline *devopsv1alpha3.MultiBranchPipeline) (string, error) {
	doc := etree.NewDocument()
	// NOTE(review): the template starts with a newline before the XML
	// declaration; etree appears to tolerate this, but it is fragile.
	xmlString := `
<?xml version='1.0' encoding='UTF-8'?>
<org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject plugin="workflow-multibranch">
<actions/>
<properties>
<org.jenkinsci.plugins.pipeline.modeldefinition.config.FolderConfig plugin="pipeline-model-definition">
<dockerLabel></dockerLabel>
<registry plugin="docker-commons"/>
</org.jenkinsci.plugins.pipeline.modeldefinition.config.FolderConfig>
</properties>
<folderViews class="jenkins.branch.MultiBranchProjectViewHolder" plugin="branch-api">
<owner class="org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject" reference="../.."/>
</folderViews>
<healthMetrics>
<com.cloudbees.hudson.plugins.folder.health.WorstChildHealthMetric plugin="cloudbees-folder">
<nonRecursive>false</nonRecursive>
</com.cloudbees.hudson.plugins.folder.health.WorstChildHealthMetric>
</healthMetrics>
<icon class="jenkins.branch.MetadataActionFolderIcon" plugin="branch-api">
<owner class="org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject" reference="../.."/>
</icon>
</org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject>`
	err := doc.ReadFromString(xmlString)
	if err != nil {
		return "", err
	}

	project := doc.SelectElement("org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject")
	project.CreateElement("description").SetText(pipeline.Description)

	if pipeline.MultiBranchJobTrigger != nil {
		properties := project.SelectElement("properties")
		appendMultiBranchJobTriggerToEtree(properties, pipeline.MultiBranchJobTrigger)
	}

	// orphaned branches are pruned according to the discarder settings
	if pipeline.Discarder != nil {
		discarder := project.CreateElement("orphanedItemStrategy")
		discarder.CreateAttr("class", "com.cloudbees.hudson.plugins.folder.computed.DefaultOrphanedItemStrategy")
		discarder.CreateAttr("plugin", "cloudbees-folder")
		discarder.CreateElement("pruneDeadBranches").SetText("true")
		discarder.CreateElement("daysToKeep").SetText(pipeline.Discarder.DaysToKeep)
		discarder.CreateElement("numToKeep").SetText(pipeline.Discarder.NumToKeep)
	}

	triggers := project.CreateElement("triggers")
	if pipeline.TimerTrigger != nil {
		timeTrigger := triggers.CreateElement(
			"com.cloudbees.hudson.plugins.folder.computed.PeriodicFolderTrigger")
		timeTrigger.CreateAttr("plugin", "cloudbees-folder")
		// Interval is a string of milliseconds; it is mapped to the closest
		// Jenkins cron spec by toCrontab
		millis, err := strconv.ParseInt(pipeline.TimerTrigger.Interval, 10, 64)
		if err != nil {
			return "", err
		}
		timeTrigger.CreateElement("spec").SetText(toCrontab(millis))
		timeTrigger.CreateElement("interval").SetText(pipeline.TimerTrigger.Interval)

		triggers.CreateElement("disabled").SetText("false")
	}

	sources := project.CreateElement("sources")
	sources.CreateAttr("class", "jenkins.branch.MultiBranchProject$BranchSourceList")
	sources.CreateAttr("plugin", "branch-api")
	sourcesOwner := sources.CreateElement("owner")
	sourcesOwner.CreateAttr("class", "org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject")
	sourcesOwner.CreateAttr("reference", "../..")

	branchSource := sources.CreateElement("data").CreateElement("jenkins.branch.BranchSource")
	branchSourceStrategy := branchSource.CreateElement("strategy")
	branchSourceStrategy.CreateAttr("class", "jenkins.branch.NamedExceptionsBranchPropertyStrategy")
	branchSourceStrategy.CreateElement("defaultProperties").CreateAttr("class", "empty-list")
	branchSourceStrategy.CreateElement("namedExceptions").CreateAttr("class", "empty-list")
	source := branchSource.CreateElement("source")

	// delegate the SCM-specific XML to the internal helpers
	switch pipeline.SourceType {
	case devopsv1alpha3.SourceTypeGit:
		internal.AppendGitSourceToEtree(source, pipeline.GitSource)
	case devopsv1alpha3.SourceTypeGithub:
		internal.AppendGithubSourceToEtree(source, pipeline.GitHubSource)
	case devopsv1alpha3.SourceTypeGitlab:
		internal.AppendGitlabSourceToEtree(source, pipeline.GitlabSource)
	case devopsv1alpha3.SourceTypeSVN:
		internal.AppendSvnSourceToEtree(source, pipeline.SvnSource)
	case devopsv1alpha3.SourceTypeSingleSVN:
		internal.AppendSingleSvnSourceToEtree(source, pipeline.SingleSvnSource)
	case devopsv1alpha3.SourceTypeBitbucket:
		internal.AppendBitbucketServerSourceToEtree(source, pipeline.BitbucketServerSource)

	default:
		return "", fmt.Errorf("unsupport source type: %s", pipeline.SourceType)
	}

	factory := project.CreateElement("factory")
	factory.CreateAttr("class", "org.jenkinsci.plugins.workflow.multibranch.WorkflowBranchProjectFactory")
	factoryOwner := factory.CreateElement("owner")
	factoryOwner.CreateAttr("class", "org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject")
	factoryOwner.CreateAttr("reference", "../..")
	factory.CreateElement("scriptPath").SetText(pipeline.ScriptPath)

	doc.Indent(2)
	stringXml, err := doc.WriteToString()
	// Jenkins expects an XML 1.1 declaration
	return replaceXmlVersion(stringXml, "1.0", "1.1"), err
}
|
||||
|
||||
// parseMultiBranchPipelineConfigXml converts a WorkflowMultiBranchProject
// config.xml document back into a MultiBranchPipeline spec; it is the
// inverse of createMultiBranchPipelineConfigXml. The SCM source type is
// dispatched on the "class" attribute of the branch source element.
func parseMultiBranchPipelineConfigXml(config string) (*devopsv1alpha3.MultiBranchPipeline, error) {
	pipeline := &devopsv1alpha3.MultiBranchPipeline{}
	// the etree reader expects an XML 1.0 declaration
	config = replaceXmlVersion(config, "1.1", "1.0")
	doc := etree.NewDocument()
	err := doc.ReadFromString(config)
	if err != nil {
		return nil, err
	}
	project := doc.SelectElement("org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject")
	if project == nil {
		return nil, fmt.Errorf("can not parse mutibranch pipeline config")
	}
	if properties := project.SelectElement("properties"); properties != nil {
		if multibranchTrigger := properties.SelectElement(
			"org.jenkinsci.plugins.workflow.multibranch.PipelineTriggerProperty"); multibranchTrigger != nil {
			pipeline.MultiBranchJobTrigger = getMultiBranchJobTriggerfromEtree(properties)
		}
	}
	// NOTE(review): "description" is assumed to exist; a document without it
	// would panic here.
	pipeline.Description = project.SelectElement("description").Text()

	if discarder := project.SelectElement("orphanedItemStrategy"); discarder != nil {
		pipeline.Discarder = &devopsv1alpha3.DiscarderProperty{
			DaysToKeep: discarder.SelectElement("daysToKeep").Text(),
			NumToKeep:  discarder.SelectElement("numToKeep").Text(),
		}
	}
	if triggers := project.SelectElement("triggers"); triggers != nil {
		if timerTrigger := triggers.SelectElement(
			"com.cloudbees.hudson.plugins.folder.computed.PeriodicFolderTrigger"); timerTrigger != nil {
			pipeline.TimerTrigger = &devopsv1alpha3.TimerTrigger{
				Interval: timerTrigger.SelectElement("interval").Text(),
			}
		}
	}

	if sources := project.SelectElement("sources"); sources != nil {
		if sourcesData := sources.SelectElement("data"); sourcesData != nil {
			if branchSource := sourcesData.SelectElement("jenkins.branch.BranchSource"); branchSource != nil {
				source := branchSource.SelectElement("source")
				// the source plugin class determines which parser applies;
				// unrecognized classes leave SourceType empty
				switch source.SelectAttr("class").Value {
				case "org.jenkinsci.plugins.github_branch_source.GitHubSCMSource":
					pipeline.GitHubSource = internal.GetGithubSourcefromEtree(source)
					pipeline.SourceType = devopsv1alpha3.SourceTypeGithub
				case "com.cloudbees.jenkins.plugins.bitbucket.BitbucketSCMSource":
					pipeline.BitbucketServerSource = internal.GetBitbucketServerSourceFromEtree(source)
					pipeline.SourceType = devopsv1alpha3.SourceTypeBitbucket
				case "io.jenkins.plugins.gitlabbranchsource.GitLabSCMSource":
					pipeline.GitlabSource = internal.GetGitlabSourceFromEtree(source)
					pipeline.SourceType = devopsv1alpha3.SourceTypeGitlab

				case "jenkins.plugins.git.GitSCMSource":
					pipeline.SourceType = devopsv1alpha3.SourceTypeGit
					pipeline.GitSource = internal.GetGitSourcefromEtree(source)

				case "jenkins.scm.impl.SingleSCMSource":
					pipeline.SourceType = devopsv1alpha3.SourceTypeSingleSVN
					pipeline.SingleSvnSource = internal.GetSingleSvnSourceFromEtree(source)

				case "jenkins.scm.impl.subversion.SubversionSCMSource":
					pipeline.SourceType = devopsv1alpha3.SourceTypeSVN
					pipeline.SvnSource = internal.GetSvnSourcefromEtree(source)
				}
			}
		}
	}

	scriptPathEle := project.SelectElement("factory").SelectElement("scriptPath")
	if scriptPathEle != nil {
		// There's no script path if current pipeline using a default Jenkinsfile
		// see also https://github.com/jenkinsci/pipeline-multibranch-defaults-plugin
		pipeline.ScriptPath = scriptPathEle.Text()
	}
	return pipeline, nil
}
|
||||
|
||||
// toCrontab maps a scan interval given in milliseconds to a Jenkins cron
// expression of roughly comparable period. Short intervals poll every
// minute; anything above 48 hours degrades to once per day.
func toCrontab(millis int64) string {
	interval := time.Duration(millis) * time.Millisecond
	switch {
	case interval <= 5*time.Minute:
		return "* * * * *"
	case interval <= 30*time.Minute:
		return "H/5 * * * *"
	case interval <= time.Hour:
		return "H/15 * * * *"
	case interval <= 8*time.Hour:
		return "H/30 * * * *"
	case interval <= 24*time.Hour:
		return "H H/4 * * *"
	case interval <= 48*time.Hour:
		return "H H/12 * * *"
	default:
		return "H H * * *"
	}
}
|
||||
@@ -1,739 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
devopsv1alpha3 "kubesphere.io/api/devops/v1alpha3"
|
||||
)
|
||||
|
||||
// Test_NoScmPipelineConfig checks that a plain (no-SCM) pipeline survives a
// round trip through createPipelineConfigXml and parsePipelineConfigXml
// unchanged, including the DisableConcurrent flag.
func Test_NoScmPipelineConfig(t *testing.T) {
	inputs := []*devopsv1alpha3.NoScmPipeline{
		{
			Name:        "",
			Description: "for test",
			Jenkinsfile: "node{echo 'hello'}",
		},
		{
			Name:        "",
			Description: "",
			Jenkinsfile: "node{echo 'hello'}",
		},
		{
			Name:              "",
			Description:       "",
			Jenkinsfile:       "node{echo 'hello'}",
			DisableConcurrent: true,
		},
	}
	for _, input := range inputs {
		outputString, err := createPipelineConfigXml(input)
		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		// Parse the generated XML back and require an exact round trip.
		output, err := parsePipelineConfigXml(outputString)

		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		if !reflect.DeepEqual(input, output) {
			t.Fatalf("input [%+v] output [%+v] should equal ", input, output)
		}
	}
}
|
||||
|
||||
// Test_NoScmPipelineConfig_Discarder round-trips the build-discarder
// property through the config XML for every combination of set/empty
// DaysToKeep and NumToKeep.
func Test_NoScmPipelineConfig_Discarder(t *testing.T) {
	inputs := []*devopsv1alpha3.NoScmPipeline{
		{
			Name:        "",
			Description: "for test",
			Jenkinsfile: "node{echo 'hello'}",
			Discarder: &devopsv1alpha3.DiscarderProperty{
				DaysToKeep: "3", NumToKeep: "5",
			},
		},
		{
			Name:        "",
			Description: "for test",
			Jenkinsfile: "node{echo 'hello'}",
			Discarder: &devopsv1alpha3.DiscarderProperty{
				DaysToKeep: "3", NumToKeep: "",
			},
		},
		{
			Name:        "",
			Description: "for test",
			Jenkinsfile: "node{echo 'hello'}",
			Discarder: &devopsv1alpha3.DiscarderProperty{
				DaysToKeep: "", NumToKeep: "21321",
			},
		},
		{
			Name:        "",
			Description: "for test",
			Jenkinsfile: "node{echo 'hello'}",
			Discarder: &devopsv1alpha3.DiscarderProperty{
				DaysToKeep: "", NumToKeep: "",
			},
		},
	}
	for _, input := range inputs {
		outputString, err := createPipelineConfigXml(input)
		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		output, err := parsePipelineConfigXml(outputString)

		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		if !reflect.DeepEqual(input, output) {
			t.Fatalf("input [%+v] output [%+v] should equal ", input, output)
		}
	}
}
|
||||
|
||||
// Test_NoScmPipelineConfig_Param round-trips build parameters of every
// supported type (string, boolean, text, choice) through the config XML.
// Choice defaults use "\n"-separated option lists.
func Test_NoScmPipelineConfig_Param(t *testing.T) {
	inputs := []*devopsv1alpha3.NoScmPipeline{
		{
			Name:        "",
			Description: "for test",
			Jenkinsfile: "node{echo 'hello'}",
			Parameters: []devopsv1alpha3.Parameter{
				{
					Name:         "d",
					DefaultValue: "a\nb",
					Type:         "choice",
					Description:  "fortest",
				},
			},
		},
		{
			Name:        "",
			Description: "for test",
			Jenkinsfile: "node{echo 'hello'}",
			Parameters: []devopsv1alpha3.Parameter{
				{
					Name:         "a",
					DefaultValue: "abc",
					Type:         "string",
					Description:  "fortest",
				},
				{
					Name:         "b",
					DefaultValue: "false",
					Type:         "boolean",
					Description:  "fortest",
				},
				{
					Name:         "c",
					DefaultValue: "password \n aaa",
					Type:         "text",
					Description:  "fortest",
				},
				{
					Name:         "d",
					DefaultValue: "a\nb",
					Type:         "choice",
					Description:  "fortest",
				},
			},
		},
	}
	for _, input := range inputs {
		outputString, err := createPipelineConfigXml(input)
		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		output, err := parsePipelineConfigXml(outputString)

		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		if !reflect.DeepEqual(input, output) {
			t.Fatalf("input [%+v] output [%+v] should equal ", input, output)
		}
	}
}
|
||||
|
||||
// Test_NoScmPipelineConfig_Trigger round-trips timer (cron) and remote
// (token) triggers through the config XML, alone and combined.
func Test_NoScmPipelineConfig_Trigger(t *testing.T) {
	inputs := []*devopsv1alpha3.NoScmPipeline{
		{
			Name:        "",
			Description: "for test",
			Jenkinsfile: "node{echo 'hello'}",
			TimerTrigger: &devopsv1alpha3.TimerTrigger{
				Cron: "1 1 1 * * *",
			},
		},

		{
			Name:        "",
			Description: "for test",
			Jenkinsfile: "node{echo 'hello'}",
			RemoteTrigger: &devopsv1alpha3.RemoteTrigger{
				Token: "abc",
			},
		},
		{
			Name:        "",
			Description: "for test",
			Jenkinsfile: "node{echo 'hello'}",
			TimerTrigger: &devopsv1alpha3.TimerTrigger{
				Cron: "1 1 1 * * *",
			},
			RemoteTrigger: &devopsv1alpha3.RemoteTrigger{
				Token: "abc",
			},
		},
	}

	for _, input := range inputs {
		outputString, err := createPipelineConfigXml(input)
		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		output, err := parsePipelineConfigXml(outputString)

		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		if !reflect.DeepEqual(input, output) {
			t.Fatalf("input [%+v] output [%+v] should equal ", input, output)
		}
	}
}
|
||||
|
||||
// Test_MultiBranchPipelineConfig checks that a multi-branch pipeline with an
// empty source definition for each supported SourceType round-trips through
// createMultiBranchPipelineConfigXml / parseMultiBranchPipelineConfigXml.
func Test_MultiBranchPipelineConfig(t *testing.T) {

	inputs := []*devopsv1alpha3.MultiBranchPipeline{
		{
			Name:        "",
			Description: "for test",
			ScriptPath:  "Jenkinsfile",
			SourceType:  "git",
			GitSource:   &devopsv1alpha3.GitSource{},
		},
		{
			Name:         "",
			Description:  "for test",
			ScriptPath:   "Jenkinsfile",
			SourceType:   "github",
			GitHubSource: &devopsv1alpha3.GithubSource{},
		},
		{
			Name:            "",
			Description:     "for test",
			ScriptPath:      "Jenkinsfile",
			SourceType:      "single_svn",
			SingleSvnSource: &devopsv1alpha3.SingleSvnSource{},
		},
		{
			Name:        "",
			Description: "for test",
			ScriptPath:  "Jenkinsfile",
			SourceType:  "svn",
			SvnSource:   &devopsv1alpha3.SvnSource{},
		},
		{
			Name:         "",
			Description:  "for test",
			ScriptPath:   "Jenkinsfile",
			SourceType:   "gitlab",
			GitlabSource: &devopsv1alpha3.GitlabSource{},
		},
	}
	for _, input := range inputs {
		outputString, err := createMultiBranchPipelineConfigXml("", input)
		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		output, err := parseMultiBranchPipelineConfigXml(outputString)

		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		if !reflect.DeepEqual(input, output) {
			t.Fatalf("input [%+v] output [%+v] should equal ", input, output)
		}
	}
}
|
||||
|
||||
// Test_MultiBranchPipelineConfig_Discarder round-trips the build-discarder
// property of a multi-branch pipeline through the config XML.
func Test_MultiBranchPipelineConfig_Discarder(t *testing.T) {

	inputs := []*devopsv1alpha3.MultiBranchPipeline{
		{
			Name:        "",
			Description: "for test",
			ScriptPath:  "Jenkinsfile",
			SourceType:  "git",
			Discarder: &devopsv1alpha3.DiscarderProperty{
				DaysToKeep: "1",
				NumToKeep:  "2",
			},
			GitSource: &devopsv1alpha3.GitSource{},
		},
	}
	for _, input := range inputs {
		outputString, err := createMultiBranchPipelineConfigXml("", input)
		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		output, err := parseMultiBranchPipelineConfigXml(outputString)

		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		if !reflect.DeepEqual(input, output) {
			t.Fatalf("input [%+v] output [%+v] should equal ", input, output)
		}
	}
}
|
||||
|
||||
// Test_MultiBranchPipelineConfig_TimerTrigger round-trips the interval-based
// timer trigger (milliseconds, see toCrontab) through the config XML.
func Test_MultiBranchPipelineConfig_TimerTrigger(t *testing.T) {
	inputs := []*devopsv1alpha3.MultiBranchPipeline{
		{
			Name:        "",
			Description: "for test",
			ScriptPath:  "Jenkinsfile",
			SourceType:  "git",
			TimerTrigger: &devopsv1alpha3.TimerTrigger{
				Interval: "12345566",
			},
			GitSource: &devopsv1alpha3.GitSource{},
		},
	}
	for _, input := range inputs {
		outputString, err := createMultiBranchPipelineConfigXml("", input)
		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		output, err := parseMultiBranchPipelineConfigXml(outputString)

		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		if !reflect.DeepEqual(input, output) {
			t.Fatalf("input [%+v] output [%+v] should equal ", input, output)
		}
	}
}
|
||||
|
||||
func Test_MultiBranchPipelineConfig_Source(t *testing.T) {
|
||||
|
||||
inputs := []*devopsv1alpha3.MultiBranchPipeline{
|
||||
{
|
||||
Name: "",
|
||||
Description: "for test",
|
||||
ScriptPath: "Jenkinsfile",
|
||||
SourceType: "git",
|
||||
TimerTrigger: &devopsv1alpha3.TimerTrigger{
|
||||
Interval: "12345566",
|
||||
},
|
||||
GitSource: &devopsv1alpha3.GitSource{
|
||||
Url: "https://github.com/kubesphere/devops",
|
||||
CredentialId: "git",
|
||||
DiscoverBranches: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "",
|
||||
Description: "for test",
|
||||
ScriptPath: "Jenkinsfile",
|
||||
SourceType: "github",
|
||||
TimerTrigger: &devopsv1alpha3.TimerTrigger{
|
||||
Interval: "12345566",
|
||||
},
|
||||
GitHubSource: &devopsv1alpha3.GithubSource{
|
||||
Owner: "kubesphere",
|
||||
Repo: "devops",
|
||||
CredentialId: "github",
|
||||
ApiUri: "https://api.github.com",
|
||||
DiscoverBranches: 1,
|
||||
DiscoverPRFromOrigin: 2,
|
||||
DiscoverPRFromForks: &devopsv1alpha3.DiscoverPRFromForks{
|
||||
Strategy: 1,
|
||||
Trust: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "",
|
||||
Description: "for test",
|
||||
ScriptPath: "Jenkinsfile",
|
||||
SourceType: "gitlab",
|
||||
TimerTrigger: &devopsv1alpha3.TimerTrigger{
|
||||
Interval: "12345566",
|
||||
},
|
||||
GitlabSource: &devopsv1alpha3.GitlabSource{
|
||||
Owner: "kubesphere",
|
||||
Repo: "devops",
|
||||
CredentialId: "gitlab",
|
||||
ServerName: "default-gitlab",
|
||||
DiscoverBranches: 1,
|
||||
DiscoverPRFromOrigin: 2,
|
||||
DiscoverTags: true,
|
||||
DiscoverPRFromForks: &devopsv1alpha3.DiscoverPRFromForks{
|
||||
Strategy: 1,
|
||||
Trust: 1,
|
||||
},
|
||||
CloneOption: &devopsv1alpha3.GitCloneOption{
|
||||
Timeout: 10,
|
||||
Depth: 10,
|
||||
},
|
||||
RegexFilter: "*-dev",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "",
|
||||
Description: "for test",
|
||||
ScriptPath: "Jenkinsfile",
|
||||
SourceType: "gitlab",
|
||||
GitlabSource: &devopsv1alpha3.GitlabSource{
|
||||
DiscoverPRFromForks: &devopsv1alpha3.DiscoverPRFromForks{
|
||||
Strategy: 1,
|
||||
Trust: 2,
|
||||
},
|
||||
//CloneOption: &devopsv1alpha3.GitCloneOption{
|
||||
// Depth: -1,
|
||||
// Timeout: -1,
|
||||
//},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "",
|
||||
Description: "for test",
|
||||
ScriptPath: "Jenkinsfile",
|
||||
SourceType: "gitlab",
|
||||
GitlabSource: &devopsv1alpha3.GitlabSource{
|
||||
DiscoverPRFromForks: &devopsv1alpha3.DiscoverPRFromForks{
|
||||
Strategy: 1,
|
||||
Trust: 3,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "",
|
||||
Description: "for test",
|
||||
ScriptPath: "Jenkinsfile",
|
||||
SourceType: "gitlab",
|
||||
GitlabSource: &devopsv1alpha3.GitlabSource{
|
||||
DiscoverPRFromForks: &devopsv1alpha3.DiscoverPRFromForks{
|
||||
Strategy: 1,
|
||||
Trust: 4,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "",
|
||||
Description: "for test",
|
||||
ScriptPath: "Jenkinsfile",
|
||||
SourceType: "bitbucket_server",
|
||||
TimerTrigger: &devopsv1alpha3.TimerTrigger{
|
||||
Interval: "12345566",
|
||||
},
|
||||
BitbucketServerSource: &devopsv1alpha3.BitbucketServerSource{
|
||||
Owner: "kubesphere",
|
||||
Repo: "devops",
|
||||
CredentialId: "github",
|
||||
ApiUri: "https://api.github.com",
|
||||
DiscoverBranches: 1,
|
||||
DiscoverPRFromOrigin: 2,
|
||||
DiscoverPRFromForks: &devopsv1alpha3.DiscoverPRFromForks{
|
||||
Strategy: 1,
|
||||
Trust: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Name: "",
|
||||
Description: "for test",
|
||||
ScriptPath: "Jenkinsfile",
|
||||
SourceType: "svn",
|
||||
TimerTrigger: &devopsv1alpha3.TimerTrigger{
|
||||
Interval: "12345566",
|
||||
},
|
||||
SvnSource: &devopsv1alpha3.SvnSource{
|
||||
Remote: "https://api.svn.com/bcd",
|
||||
CredentialId: "svn",
|
||||
Excludes: "truck",
|
||||
Includes: "tag/*",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "",
|
||||
Description: "for test",
|
||||
ScriptPath: "Jenkinsfile",
|
||||
SourceType: "single_svn",
|
||||
TimerTrigger: &devopsv1alpha3.TimerTrigger{
|
||||
Interval: "12345566",
|
||||
},
|
||||
SingleSvnSource: &devopsv1alpha3.SingleSvnSource{
|
||||
Remote: "https://api.svn.com/bcd",
|
||||
CredentialId: "svn",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, input := range inputs {
|
||||
outputString, err := createMultiBranchPipelineConfigXml("", input)
|
||||
if err != nil {
|
||||
t.Fatalf("should not get error %+v", err)
|
||||
}
|
||||
output, err := parseMultiBranchPipelineConfigXml(outputString)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("should not get error %+v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(input, output) {
|
||||
t.Fatalf("\ninput [%+v] \noutput [%+v] \nshould equal ", input.GitlabSource.CloneOption, output.GitlabSource.CloneOption)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MultiBranchPipelineCloneConfig(t *testing.T) {
|
||||
|
||||
inputs := []*devopsv1alpha3.MultiBranchPipeline{
|
||||
{
|
||||
Name: "",
|
||||
Description: "for test",
|
||||
ScriptPath: "Jenkinsfile",
|
||||
SourceType: "git",
|
||||
GitSource: &devopsv1alpha3.GitSource{
|
||||
Url: "https://github.com/kubesphere/devops",
|
||||
CredentialId: "git",
|
||||
DiscoverBranches: true,
|
||||
CloneOption: &devopsv1alpha3.GitCloneOption{
|
||||
Shallow: false,
|
||||
Depth: 3,
|
||||
Timeout: 20,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "",
|
||||
Description: "for test",
|
||||
ScriptPath: "Jenkinsfile",
|
||||
SourceType: "github",
|
||||
GitHubSource: &devopsv1alpha3.GithubSource{
|
||||
Owner: "kubesphere",
|
||||
Repo: "devops",
|
||||
CredentialId: "github",
|
||||
ApiUri: "https://api.github.com",
|
||||
DiscoverBranches: 1,
|
||||
DiscoverPRFromOrigin: 2,
|
||||
DiscoverPRFromForks: &devopsv1alpha3.DiscoverPRFromForks{
|
||||
Strategy: 1,
|
||||
Trust: 1,
|
||||
},
|
||||
CloneOption: &devopsv1alpha3.GitCloneOption{
|
||||
Shallow: false,
|
||||
Depth: 3,
|
||||
Timeout: 20,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "",
|
||||
Description: "for test",
|
||||
ScriptPath: "Jenkinsfile",
|
||||
SourceType: "gitlab",
|
||||
GitlabSource: &devopsv1alpha3.GitlabSource{
|
||||
DiscoverPRFromForks: &devopsv1alpha3.DiscoverPRFromForks{
|
||||
Strategy: 1,
|
||||
Trust: 1,
|
||||
},
|
||||
CloneOption: &devopsv1alpha3.GitCloneOption{
|
||||
Depth: -1,
|
||||
Timeout: -1,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, input := range inputs {
|
||||
outputString, err := createMultiBranchPipelineConfigXml("", input)
|
||||
if err != nil {
|
||||
t.Fatalf("should not get error %+v", err)
|
||||
}
|
||||
output, err := parseMultiBranchPipelineConfigXml(outputString)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("should not get error %+v", err)
|
||||
}
|
||||
|
||||
// we'll give it a default value if it's negative
|
||||
if input.GitlabSource != nil && input.GitlabSource.CloneOption != nil {
|
||||
if input.GitlabSource.CloneOption.Timeout < 0 {
|
||||
input.GitlabSource.CloneOption.Timeout = 10
|
||||
}
|
||||
if input.GitlabSource.CloneOption.Depth < 0 {
|
||||
input.GitlabSource.CloneOption.Depth = 1
|
||||
}
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(input, output) {
|
||||
t.Fatalf("input [%+v] output [%+v] should equal ", input.GitlabSource, output.GitlabSource)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Test_MultiBranchPipelineRegexFilter round-trips the branch-name regex
// filter for git and github sources through the config XML.
func Test_MultiBranchPipelineRegexFilter(t *testing.T) {

	inputs := []*devopsv1alpha3.MultiBranchPipeline{
		{
			Name:        "",
			Description: "for test",
			ScriptPath:  "Jenkinsfile",
			SourceType:  "git",
			GitSource: &devopsv1alpha3.GitSource{
				Url:              "https://github.com/kubesphere/devops",
				CredentialId:     "git",
				DiscoverBranches: true,
				RegexFilter:      ".*",
			},
		},
		{
			Name:        "",
			Description: "for test",
			ScriptPath:  "Jenkinsfile",
			SourceType:  "github",
			GitHubSource: &devopsv1alpha3.GithubSource{
				Owner:                "kubesphere",
				Repo:                 "devops",
				CredentialId:         "github",
				ApiUri:               "https://api.github.com",
				DiscoverBranches:     1,
				DiscoverPRFromOrigin: 2,
				DiscoverPRFromForks: &devopsv1alpha3.DiscoverPRFromForks{
					Strategy: 1,
					Trust:    1,
				},
				RegexFilter: ".*",
			},
		},
	}

	for _, input := range inputs {
		outputString, err := createMultiBranchPipelineConfigXml("", input)
		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		output, err := parseMultiBranchPipelineConfigXml(outputString)

		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		if !reflect.DeepEqual(input, output) {
			t.Fatalf("input [%+v] output [%+v] should equal ", input, output)
		}
	}

}
|
||||
|
||||
// Test_MultiBranchPipelineMultibranchTrigger round-trips the multi-branch
// job trigger (create/delete action job lists) through the config XML:
// both fields set, create-only, and delete-only.
func Test_MultiBranchPipelineMultibranchTrigger(t *testing.T) {

	inputs := []*devopsv1alpha3.MultiBranchPipeline{
		{
			Name:        "",
			Description: "for test",
			ScriptPath:  "Jenkinsfile",
			SourceType:  "github",
			GitHubSource: &devopsv1alpha3.GithubSource{
				Owner:                "kubesphere",
				Repo:                 "devops",
				CredentialId:         "github",
				ApiUri:               "https://api.github.com",
				DiscoverBranches:     1,
				DiscoverPRFromOrigin: 2,
				DiscoverPRFromForks: &devopsv1alpha3.DiscoverPRFromForks{
					Strategy: 1,
					Trust:    1,
				},
				RegexFilter: ".*",
			},
			MultiBranchJobTrigger: &devopsv1alpha3.MultiBranchJobTrigger{
				CreateActionJobsToTrigger: "abc",
				DeleteActionJobsToTrigger: "ddd",
			},
		},
		{
			Name:        "",
			Description: "for test",
			ScriptPath:  "Jenkinsfile",
			SourceType:  "github",
			GitHubSource: &devopsv1alpha3.GithubSource{
				Owner:                "kubesphere",
				Repo:                 "devops",
				CredentialId:         "github",
				ApiUri:               "https://api.github.com",
				DiscoverBranches:     1,
				DiscoverPRFromOrigin: 2,
				DiscoverPRFromForks: &devopsv1alpha3.DiscoverPRFromForks{
					Strategy: 1,
					Trust:    1,
				},
				RegexFilter: ".*",
			},
			MultiBranchJobTrigger: &devopsv1alpha3.MultiBranchJobTrigger{
				CreateActionJobsToTrigger: "abc",
			},
		},
		{
			Name:        "",
			Description: "for test",
			ScriptPath:  "Jenkinsfile",
			SourceType:  "github",
			GitHubSource: &devopsv1alpha3.GithubSource{
				Owner:                "kubesphere",
				Repo:                 "devops",
				CredentialId:         "github",
				ApiUri:               "https://api.github.com",
				DiscoverBranches:     1,
				DiscoverPRFromOrigin: 2,
				DiscoverPRFromForks: &devopsv1alpha3.DiscoverPRFromForks{
					Strategy: 1,
					Trust:    1,
				},
				RegexFilter: ".*",
			},
			MultiBranchJobTrigger: &devopsv1alpha3.MultiBranchJobTrigger{
				DeleteActionJobsToTrigger: "ddd",
			},
		},
	}

	for _, input := range inputs {
		outputString, err := createMultiBranchPipelineConfigXml("", input)
		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		output, err := parseMultiBranchPipelineConfigXml(outputString)

		if err != nil {
			t.Fatalf("should not get error %+v", err)
		}
		if !reflect.DeepEqual(input, output) {
			t.Fatalf("input [%+v] output [%+v] should equal ", input, output)
		}
	}

}
|
||||
@@ -1,168 +0,0 @@
|
||||
/*
|
||||
Copyright 2018 The KubeSphere Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// ValidateJenkinsfileResponse is the payload returned by Jenkins'
// /pipeline-model-converter/validateJenkinsfile endpoint.
type ValidateJenkinsfileResponse struct {
	Status string `json:"status"` // request status reported by Jenkins
	Data   struct {
		Result string                   `json:"result"` // validation verdict
		Errors []map[string]interface{} `json:"errors"` // per-problem details, empty on success
	} `json:"data"`
}
|
||||
type ValidatePipelineJsonResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data struct {
|
||||
Result string `json:"result"`
|
||||
Errors []map[string]interface{} `json:"errors"`
|
||||
}
|
||||
}
|
||||
|
||||
// PipelineJsonToJenkinsfileResponse is the payload returned by Jenkins'
// /pipeline-model-converter/toJenkinsfile endpoint.
type PipelineJsonToJenkinsfileResponse struct {
	Status string `json:"status"` // request status reported by Jenkins
	Data   struct {
		Result      string                   `json:"result"`      // conversion verdict
		Errors      []map[string]interface{} `json:"errors"`      // per-problem details, empty on success
		Jenkinsfile string                   `json:"jenkinsfile"` // generated Jenkinsfile text
	} `json:"data"`
}
|
||||
|
||||
// JenkinsfileToPipelineJsonResponse is the payload returned by Jenkins'
// /pipeline-model-converter/toJson endpoint.
type JenkinsfileToPipelineJsonResponse struct {
	Status string `json:"status"` // request status reported by Jenkins
	Data   struct {
		Result string                   `json:"result"` // conversion verdict
		Errors []map[string]interface{} `json:"errors"` // per-problem details, empty on success
		Json   map[string]interface{}   `json:"json"`   // pipeline model as a JSON object
	} `json:"data"`
}
|
||||
// StepJsonToJenkinsfileResponse is the payload returned by Jenkins'
// /pipeline-model-converter/stepsToJenkinsfile endpoint.
type StepJsonToJenkinsfileResponse struct {
	Status string `json:"status"` // request status reported by Jenkins
	Data   struct {
		Result      string                   `json:"result"`      // conversion verdict
		Errors      []map[string]interface{} `json:"errors"`      // per-problem details, empty on success
		Jenkinsfile string                   `json:"jenkinsfile"` // generated step script text
	} `json:"data"`
}
|
||||
|
||||
// StepsJenkinsfileToJsonResponse is the payload returned by Jenkins'
// /pipeline-model-converter/stepsToJson endpoint. Unlike the full-pipeline
// conversion, Json here is a list — one object per step.
type StepsJenkinsfileToJsonResponse struct {
	Status string `json:"status"` // request status reported by Jenkins
	Data   struct {
		Result string                   `json:"result"` // conversion verdict
		Errors []map[string]interface{} `json:"errors"` // per-problem details, empty on success
		Json   []map[string]interface{} `json:"json"`   // one JSON object per converted step
	} `json:"data"`
}
|
||||
|
||||
func (j *Jenkins) ValidateJenkinsfile(jenkinsfile string) (*ValidateJenkinsfileResponse, error) {
|
||||
responseStrut := &ValidateJenkinsfileResponse{}
|
||||
query := map[string]string{
|
||||
"jenkinsfile": jenkinsfile,
|
||||
}
|
||||
response, err := j.Requester.PostForm("/pipeline-model-converter/validateJenkinsfile", nil, responseStrut, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return nil, errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return responseStrut, nil
|
||||
|
||||
}
|
||||
|
||||
func (j *Jenkins) ValidatePipelineJson(json string) (*ValidatePipelineJsonResponse, error) {
|
||||
|
||||
responseStruct := &ValidatePipelineJsonResponse{}
|
||||
query := map[string]string{
|
||||
"json": json,
|
||||
}
|
||||
response, err := j.Requester.PostForm("/pipeline-model-converter/validateJson", nil, responseStruct, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return nil, errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return responseStruct, nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) PipelineJsonToJenkinsfile(json string) (*PipelineJsonToJenkinsfileResponse, error) {
|
||||
responseStrut := &PipelineJsonToJenkinsfileResponse{}
|
||||
query := map[string]string{
|
||||
"json": json,
|
||||
}
|
||||
response, err := j.Requester.PostForm("/pipeline-model-converter/toJenkinsfile", nil, responseStrut, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return nil, errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return responseStrut, nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) JenkinsfileToPipelineJson(jenkinsfile string) (*JenkinsfileToPipelineJsonResponse, error) {
|
||||
responseStrut := &JenkinsfileToPipelineJsonResponse{}
|
||||
query := map[string]string{
|
||||
"jenkinsfile": jenkinsfile,
|
||||
}
|
||||
response, err := j.Requester.PostForm("/pipeline-model-converter/toJson", nil, responseStrut, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return nil, errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return responseStrut, nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) StepsJsonToJenkinsfile(json string) (*StepJsonToJenkinsfileResponse, error) {
|
||||
responseStrut := &StepJsonToJenkinsfileResponse{}
|
||||
query := map[string]string{
|
||||
"json": json,
|
||||
}
|
||||
response, err := j.Requester.PostForm("/pipeline-model-converter/stepsToJenkinsfile", nil, responseStrut, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return nil, errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return responseStrut, nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) StepsJenkinsfileToJson(jenkinsfile string) (*StepsJenkinsfileToJsonResponse, error) {
|
||||
responseStrut := &StepsJenkinsfileToJsonResponse{}
|
||||
query := map[string]string{
|
||||
"jenkinsfile": jenkinsfile,
|
||||
}
|
||||
response, err := j.Requester.PostForm("/pipeline-model-converter/stepsToJson", nil, responseStrut, query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return nil, errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return responseStrut, nil
|
||||
}
|
||||
@@ -1,108 +0,0 @@
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
// TestResetPaging checks that Pipeline.resetPaging rewrites the request
// URL's start/limit query parameters while preserving the path.
func TestResetPaging(t *testing.T) {
	table := []struct {
		path     string // request path under test
		rawQuery string // pre-existing raw query on the URL
		start    int    // start value to set
		limit    int    // limit value to set
		hasErr   bool   // whether resetPaging is expected to fail
		message  string // case description for failure output
	}{{
		start:   0,
		limit:   10,
		hasErr:  false,
		message: "without query, should no errors",
	}, {
		path: "/fake/path",
		// NOTE(review): "limit1" (no '=') looks like a typo for "limit=1";
		// presumably intentional malformed-query input — confirm.
		rawQuery: "?start=1&limit1",
		start:    0,
		limit:    10,
		hasErr:   false,
		message:  "without a query",
	}, {
		path:     "/fake/path",
		rawQuery: "?start=1&limit1",
		start:    3,
		limit:    13,
		hasErr:   false,
		message:  "without a query",
	}}

	for index, item := range table {
		pip := &Pipeline{
			Path: item.path,
			HttpParameters: &devops.HttpParameters{
				Url: &url.URL{
					Path:     item.path,
					RawQuery: item.rawQuery,
				},
			},
		}

		resultPath, err := pip.resetPaging(item.start, item.limit)
		if item.hasErr {
			assert.NotNil(t, err, printTestMessage(index, item.message))
		} else {
			assert.Nil(t, err, printTestMessage(index, item.message))

			// Path must be untouched; start/limit must reflect the new values.
			assert.Equal(t, item.path, resultPath.Path, printTestMessage(index, item.message))
			assert.Equal(t, strconv.Itoa(item.start), pip.HttpParameters.Url.Query().Get("start"),
				printTestMessage(index, item.message))
			assert.Equal(t, strconv.Itoa(item.limit), pip.HttpParameters.Url.Query().Get("limit"),
				printTestMessage(index, item.message))
		}
	}
}
|
||||
|
||||
// TestParsePaging checks that Pipeline.parsePaging extracts the start and
// limit query parameters from the request URL.
func TestParsePaging(t *testing.T) {
	table := []struct {
		targetUrl string // full URL whose query carries start/limit
		start     int    // expected parsed start
		limit     int    // expected parsed limit
		message   string // case description for failure output
	}{{
		targetUrl: "http://localhost?start=0&limit=0",
		start:     0,
		limit:     0,
		message:   "should be success",
	}, {
		targetUrl: "http://localhost?start=1&limit=10",
		start:     1,
		limit:     10,
		message:   "should be success",
	}, {
		targetUrl: "http://localhost?start=5&limit=55",
		start:     5,
		limit:     55,
		message:   "should be success",
	}}

	for index, item := range table {
		pipUrl, _ := url.Parse(item.targetUrl)
		pip := &Pipeline{
			HttpParameters: &devops.HttpParameters{
				Url: pipUrl,
			},
		}
		resultStart, resultLimit := pip.parsePaging()

		assert.Equal(t, item.start, resultStart, printTestMessage(index, item.message))
		assert.Equal(t, item.limit, resultLimit, printTestMessage(index, item.message))
	}
}
|
||||
|
||||
// printTestMessage formats a table-test failure message tagged with the
// failing case's index.
func printTestMessage(index int, message string) string {
	return "index: " + strconv.Itoa(index) + ", message: " + message
}
|
||||
@@ -1,56 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
// DevOpsProjectRoleResponse pairs a ProjectRole lookup result with the error
// (if any) produced while fetching it.
// NOTE(review): presumably used to report results asynchronously (e.g. over
// a channel) — confirm with callers; no usage is visible in this file.
type DevOpsProjectRoleResponse struct {
	ProjectRole *ProjectRole // fetched role; expected nil when Err is set
	Err         error        // fetch failure; nil on success
}
|
||||
|
||||
func (j *Jenkins) CreateDevOpsProject(projectId string) (string, error) {
|
||||
_, err := j.CreateFolder(projectId, "")
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return "", restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
return projectId, nil
|
||||
}
|
||||
|
||||
func (j *Jenkins) DeleteDevOpsProject(projectId string) (err error) {
|
||||
_, err = j.DeleteJob(projectId)
|
||||
if err != nil {
|
||||
return restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetDevOpsProject(projectId string) (string, error) {
|
||||
job, err := j.GetJob(projectId)
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return "", restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
|
||||
}
|
||||
return job.GetName(), nil
|
||||
}
|
||||
@@ -1,189 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/emicklei/go-restful/v3"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
devopsv1alpha3 "kubesphere.io/api/devops/v1alpha3"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
func (j *Jenkins) CreateProjectPipeline(projectId string, pipeline *devopsv1alpha3.Pipeline) (string, error) {
|
||||
switch pipeline.Spec.Type {
|
||||
case devopsv1alpha3.NoScmPipelineType:
|
||||
|
||||
config, err := createPipelineConfigXml(pipeline.Spec.Pipeline)
|
||||
if err != nil {
|
||||
return "", restful.NewError(http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
||||
job, err := j.GetJob(pipeline.Name, projectId)
|
||||
if job != nil {
|
||||
err := fmt.Errorf("job name [%s] has been used", job.GetName())
|
||||
return "", restful.NewError(http.StatusConflict, err.Error())
|
||||
}
|
||||
|
||||
if err != nil && devops.GetDevOpsStatusCode(err) != http.StatusNotFound {
|
||||
return "", restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
|
||||
_, err = j.CreateJobInFolder(config, pipeline.Name, projectId)
|
||||
if err != nil {
|
||||
return "", restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
|
||||
return pipeline.Name, nil
|
||||
case devopsv1alpha3.MultiBranchPipelineType:
|
||||
config, err := createMultiBranchPipelineConfigXml(projectId, pipeline.Spec.MultiBranchPipeline)
|
||||
if err != nil {
|
||||
return "", restful.NewError(http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
||||
job, err := j.GetJob(pipeline.Name, projectId)
|
||||
if job != nil {
|
||||
err := fmt.Errorf("job name [%s] has been used", job.GetName())
|
||||
return "", restful.NewError(http.StatusConflict, err.Error())
|
||||
}
|
||||
|
||||
if err != nil && devops.GetDevOpsStatusCode(err) != http.StatusNotFound {
|
||||
return "", restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
|
||||
_, err = j.CreateJobInFolder(config, pipeline.Name, projectId)
|
||||
if err != nil {
|
||||
return "", restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
|
||||
return pipeline.Name, nil
|
||||
|
||||
default:
|
||||
err := fmt.Errorf("error unsupport job type")
|
||||
klog.Errorf("%+v", err)
|
||||
return "", restful.NewError(http.StatusBadRequest, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (j *Jenkins) DeleteProjectPipeline(projectId string, pipelineId string) (string, error) {
|
||||
_, err := j.DeleteJob(pipelineId, projectId)
|
||||
if err != nil {
|
||||
return "", restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
return pipelineId, nil
|
||||
|
||||
}
|
||||
func (j *Jenkins) UpdateProjectPipeline(projectId string, pipeline *devopsv1alpha3.Pipeline) (string, error) {
|
||||
switch pipeline.Spec.Type {
|
||||
case devopsv1alpha3.NoScmPipelineType:
|
||||
|
||||
config, err := createPipelineConfigXml(pipeline.Spec.Pipeline)
|
||||
if err != nil {
|
||||
return "", restful.NewError(http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
||||
job, err := j.GetJob(pipeline.Name, projectId)
|
||||
|
||||
if err != nil {
|
||||
return "", restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
|
||||
err = job.UpdateConfig(config)
|
||||
if err != nil {
|
||||
return "", restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
|
||||
return pipeline.Name, nil
|
||||
case devopsv1alpha3.MultiBranchPipelineType:
|
||||
|
||||
config, err := createMultiBranchPipelineConfigXml(projectId, pipeline.Spec.MultiBranchPipeline)
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
|
||||
return "", restful.NewError(http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
||||
job, err := j.GetJob(pipeline.Spec.MultiBranchPipeline.Name, projectId)
|
||||
|
||||
if err != nil {
|
||||
return "", restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
|
||||
err = job.UpdateConfig(config)
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return "", restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
|
||||
return pipeline.Name, nil
|
||||
|
||||
default:
|
||||
err := fmt.Errorf("error unsupport job type")
|
||||
klog.Errorf("%+v", err)
|
||||
return "", restful.NewError(http.StatusBadRequest, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (j *Jenkins) GetProjectPipelineConfig(projectId, pipelineId string) (*devopsv1alpha3.Pipeline, error) {
|
||||
job, err := j.GetJob(pipelineId, projectId)
|
||||
if err != nil {
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
switch job.Raw.Class {
|
||||
case "org.jenkinsci.plugins.workflow.job.WorkflowJob":
|
||||
config, err := job.GetConfig()
|
||||
if err != nil {
|
||||
return nil, restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
pipeline, err := parsePipelineConfigXml(config)
|
||||
if err != nil {
|
||||
return nil, restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
pipeline.Name = pipelineId
|
||||
return &devopsv1alpha3.Pipeline{
|
||||
Spec: devopsv1alpha3.PipelineSpec{
|
||||
Type: devopsv1alpha3.NoScmPipelineType,
|
||||
Pipeline: pipeline,
|
||||
},
|
||||
}, nil
|
||||
|
||||
case "org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject":
|
||||
config, err := job.GetConfig()
|
||||
if err != nil {
|
||||
return nil, restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
pipeline, err := parseMultiBranchPipelineConfigXml(config)
|
||||
if err != nil {
|
||||
return nil, restful.NewError(devops.GetDevOpsStatusCode(err), err.Error())
|
||||
}
|
||||
pipeline.Name = pipelineId
|
||||
return &devopsv1alpha3.Pipeline{
|
||||
Spec: devopsv1alpha3.PipelineSpec{
|
||||
Type: devopsv1alpha3.MultiBranchPipelineType,
|
||||
MultiBranchPipeline: pipeline,
|
||||
},
|
||||
}, nil
|
||||
default:
|
||||
klog.Errorf("%+v", err)
|
||||
return nil, restful.NewError(http.StatusBadRequest, err.Error())
|
||||
}
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
// SendPureRequest proxies a request to the Jenkins server and returns
// only the response body, discarding the response headers.
//
// TODO: deprecated, use SendJenkinsRequestWithHeaderResp() instead
// (NOTE(review): no function of that exact name is visible here; the
// wrapper actually delegates to SendPureRequestWithHeaderResp below —
// confirm which name the comment intended.)
func (j *Jenkins) SendPureRequest(path string, httpParameters *devops.HttpParameters) ([]byte, error) {
	resBody, _, err := j.SendPureRequestWithHeaderResp(path, httpParameters)

	return resBody, err
}
|
||||
|
||||
// provider request header to call jenkins api.
|
||||
// transfer bearer token to basic token for inner Oauth and Jeknins
|
||||
func (j *Jenkins) SendPureRequestWithHeaderResp(path string, httpParameters *devops.HttpParameters) ([]byte, http.Header, error) {
|
||||
apiURL, err := url.Parse(j.Server + path)
|
||||
if err != nil {
|
||||
klog.V(8).Info(err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
apiURL.RawQuery = httpParameters.Url.RawQuery
|
||||
client := &http.Client{Timeout: 30 * time.Second}
|
||||
|
||||
header := httpParameters.Header
|
||||
SetBasicBearTokenHeader(&header)
|
||||
|
||||
newRequest := &http.Request{
|
||||
Method: httpParameters.Method,
|
||||
URL: apiURL,
|
||||
Header: header,
|
||||
Body: httpParameters.Body,
|
||||
Form: httpParameters.Form,
|
||||
PostForm: httpParameters.PostForm,
|
||||
}
|
||||
|
||||
resp, err := client.Do(newRequest)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
resBody, _ := getRespBody(resp)
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= http.StatusBadRequest {
|
||||
klog.Errorf("%+v", string(resBody))
|
||||
jkerr := new(JkError)
|
||||
jkerr.Code = resp.StatusCode
|
||||
jkerr.Message = string(resBody)
|
||||
return nil, nil, jkerr
|
||||
}
|
||||
|
||||
return resBody, resp.Header, nil
|
||||
}
|
||||
@@ -1,488 +0,0 @@
|
||||
// Copyright 2015 Vadim Kravcenko
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/golang-jwt/jwt/v4"
|
||||
|
||||
authtoken "kubesphere.io/kubesphere/pkg/apiserver/authentication/token"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
// Request Methods
|
||||
|
||||
// APIRequest describes one call to the Jenkins HTTP API before it is
// turned into an *http.Request by Requester.Do and friends.
type APIRequest struct {
	Method   string      // HTTP verb
	Endpoint string      // path appended to Requester.Base
	Payload  io.Reader   // request body (may be nil)
	Headers  http.Header // extra headers, e.g. Content-Type and the CSRF crumb
	Suffix   string      // appended after Endpoint, e.g. "api/json"
}
|
||||
|
||||
// set basic token for jenkins auth
|
||||
func SetBasicBearTokenHeader(header *http.Header) error {
|
||||
bearTokenArray := strings.Split(header.Get("Authorization"), " ")
|
||||
bearFlag := bearTokenArray[0]
|
||||
var err error
|
||||
if strings.ToLower(bearFlag) == "bearer" {
|
||||
bearToken := bearTokenArray[1]
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
claim := authtoken.Claims{}
|
||||
parser := jwt.Parser{}
|
||||
_, _, err = parser.ParseUnverified(bearToken, &claim)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
creds := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", claim.Username, bearToken)))
|
||||
header.Set("Authorization", fmt.Sprintf("Basic %s", creds))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetHeader sets a single header on the request and returns the request
// itself so calls can be chained.
func (ar *APIRequest) SetHeader(key string, value string) *APIRequest {
	ar.Headers.Set(key, value)
	return ar
}
|
||||
|
||||
func NewAPIRequest(method string, endpoint string, payload io.Reader) *APIRequest {
|
||||
var headers = http.Header{}
|
||||
var suffix string
|
||||
ar := &APIRequest{method, endpoint, payload, headers, suffix}
|
||||
return ar
|
||||
}
|
||||
|
||||
// Requester issues HTTP calls against a Jenkins server, optionally with
// basic-auth credentials and a bound on concurrent in-flight requests.
type Requester struct {
	Base      string // base URL of the Jenkins server, prepended to every endpoint
	BasicAuth *BasicAuth
	Client    *http.Client
	CACert    []byte // CA certificate; not used within this file section — TODO confirm usage
	SslVerify bool   // TLS verification toggle; not used within this file section — TODO confirm usage
	// connControl is used as a counting semaphore: Do/DoGet/DoPostForm send
	// before issuing a request and receive once it completes.
	connControl chan struct{}
}
|
||||
|
||||
func (r *Requester) SetCrumb(ar *APIRequest) error {
|
||||
crumbData := map[string]string{}
|
||||
response, err := r.GetJSON("/crumbIssuer/api/json", &crumbData, nil)
|
||||
if err != nil {
|
||||
jenkinsError, ok := err.(*devops.ErrorResponse)
|
||||
if ok && jenkinsError.Response.StatusCode == http.StatusNotFound {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode == 200 && crumbData["crumbRequestField"] != "" {
|
||||
ar.SetHeader(crumbData["crumbRequestField"], crumbData["crumb"])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Requester) PostJSON(endpoint string, payload io.Reader, responseStruct interface{}, querystring map[string]string) (*http.Response, error) {
|
||||
ar := NewAPIRequest("POST", endpoint, payload)
|
||||
if err := r.SetCrumb(ar); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ar.SetHeader("Content-Type", "application/x-www-form-urlencoded")
|
||||
ar.Suffix = "api/json"
|
||||
return r.Do(ar, responseStruct, querystring)
|
||||
}
|
||||
|
||||
func (r *Requester) Post(endpoint string, payload io.Reader, responseStruct interface{}, querystring map[string]string) (*http.Response, error) {
|
||||
ar := NewAPIRequest("POST", endpoint, payload)
|
||||
if err := r.SetCrumb(ar); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ar.SetHeader("Content-Type", "application/x-www-form-urlencoded")
|
||||
ar.Suffix = ""
|
||||
return r.Do(ar, responseStruct, querystring)
|
||||
}
|
||||
func (r *Requester) PostForm(endpoint string, payload io.Reader, responseStruct interface{}, formString map[string]string) (*http.Response, error) {
|
||||
ar := NewAPIRequest("POST", endpoint, payload)
|
||||
if err := r.SetCrumb(ar); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ar.SetHeader("Content-Type", "application/x-www-form-urlencoded")
|
||||
ar.Suffix = ""
|
||||
return r.DoPostForm(ar, responseStruct, formString)
|
||||
}
|
||||
|
||||
func (r *Requester) PostFiles(endpoint string, payload io.Reader, responseStruct interface{}, querystring map[string]string, files []string) (*http.Response, error) {
|
||||
ar := NewAPIRequest("POST", endpoint, payload)
|
||||
if err := r.SetCrumb(ar); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.Do(ar, responseStruct, querystring, files)
|
||||
}
|
||||
|
||||
func (r *Requester) PostXML(endpoint string, xml string, responseStruct interface{}, querystring map[string]string) (*http.Response, error) {
|
||||
payload := bytes.NewBuffer([]byte(xml))
|
||||
ar := NewAPIRequest("POST", endpoint, payload)
|
||||
if err := r.SetCrumb(ar); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ar.SetHeader("Content-Type", "application/xml;charset=utf-8")
|
||||
ar.Suffix = ""
|
||||
return r.Do(ar, responseStruct, querystring)
|
||||
}
|
||||
|
||||
func (r *Requester) GetJSON(endpoint string, responseStruct interface{}, query map[string]string) (*http.Response, error) {
|
||||
ar := NewAPIRequest("GET", endpoint, nil)
|
||||
ar.SetHeader("Content-Type", "application/json")
|
||||
ar.Suffix = "api/json"
|
||||
return r.Do(ar, responseStruct, query)
|
||||
}
|
||||
|
||||
func (r *Requester) GetXML(endpoint string, responseStruct interface{}, query map[string]string) (*http.Response, error) {
|
||||
ar := NewAPIRequest("GET", endpoint, nil)
|
||||
ar.SetHeader("Content-Type", "application/xml")
|
||||
ar.Suffix = ""
|
||||
return r.Do(ar, responseStruct, query)
|
||||
}
|
||||
|
||||
func (r *Requester) Get(endpoint string, responseStruct interface{}, querystring map[string]string) (*http.Response, error) {
|
||||
ar := NewAPIRequest("GET", endpoint, nil)
|
||||
ar.Suffix = ""
|
||||
return r.Do(ar, responseStruct, querystring)
|
||||
}
|
||||
|
||||
func (r *Requester) GetHtml(endpoint string, responseStruct interface{}, querystring map[string]string) (*http.Response, error) {
|
||||
ar := NewAPIRequest("GET", endpoint, nil)
|
||||
ar.Suffix = ""
|
||||
return r.DoGet(ar, responseStruct, querystring)
|
||||
}
|
||||
|
||||
// SetClient swaps the underlying HTTP client and returns the Requester
// for chaining.
func (r *Requester) SetClient(client *http.Client) *Requester {
	r.Client = client
	return r
}
|
||||
|
||||
// Add auth on redirect if required.
//
// redirectPolicyFunc re-applies the basic-auth credentials to a request
// the HTTP client is about to follow on redirect (presumably because the
// Authorization header is dropped on redirect — confirm against the
// http.Client.CheckRedirect wiring, which is outside this view).
//
//nolint:unused
func (r *Requester) redirectPolicyFunc(req *http.Request, via []*http.Request) error {
	if r.BasicAuth != nil {
		req.SetBasicAuth(r.BasicAuth.Username, r.BasicAuth.Password)
	}
	return nil
}
|
||||
|
||||
// DoGet executes ar against the Jenkins server without forcing a trailing
// slash onto the endpoint (the only difference from Do). Each element of
// options is interpreted by type: a map[string]string becomes the query
// string; a []string switches the request to a multipart file upload of
// those paths. The response is decoded into responseStruct — raw body for
// a *string, JSON otherwise.
//
// NOTE(review): this is almost a line-for-line copy of Do; consider
// unifying the two.
func (r *Requester) DoGet(ar *APIRequest, responseStruct interface{}, options ...interface{}) (*http.Response, error) {
	fileUpload := false
	var files []string
	URL, err := url.Parse(r.Base + ar.Endpoint + ar.Suffix)

	if err != nil {
		return nil, err
	}

	// Interpret the variadic options by dynamic type.
	for _, o := range options {
		switch v := o.(type) {
		case map[string]string:

			querystring := make(url.Values)
			for key, val := range v {
				querystring.Set(key, val)
			}

			URL.RawQuery = querystring.Encode()
		case []string:
			fileUpload = true
			files = v
		}
	}
	var req *http.Request
	if fileUpload {
		// Build a multipart body: one form file per path, plus the JSON
		// payload decoded into individual form fields.
		body := &bytes.Buffer{}
		writer := multipart.NewWriter(body)
		for _, file := range files {
			fileData, err := os.Open(file)
			if err != nil {
				Error.Println(err.Error())
				return nil, err
			}

			part, err := writer.CreateFormFile("file", filepath.Base(file))
			if err != nil {
				Error.Println(err.Error())
				return nil, err
			}
			if _, err = io.Copy(part, fileData); err != nil {
				return nil, err
			}
			// Deferred inside the loop: every file stays open until DoGet returns.
			defer fileData.Close()
		}
		var params map[string]string
		// NOTE(review): the decode error is ignored, and a nil ar.Payload
		// would panic here — confirm callers always pass a payload with files.
		json.NewDecoder(ar.Payload).Decode(&params)
		for key, val := range params {
			if err = writer.WriteField(key, val); err != nil {
				return nil, err
			}
		}
		if err = writer.Close(); err != nil {
			return nil, err
		}
		req, err = http.NewRequest(ar.Method, URL.String(), body)
		if err != nil {
			return nil, err
		}
		req.Header.Set("Content-Type", writer.FormDataContentType())
	} else {
		req, err = http.NewRequest(ar.Method, URL.String(), ar.Payload)
		if err != nil {
			return nil, err
		}
	}

	if r.BasicAuth != nil {
		req.SetBasicAuth(r.BasicAuth.Username, r.BasicAuth.Password)
	}
	req.Close = true
	req.Header.Add("Accept", "*/*")
	for k := range ar.Headers {
		req.Header.Add(k, ar.Headers.Get(k))
	}
	// connControl acts as a counting semaphore bounding concurrent requests.
	r.connControl <- struct{}{}
	if response, err := r.Client.Do(req); err != nil {
		<-r.connControl
		return nil, err
	} else {
		<-r.connControl
		// Jenkins signals some failures via an X-Error header.
		errorText := response.Header.Get("X-Error")
		if errorText != "" {
			return nil, errors.New(errorText)
		}
		err := CheckResponse(response)
		if err != nil {
			return nil, err
		}
		// A *string target receives the raw body; anything else is JSON-decoded.
		switch responseStruct.(type) {
		case *string:
			return r.ReadRawResponse(response, responseStruct)
		default:
			return r.ReadJSONResponse(response, responseStruct)
		}

	}

}
|
||||
|
||||
// Do executes ar against the Jenkins server. Non-POST endpoints get a
// trailing slash appended. Each element of options is interpreted by
// type: a map[string]string becomes the query string; a []string switches
// the request to a multipart upload of those file paths. The response is
// decoded into responseStruct — raw body for a *string, JSON otherwise.
func (r *Requester) Do(ar *APIRequest, responseStruct interface{}, options ...interface{}) (*http.Response, error) {
	if !strings.HasSuffix(ar.Endpoint, "/") && ar.Method != "POST" {
		ar.Endpoint += "/"
	}

	fileUpload := false
	var files []string
	URL, err := url.Parse(r.Base + ar.Endpoint + ar.Suffix)

	if err != nil {
		return nil, err
	}

	// Interpret the variadic options by dynamic type.
	for _, o := range options {
		switch v := o.(type) {
		case map[string]string:

			querystring := make(url.Values)
			for key, val := range v {
				querystring.Set(key, val)
			}

			URL.RawQuery = querystring.Encode()
		case []string:
			fileUpload = true
			files = v
		}
	}
	var req *http.Request
	if fileUpload {
		// Build a multipart body: one form file per path, plus the JSON
		// payload decoded into individual form fields.
		body := &bytes.Buffer{}
		writer := multipart.NewWriter(body)
		for _, file := range files {
			fileData, err := os.Open(file)
			if err != nil {
				Error.Println(err.Error())
				return nil, err
			}

			part, err := writer.CreateFormFile("file", filepath.Base(file))
			if err != nil {
				Error.Println(err.Error())
				return nil, err
			}
			if _, err = io.Copy(part, fileData); err != nil {
				return nil, err
			}
			// Deferred inside the loop: every file stays open until Do returns.
			defer fileData.Close()
		}
		var params map[string]string
		// NOTE(review): the decode error is ignored, and a nil ar.Payload
		// would panic here — confirm callers always pass a payload with files.
		json.NewDecoder(ar.Payload).Decode(&params)
		for key, val := range params {
			if err = writer.WriteField(key, val); err != nil {
				return nil, err
			}
		}
		if err = writer.Close(); err != nil {
			return nil, err
		}
		req, err = http.NewRequest(ar.Method, URL.String(), body)
		if err != nil {
			return nil, err
		}
		req.Header.Set("Content-Type", writer.FormDataContentType())
	} else {
		req, err = http.NewRequest(ar.Method, URL.String(), ar.Payload)
		if err != nil {
			return nil, err
		}
	}

	if r.BasicAuth != nil {
		req.SetBasicAuth(r.BasicAuth.Username, r.BasicAuth.Password)
	}
	req.Close = true
	req.Header.Add("Accept", "*/*")
	for k := range ar.Headers {
		req.Header.Add(k, ar.Headers.Get(k))
	}
	// connControl acts as a counting semaphore bounding concurrent requests.
	r.connControl <- struct{}{}
	if response, err := r.Client.Do(req); err != nil {
		<-r.connControl
		return nil, err
	} else {
		<-r.connControl
		// Jenkins signals some failures via an X-Error header.
		errorText := response.Header.Get("X-Error")
		if errorText != "" {
			return nil, errors.New(errorText)
		}
		err := CheckResponse(response)
		if err != nil {
			return nil, err
		}
		// A *string target receives the raw body; anything else is JSON-decoded.
		switch responseStruct.(type) {
		case *string:
			return r.ReadRawResponse(response, responseStruct)
		default:
			return r.ReadJSONResponse(response, responseStruct)
		}

	}

}
|
||||
|
||||
// DoPostForm POSTs form as a URL-encoded request body to ar's endpoint
// and decodes the reply into responseStruct (raw body for a *string,
// JSON otherwise). Headers already set on ar (crumb, content type) are
// forwarded onto the request.
func (r *Requester) DoPostForm(ar *APIRequest, responseStruct interface{}, form map[string]string) (*http.Response, error) {

	if !strings.HasSuffix(ar.Endpoint, "/") && ar.Method != "POST" {
		ar.Endpoint += "/"
	}
	URL, err := url.Parse(r.Base + ar.Endpoint + ar.Suffix)

	if err != nil {
		return nil, err
	}
	// URL-encode the form values as the request body.
	formValue := make(url.Values)
	for k, v := range form {
		formValue.Set(k, v)
	}
	// NOTE(review): the http.NewRequest error is discarded here.
	req, _ := http.NewRequest("POST", URL.String(), strings.NewReader(formValue.Encode()))
	if r.BasicAuth != nil {
		req.SetBasicAuth(r.BasicAuth.Username, r.BasicAuth.Password)
	}
	req.Close = true
	req.Header.Add("Accept", "*/*")
	for k := range ar.Headers {
		req.Header.Add(k, ar.Headers.Get(k))
	}
	// connControl acts as a counting semaphore bounding concurrent requests.
	r.connControl <- struct{}{}
	if response, err := r.Client.Do(req); err != nil {
		<-r.connControl
		return nil, err
	} else {
		<-r.connControl
		// Jenkins signals some failures via an X-Error header.
		errorText := response.Header.Get("X-Error")
		if errorText != "" {
			return nil, errors.New(errorText)
		}
		err := CheckResponse(response)
		if err != nil {
			return nil, err
		}
		// A *string target receives the raw body; anything else is JSON-decoded.
		switch responseStruct.(type) {
		case *string:
			return r.ReadRawResponse(response, responseStruct)
		default:
			return r.ReadJSONResponse(response, responseStruct)
		}

	}
}
|
||||
|
||||
func (r *Requester) ReadRawResponse(response *http.Response, responseStruct interface{}) (*http.Response, error) {
|
||||
defer response.Body.Close()
|
||||
|
||||
content, err := io.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if str, ok := responseStruct.(*string); ok {
|
||||
*str = string(content)
|
||||
} else {
|
||||
return nil, fmt.Errorf("Could not cast responseStruct to *string")
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (r *Requester) ReadJSONResponse(response *http.Response, responseStruct interface{}) (*http.Response, error) {
|
||||
defer response.Body.Close()
|
||||
err := json.NewDecoder(response.Body).Decode(responseStruct)
|
||||
if err != nil && err.Error() == "EOF" {
|
||||
return response, nil
|
||||
}
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func CheckResponse(r *http.Response) error {
|
||||
|
||||
switch r.StatusCode {
|
||||
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusFound, http.StatusNotModified:
|
||||
return nil
|
||||
}
|
||||
defer r.Body.Close()
|
||||
errorResponse := &devops.ErrorResponse{Response: r}
|
||||
data, err := io.ReadAll(r.Body)
|
||||
if err == nil && data != nil {
|
||||
errorResponse.Body = data
|
||||
errorResponse.Message = string(data)
|
||||
}
|
||||
|
||||
return errorResponse
|
||||
}
|
||||
@@ -1,178 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops"
|
||||
)
|
||||
|
||||
// GlobalRoleResponse is the role-strategy plugin's JSON shape for a
// Jenkins global role.
type GlobalRoleResponse struct {
	RoleName      string                     `json:"roleName"`
	PermissionIds devops.GlobalPermissionIds `json:"permissionIds"`
}

// GlobalRole pairs a parsed global role with the Jenkins client used to
// mutate it.
type GlobalRole struct {
	Jenkins *Jenkins
	Raw     GlobalRoleResponse
}

// ProjectRole pairs a parsed project-scoped role with the Jenkins client
// used to mutate it.
type ProjectRole struct {
	Jenkins *Jenkins
	Raw     ProjectRoleResponse
}

// ProjectRoleResponse is the role-strategy plugin's JSON shape for a
// project-scoped role. Pattern is the job-name pattern the role applies
// to, e.g. "project-name/*".
type ProjectRoleResponse struct {
	RoleName      string                      `json:"roleName"`
	PermissionIds devops.ProjectPermissionIds `json:"permissionIds"`
	Pattern       string                      `json:"pattern"`
}
|
||||
|
||||
func (j *GlobalRole) Update(ids devops.GlobalPermissionIds) error {
|
||||
var idArray []string
|
||||
values := reflect.ValueOf(ids)
|
||||
for i := 0; i < values.NumField(); i++ {
|
||||
field := values.Field(i)
|
||||
if field.Bool() {
|
||||
idArray = append(idArray, values.Type().Field(i).Tag.Get("json"))
|
||||
}
|
||||
}
|
||||
param := map[string]string{
|
||||
"roleName": j.Raw.RoleName,
|
||||
"type": GLOBAL_ROLE,
|
||||
"permissionIds": strings.Join(idArray, ","),
|
||||
"overwrite": strconv.FormatBool(true),
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Jenkins.Requester.Post("/role-strategy/strategy/addRole", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// call jenkins api to update global role
|
||||
func (j *GlobalRole) AssignRole(sid string) error {
|
||||
param := map[string]string{
|
||||
"type": GLOBAL_ROLE,
|
||||
"roleName": j.Raw.RoleName,
|
||||
"sid": sid,
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Jenkins.Requester.Post("/role-strategy/strategy/assignRole", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *GlobalRole) UnAssignRole(sid string) error {
|
||||
param := map[string]string{
|
||||
"type": GLOBAL_ROLE,
|
||||
"roleName": j.Raw.RoleName,
|
||||
"sid": sid,
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Jenkins.Requester.Post("/role-strategy/strategy/unassignRole", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// update ProjectPermissionIds to Project
|
||||
// pattern string means some project, like project-name/*
|
||||
func (j *ProjectRole) Update(pattern string, ids devops.ProjectPermissionIds) error {
|
||||
var idArray []string
|
||||
values := reflect.ValueOf(ids)
|
||||
for i := 0; i < values.NumField(); i++ {
|
||||
field := values.Field(i)
|
||||
if field.Bool() {
|
||||
idArray = append(idArray, values.Type().Field(i).Tag.Get("json"))
|
||||
}
|
||||
}
|
||||
param := map[string]string{
|
||||
"roleName": j.Raw.RoleName,
|
||||
"type": PROJECT_ROLE,
|
||||
"permissionIds": strings.Join(idArray, ","),
|
||||
"overwrite": strconv.FormatBool(true),
|
||||
"pattern": pattern,
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Jenkins.Requester.Post("/role-strategy/strategy/addRole", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *ProjectRole) AssignRole(sid string) error {
|
||||
param := map[string]string{
|
||||
"type": PROJECT_ROLE,
|
||||
"roleName": j.Raw.RoleName,
|
||||
"sid": sid,
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Jenkins.Requester.Post("/role-strategy/strategy/assignRole", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (j *ProjectRole) UnAssignRole(sid string) error {
|
||||
param := map[string]string{
|
||||
"type": PROJECT_ROLE,
|
||||
"roleName": j.Raw.RoleName,
|
||||
"sid": sid,
|
||||
}
|
||||
responseString := ""
|
||||
response, err := j.Jenkins.Requester.Post("/role-strategy/strategy/unassignRole", nil, &responseString, param)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return errors.New(strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,120 +0,0 @@
|
||||
// Copyright 2015 Vadim Kravcenko
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
// not use this file except in compliance with the License. You may obtain
|
||||
// a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
// License for the specific language governing permissions and limitations
|
||||
// under the License.
|
||||
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func makeJson(data interface{}) string {
|
||||
str, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return string(json.RawMessage(str))
|
||||
}
|
||||
|
||||
func Reverse(s string) string {
|
||||
size := len(s)
|
||||
buf := make([]byte, size)
|
||||
for start := 0; start < size; {
|
||||
r, n := utf8.DecodeRuneInString(s[start:])
|
||||
start += n
|
||||
utf8.EncodeRune(buf[size-start:], r)
|
||||
}
|
||||
return string(buf)
|
||||
}
|
||||
|
||||
type JkError struct {
|
||||
Message string `json:"message"`
|
||||
Code int `json:"code"`
|
||||
}
|
||||
|
||||
func (err *JkError) Error() string {
|
||||
return err.Message
|
||||
}
|
||||
|
||||
// Decompress response.body of JenkinsAPIResponse
|
||||
func getRespBody(resp *http.Response) ([]byte, error) {
|
||||
var reader io.ReadCloser
|
||||
if resp.Header.Get("Content-Encoding") == "gzip" {
|
||||
reader, _ = gzip.NewReader(resp.Body)
|
||||
} else {
|
||||
reader = resp.Body
|
||||
}
|
||||
resBody, err := io.ReadAll(reader)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
return resBody, err
|
||||
|
||||
}
|
||||
|
||||
// ParseJenkinsQuery Parse the special query of jenkins.
|
||||
// ParseQuery in the standard library makes the query not re-encode
|
||||
func ParseJenkinsQuery(query string) (result url.Values, err error) {
|
||||
result = make(url.Values)
|
||||
for query != "" && err == nil {
|
||||
key := query
|
||||
if i := strings.IndexAny(key, "&"); i >= 0 {
|
||||
key, query = key[:i], key[i+1:]
|
||||
} else {
|
||||
query = ""
|
||||
}
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
value := ""
|
||||
if i := strings.Index(key, "="); i >= 0 {
|
||||
key, value = key[:i], key[i+1:]
|
||||
}
|
||||
if key, err = url.QueryUnescape(key); err == nil {
|
||||
if value, err = url.QueryUnescape(value); err == nil {
|
||||
result[key] = append(result[key], value)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type JenkinsBlueTime time.Time
|
||||
|
||||
func (t *JenkinsBlueTime) UnmarshalJSON(b []byte) error {
|
||||
if b == nil || strings.Trim(string(b), "\"") == "null" {
|
||||
*t = JenkinsBlueTime(time.Time{})
|
||||
return nil
|
||||
}
|
||||
j, err := time.Parse("2006-01-02T15:04:05.000-0700", strings.Trim(string(b), "\""))
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = JenkinsBlueTime(j)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t JenkinsBlueTime) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(time.Time(t))
|
||||
}
|
||||
@@ -1,58 +0,0 @@
|
||||
package jenkins
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseJenkinsQuery(t *testing.T) {
|
||||
table := []testData{
|
||||
{
|
||||
param: "start=0&limit=10&branch=master",
|
||||
expected: url.Values{
|
||||
"start": []string{"0"},
|
||||
"limit": []string{"10"},
|
||||
"branch": []string{"master"},
|
||||
}, err: false,
|
||||
},
|
||||
{
|
||||
param: "branch=master", expected: url.Values{
|
||||
"branch": []string{"master"},
|
||||
}, err: false,
|
||||
},
|
||||
{
|
||||
param: "&branch=master", expected: url.Values{
|
||||
"branch": []string{"master"},
|
||||
}, err: false,
|
||||
},
|
||||
{
|
||||
param: "branch=master&", expected: url.Values{
|
||||
"branch": []string{"master"},
|
||||
}, err: false,
|
||||
},
|
||||
{
|
||||
param: "branch=%gg", expected: url.Values{}, err: true,
|
||||
},
|
||||
{
|
||||
param: "%gg=fake", expected: url.Values{}, err: true,
|
||||
},
|
||||
}
|
||||
|
||||
for index, item := range table {
|
||||
result, err := ParseJenkinsQuery(item.param)
|
||||
if item.err {
|
||||
assert.NotNil(t, err, "index: [%d], unexpected error happen %v", index, err)
|
||||
} else {
|
||||
assert.Nil(t, err, "index: [%d], unexpected error happen %v", index, err)
|
||||
}
|
||||
assert.Equal(t, item.expected, result, "index: [%d], result do not match with the expect value", index)
|
||||
}
|
||||
}
|
||||
|
||||
type testData struct {
|
||||
param string
|
||||
expected interface{}
|
||||
err bool
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,71 +0,0 @@
|
||||
package devops
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"gotest.tools/assert"
|
||||
)
|
||||
|
||||
func TestGetSubmitters(t *testing.T) {
|
||||
input := &Input{}
|
||||
assert.Equal(t, len(input.GetSubmitters()), 0,
|
||||
"errors happen when try to get submitters without any submitters")
|
||||
|
||||
input.Submitter = "a , b, c,d"
|
||||
submitters := input.GetSubmitters()
|
||||
assert.Equal(t, len(submitters), 4, "get incorrect number of submitters")
|
||||
assert.DeepEqual(t, submitters, []string{"a", "b", "c", "d"})
|
||||
}
|
||||
|
||||
func TestApprovable(t *testing.T) {
|
||||
input := &Input{}
|
||||
|
||||
assert.Equal(t, input.Approvable(""), false, "should allow anyone to approve it if there's no submitter given")
|
||||
assert.Equal(t, input.Approvable("fake"), false, "should allow anyone to approve it if there's no submitter given")
|
||||
|
||||
input.Submitter = "fake"
|
||||
assert.Equal(t, input.Approvable(""), false, "should not approve by nobody if there's a particular submitter")
|
||||
assert.Equal(t, input.Approvable("rick"), false, "should not approve by who is not the specific one")
|
||||
assert.Equal(t, input.Approvable("fake"), true, "should be approvable")
|
||||
|
||||
input.Submitter = "fake, good ,bad"
|
||||
assert.Equal(t, input.Approvable("fake"), true, "should be approvable")
|
||||
assert.Equal(t, input.Approvable("good"), true, "should be approvable")
|
||||
assert.Equal(t, input.Approvable("bad"), true, "should be approvable")
|
||||
}
|
||||
|
||||
func TestPipelineJsonMarshall(t *testing.T) {
|
||||
const name = "fakeName"
|
||||
var err error
|
||||
var pipelineText string
|
||||
var pipelienList *PipelineList
|
||||
|
||||
pipelineText = fmt.Sprintf(`[{"displayName":"%s", "weatherScore": 11}]`, name)
|
||||
pipelienList, err = UnmarshalPipeline(1, []byte(pipelineText))
|
||||
assert.NilError(t, err, "pipeline json marshal should be success")
|
||||
assert.Equal(t, pipelienList.Total, 1)
|
||||
assert.Equal(t, len(pipelienList.Items), 1)
|
||||
assert.Equal(t, pipelienList.Items[0].DisplayName, name)
|
||||
assert.Equal(t, pipelienList.Items[0].WeatherScore, 11)
|
||||
|
||||
// test against the default value of weatherScore, it should be 100
|
||||
pipelineText = fmt.Sprintf(`[{"displayName":"%s"}]`, name)
|
||||
pipelienList, err = UnmarshalPipeline(1, []byte(pipelineText))
|
||||
assert.NilError(t, err, "pipeline json marshal should be success")
|
||||
assert.Equal(t, pipelienList.Total, 1)
|
||||
assert.Equal(t, len(pipelienList.Items), 1)
|
||||
assert.Equal(t, pipelienList.Items[0].DisplayName, name)
|
||||
assert.Equal(t, pipelienList.Items[0].WeatherScore, 100)
|
||||
|
||||
// test against multiple items
|
||||
pipelineText = fmt.Sprintf(`[{"displayName":"%s"}, {"displayName":"%s-1"}]`, name, name)
|
||||
pipelienList, err = UnmarshalPipeline(2, []byte(pipelineText))
|
||||
assert.NilError(t, err, "pipeline json marshal should be success")
|
||||
assert.Equal(t, pipelienList.Total, 2)
|
||||
assert.Equal(t, len(pipelienList.Items), 2)
|
||||
assert.Equal(t, pipelienList.Items[0].DisplayName, name)
|
||||
assert.Equal(t, pipelienList.Items[0].WeatherScore, 100)
|
||||
assert.Equal(t, pipelienList.Items[1].DisplayName, fmt.Sprintf("%s-1", name))
|
||||
assert.Equal(t, pipelienList.Items[1].WeatherScore, 100)
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
/*
|
||||
*
|
||||
project operator, providing API for creating/getting/deleting projects
|
||||
The actual data of the project is stored in the CRD,
|
||||
so we only need to create the project with the corresponding ID in the CI/CD system.
|
||||
*/
|
||||
type ProjectOperator interface {
|
||||
CreateDevOpsProject(projectId string) (string, error)
|
||||
DeleteDevOpsProject(projectId string) error
|
||||
GetDevOpsProject(projectId string) (string, error)
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
import "kubesphere.io/api/devops/v1alpha3"
|
||||
|
||||
type ProjectPipelineOperator interface {
|
||||
CreateProjectPipeline(projectId string, pipeline *v1alpha3.Pipeline) (string, error)
|
||||
DeleteProjectPipeline(projectId string, pipelineId string) (string, error)
|
||||
UpdateProjectPipeline(projectId string, pipeline *v1alpha3.Pipeline) (string, error)
|
||||
GetProjectPipelineConfig(projectId, pipelineId string) (*v1alpha3.Pipeline, error)
|
||||
}
|
||||
@@ -1,89 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package devops
|
||||
|
||||
// define the id of global permission items
|
||||
type GlobalPermissionIds struct {
|
||||
Administer bool `json:"hudson.model.Hudson.Administer"`
|
||||
GlobalRead bool `json:"hudson.model.Hudson.Read"`
|
||||
CredentialCreate bool `json:"com.cloudbees.plugins.credentials.CredentialsProvider.Create"`
|
||||
CredentialUpdate bool `json:"com.cloudbees.plugins.credentials.CredentialsProvider.Update"`
|
||||
CredentialView bool `json:"com.cloudbees.plugins.credentials.CredentialsProvider.View"`
|
||||
CredentialDelete bool `json:"com.cloudbees.plugins.credentials.CredentialsProvider.Delete"`
|
||||
CredentialManageDomains bool `json:"com.cloudbees.plugins.credentials.CredentialsProvider.ManageDomains"`
|
||||
SlaveCreate bool `json:"hudson.model.Computer.Create"`
|
||||
SlaveConfigure bool `json:"hudson.model.Computer.Configure"`
|
||||
SlaveDelete bool `json:"hudson.model.Computer.Delete"`
|
||||
SlaveBuild bool `json:"hudson.model.Computer.Build"`
|
||||
SlaveConnect bool `json:"hudson.model.Computer.Connect"`
|
||||
SlaveDisconnect bool `json:"hudson.model.Computer.Disconnect"`
|
||||
ItemBuild bool `json:"hudson.model.Item.Build"`
|
||||
ItemCreate bool `json:"hudson.model.Item.Create"`
|
||||
ItemRead bool `json:"hudson.model.Item.Read"`
|
||||
ItemConfigure bool `json:"hudson.model.Item.Configure"`
|
||||
ItemCancel bool `json:"hudson.model.Item.Cancel"`
|
||||
ItemMove bool `json:"hudson.model.Item.Move"`
|
||||
ItemDiscover bool `json:"hudson.model.Item.Discover"`
|
||||
ItemWorkspace bool `json:"hudson.model.Item.Workspace"`
|
||||
ItemDelete bool `json:"hudson.model.Item.Delete"`
|
||||
RunUpdate bool `json:"hudson.model.Run.Update"`
|
||||
RunDelete bool `json:"hudson.model.Run.Delete"`
|
||||
ViewCreate bool `json:"hudson.model.View.Create"`
|
||||
ViewConfigure bool `json:"hudson.model.View.Configure"`
|
||||
ViewRead bool `json:"hudson.model.View.Read"`
|
||||
ViewDelete bool `json:"hudson.model.View.Delete"`
|
||||
SCMTag bool `json:"hudson.scm.SCM.Tag"`
|
||||
}
|
||||
|
||||
// define the id of project permission items
|
||||
type ProjectPermissionIds struct {
|
||||
CredentialCreate bool `json:"com.cloudbees.plugins.credentials.CredentialsProvider.Create"`
|
||||
CredentialUpdate bool `json:"com.cloudbees.plugins.credentials.CredentialsProvider.Update"`
|
||||
CredentialView bool `json:"com.cloudbees.plugins.credentials.CredentialsProvider.View"`
|
||||
CredentialDelete bool `json:"com.cloudbees.plugins.credentials.CredentialsProvider.Delete"`
|
||||
CredentialManageDomains bool `json:"com.cloudbees.plugins.credentials.CredentialsProvider.ManageDomains"`
|
||||
ItemBuild bool `json:"hudson.model.Item.Build"`
|
||||
ItemCreate bool `json:"hudson.model.Item.Create"`
|
||||
ItemRead bool `json:"hudson.model.Item.Read"`
|
||||
ItemConfigure bool `json:"hudson.model.Item.Configure"`
|
||||
ItemCancel bool `json:"hudson.model.Item.Cancel"`
|
||||
ItemMove bool `json:"hudson.model.Item.Move"`
|
||||
ItemDiscover bool `json:"hudson.model.Item.Discover"`
|
||||
ItemWorkspace bool `json:"hudson.model.Item.Workspace"`
|
||||
ItemDelete bool `json:"hudson.model.Item.Delete"`
|
||||
RunUpdate bool `json:"hudson.model.Run.Update"`
|
||||
RunDelete bool `json:"hudson.model.Run.Delete"`
|
||||
RunReplay bool `json:"hudson.model.Run.Replay"`
|
||||
SCMTag bool `json:"hudson.scm.SCM.Tag"`
|
||||
}
|
||||
|
||||
// describe the interface of DevOps to operator role
|
||||
type RoleOperator interface {
|
||||
AddGlobalRole(roleName string, ids GlobalPermissionIds, overwrite bool) error
|
||||
GetGlobalRole(roleName string) (string, error)
|
||||
|
||||
AddProjectRole(roleName string, pattern string, ids ProjectPermissionIds, overwrite bool) error
|
||||
DeleteProjectRoles(roleName ...string) error
|
||||
|
||||
AssignProjectRole(roleName string, sid string) error
|
||||
UnAssignProjectRole(roleName string, sid string) error
|
||||
|
||||
AssignGlobalRole(roleName string, sid string) error
|
||||
UnAssignGlobalRole(roleName string, sid string) error
|
||||
|
||||
DeleteUserInProject(sid string) error
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package edgeruntime
|
||||
|
||||
import (
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reflectutils"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
Endpoint string `json:"endpoint" yaml:"endpoint"`
|
||||
}
|
||||
|
||||
func NewEdgeRuntimeOptions() *Options {
|
||||
return &Options{
|
||||
Endpoint: "",
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Options) ApplyTo(options *Options) {
|
||||
reflectutils.Override(options, o)
|
||||
}
|
||||
|
||||
func (o *Options) Validate() []error {
|
||||
errs := []error{}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func (o *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
|
||||
fs.StringVar(&o.Endpoint, "edgeservice-endpoint", c.Endpoint,
|
||||
"edgeservice endpoint for edgeruntime v1alpha1.")
|
||||
}
|
||||
@@ -1,213 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package es
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es/query"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es/versions"
|
||||
v1 "kubesphere.io/kubesphere/pkg/simple/client/es/versions/opensearchv1"
|
||||
v2 "kubesphere.io/kubesphere/pkg/simple/client/es/versions/opensearchv2"
|
||||
v5 "kubesphere.io/kubesphere/pkg/simple/client/es/versions/v5"
|
||||
v6 "kubesphere.io/kubesphere/pkg/simple/client/es/versions/v6"
|
||||
v7 "kubesphere.io/kubesphere/pkg/simple/client/es/versions/v7"
|
||||
"kubesphere.io/kubesphere/pkg/utils/esutil"
|
||||
)
|
||||
|
||||
const (
|
||||
ElasticV5 = "5"
|
||||
ElasticV6 = "6"
|
||||
ElasticV7 = "7"
|
||||
OpenSearchV1 = "opensearchv1"
|
||||
OpenSearchV2 = "opensearchv2"
|
||||
)
|
||||
|
||||
// Elasticsearch client
|
||||
type Client struct {
|
||||
host string
|
||||
basicAuth bool
|
||||
username string
|
||||
password string
|
||||
version string
|
||||
index string
|
||||
|
||||
c versions.Client
|
||||
mux sync.Mutex
|
||||
}
|
||||
|
||||
func NewClient(host string, basicAuth bool, username, password, indexPrefix, version string) (*Client, error) {
|
||||
var err error
|
||||
es := &Client{
|
||||
host: host,
|
||||
basicAuth: basicAuth,
|
||||
username: username,
|
||||
password: password,
|
||||
version: version,
|
||||
index: indexPrefix,
|
||||
}
|
||||
|
||||
switch es.version {
|
||||
case OpenSearchV1:
|
||||
es.c, err = v1.New(es.host, es.basicAuth, es.username, es.password, es.index)
|
||||
case OpenSearchV2:
|
||||
es.c, err = v2.New(es.host, es.basicAuth, es.username, es.password, es.index)
|
||||
case ElasticV5:
|
||||
es.c, err = v5.New(es.host, es.basicAuth, es.username, es.password, es.index)
|
||||
case ElasticV6:
|
||||
es.c, err = v6.New(es.host, es.basicAuth, es.username, es.password, es.index)
|
||||
case ElasticV7:
|
||||
es.c, err = v7.New(es.host, es.basicAuth, es.username, es.password, es.index)
|
||||
case "":
|
||||
es.c = nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported elasticsearch version %s", es.version)
|
||||
}
|
||||
|
||||
return es, err
|
||||
}
|
||||
|
||||
func (c *Client) loadClient() error {
|
||||
// Check if Elasticsearch client has been initialized.
|
||||
if c.c != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create Elasticsearch client.
|
||||
c.mux.Lock()
|
||||
defer c.mux.Unlock()
|
||||
|
||||
if c.c != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Detect Elasticsearch server version using Info API.
|
||||
// Info API is backward compatible across v5, v6 and v7.
|
||||
esv6, err := v6.New(c.host, c.basicAuth, c.username, c.password, c.index)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res, err := esv6.Client.Info(
|
||||
esv6.Client.Info.WithContext(context.Background()),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = res.Body.Close()
|
||||
}()
|
||||
|
||||
var b map[string]interface{}
|
||||
if err = jsoniter.NewDecoder(res.Body).Decode(&b); err != nil {
|
||||
return err
|
||||
}
|
||||
if res.IsError() {
|
||||
// Print the response status and error information.
|
||||
e, _ := b["error"].(map[string]interface{})
|
||||
return fmt.Errorf("[%s] type: %v, reason: %v", res.Status(), e["type"], e["reason"])
|
||||
}
|
||||
|
||||
// get the major version
|
||||
version, _ := b["version"].(map[string]interface{})
|
||||
number, _ := version["number"].(string)
|
||||
if number == "" {
|
||||
return fmt.Errorf("failed to detect elastic version number")
|
||||
}
|
||||
|
||||
var vc versions.Client
|
||||
v := strings.Split(number, ".")[0]
|
||||
distribution, _ := version["distribution"].(string)
|
||||
if distribution == "opensearch" {
|
||||
v = "opensearchv" + v
|
||||
}
|
||||
switch v {
|
||||
case OpenSearchV1:
|
||||
vc, err = v1.New(c.host, c.basicAuth, c.username, c.password, c.index)
|
||||
case OpenSearchV2:
|
||||
vc, err = v2.New(c.host, c.basicAuth, c.username, c.password, c.index)
|
||||
case ElasticV5:
|
||||
vc, err = v5.New(c.host, c.basicAuth, c.username, c.password, c.index)
|
||||
case ElasticV6:
|
||||
vc, err = v6.New(c.host, c.basicAuth, c.username, c.password, c.index)
|
||||
case ElasticV7:
|
||||
vc, err = v7.New(c.host, c.basicAuth, c.username, c.password, c.index)
|
||||
default:
|
||||
err = fmt.Errorf("unsupported elasticsearch version %s", version)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.c = vc
|
||||
c.version = v
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) Search(builder *query.Builder, startTime, endTime time.Time, scroll bool) (*Response, error) {
|
||||
|
||||
err := c.loadClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initial Search
|
||||
body, err := builder.Bytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := c.c.Search(esutil.ResolveIndexNames(c.index, startTime, endTime), body, scroll)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parseResponse(res)
|
||||
}
|
||||
|
||||
func (c *Client) Scroll(id string) (*Response, error) {
|
||||
|
||||
err := c.loadClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := c.c.Scroll(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parseResponse(res)
|
||||
}
|
||||
|
||||
func (c *Client) ClearScroll(id string) {
|
||||
if id != "" {
|
||||
c.c.ClearScroll(id)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) GetTotalHitCount(v interface{}) int64 {
|
||||
return c.c.GetTotalHitCount(v)
|
||||
}
|
||||
@@ -1,179 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package es
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es/query"
|
||||
)
|
||||
|
||||
func TestNewClient(t *testing.T) {
|
||||
var tests = []struct {
|
||||
fakeResp string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
fakeResp: "es6_detect_version_major_200.json",
|
||||
expected: ElasticV6,
|
||||
},
|
||||
{
|
||||
fakeResp: "es7_detect_version_major_200.json",
|
||||
expected: ElasticV7,
|
||||
},
|
||||
{
|
||||
fakeResp: "opensearchv2_detect_version_major_200.json",
|
||||
expected: OpenSearchV2,
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
es := mockElasticsearchService("/", test.fakeResp, http.StatusOK)
|
||||
defer es.Close()
|
||||
|
||||
client := &Client{host: es.URL}
|
||||
err := client.loadClient()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(client.version, test.expected); diff != "" {
|
||||
t.Fatalf("%T differ (-got, +want): %s", test.expected, diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestClient_Search(t *testing.T) {
|
||||
var tests = []struct {
|
||||
fakeVersion string
|
||||
fakeResp string
|
||||
fakeCode int
|
||||
expected string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
fakeVersion: ElasticV7,
|
||||
fakeResp: "es7_search_200.json",
|
||||
fakeCode: http.StatusOK,
|
||||
expected: "es7_search_200_result.json",
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
var expected Response
|
||||
err := JsonFromFile(test.expected, &expected)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
srv := mockElasticsearchService("/ks-logstash*/_search", test.fakeResp, test.fakeCode)
|
||||
defer srv.Close()
|
||||
|
||||
c, err := NewClient(srv.URL, false, "", "", "ks-logstash", test.fakeVersion)
|
||||
if err != nil {
|
||||
t.Fatalf("create client error, %s", err)
|
||||
}
|
||||
result, err := c.Search(query.NewBuilder(), time.Time{}, time.Now(), false)
|
||||
if test.expectedErr != "" {
|
||||
if diff := cmp.Diff(fmt.Sprint(err), test.expectedErr); diff != "" {
|
||||
t.Fatalf("%T differ (-got, +want): %s", test.expectedErr, diff)
|
||||
}
|
||||
}
|
||||
if diff := cmp.Diff(result, &expected); diff != "" {
|
||||
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOpensearchClient_Search(t *testing.T) {
|
||||
var tests = []struct {
|
||||
fakeVersion string
|
||||
fakeResp string
|
||||
fakeCode int
|
||||
expected string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
fakeVersion: OpenSearchV2,
|
||||
fakeResp: "opensearchv2_search_200.json",
|
||||
fakeCode: http.StatusOK,
|
||||
expected: "opensearchv2_search_200_result.json",
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
var expected Response
|
||||
err := JsonFromFile(test.expected, &expected)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
srv := mockElasticsearchService("/", test.fakeResp, test.fakeCode)
|
||||
defer srv.Close()
|
||||
|
||||
c, err := NewClient(srv.URL, false, "", "", "ks-logstash", test.fakeVersion)
|
||||
if err != nil {
|
||||
t.Fatalf("create client error, %s", err)
|
||||
}
|
||||
result, err := c.Search(query.NewBuilder(), time.Time{}, time.Now(), false)
|
||||
if test.expectedErr != "" {
|
||||
if diff := cmp.Diff(fmt.Sprint(err), test.expectedErr); diff != "" {
|
||||
t.Fatalf("%T differ (-got, +want): %s", test.expectedErr, diff)
|
||||
}
|
||||
}
|
||||
if diff := cmp.Diff(result, &expected); diff != "" {
|
||||
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func mockElasticsearchService(pattern, fakeResp string, fakeCode int) *httptest.Server {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc(pattern, func(res http.ResponseWriter, req *http.Request) {
|
||||
b, _ := os.ReadFile(fmt.Sprintf("./testdata/%s", fakeResp))
|
||||
res.WriteHeader(fakeCode)
|
||||
res.Write(b)
|
||||
})
|
||||
return httptest.NewServer(mux)
|
||||
}
|
||||
|
||||
func JsonFromFile(expectedFile string, expectedJsonPtr interface{}) error {
|
||||
json, err := os.ReadFile(fmt.Sprintf("./testdata/%s", expectedFile))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = jsoniter.Unmarshal(json, expectedJsonPtr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,493 +0,0 @@
|
||||
package query
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
)
|
||||
|
||||
// TODO: elastic/go-elasticsearch is working on Query DSL support.
|
||||
//
|
||||
// See https://github.com/elastic/go-elasticsearch/issues/42.
|
||||
// We need refactor our query body builder when that is ready.
|
||||
type Builder struct {
|
||||
From int64 `json:"from,omitempty"`
|
||||
Size int64 `json:"size,omitempty"`
|
||||
Sorts []map[string]string `json:"sort,omitempty"`
|
||||
*Query `json:",inline"`
|
||||
*Aggregations `json:"aggs,omitempty"`
|
||||
}
|
||||
|
||||
func NewBuilder() *Builder {
|
||||
return &Builder{}
|
||||
}
|
||||
|
||||
func (b *Builder) Bytes() ([]byte, error) {
|
||||
return jsoniter.Marshal(b)
|
||||
}
|
||||
|
||||
func (b *Builder) WithQuery(q *Query) *Builder {
|
||||
|
||||
if q == nil || q.Bool == nil || !q.IsValid() {
|
||||
return b
|
||||
}
|
||||
|
||||
b.Query = q
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *Builder) WithAggregations(aggs *Aggregations) *Builder {
|
||||
|
||||
b.Aggregations = aggs
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *Builder) WithFrom(n int64) *Builder {
|
||||
b.From = n
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *Builder) WithSize(n int64) *Builder {
|
||||
b.Size = n
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *Builder) WithSort(key, order string) *Builder {
|
||||
if order == "" {
|
||||
order = "desc"
|
||||
}
|
||||
b.Sorts = []map[string]string{{key: order}}
|
||||
return b
|
||||
}
|
||||
|
||||
// Query
|
||||
|
||||
type Query struct {
|
||||
*Bool `json:"query,omitempty"`
|
||||
}
|
||||
|
||||
func NewQuery() *Query {
|
||||
return &Query{}
|
||||
}
|
||||
|
||||
func (q *Query) WithBool(b *Bool) *Query {
|
||||
if b == nil || !b.IsValid() {
|
||||
return q
|
||||
}
|
||||
|
||||
q.Bool = b
|
||||
return q
|
||||
}
|
||||
|
||||
// Aggregations
|
||||
|
||||
type Aggregations struct {
|
||||
*CardinalityAggregation `json:"cardinality_aggregation,omitempty"`
|
||||
*DateHistogramAggregation `json:"date_histogram_aggregation,omitempty"`
|
||||
}
|
||||
|
||||
type CardinalityAggregation struct {
|
||||
*Cardinality `json:"cardinality,omitempty"`
|
||||
}
|
||||
|
||||
type Cardinality struct {
|
||||
Field string `json:"field,omitempty"`
|
||||
}
|
||||
|
||||
type DateHistogramAggregation struct {
|
||||
*DateHistogram `json:"date_histogram,omitempty"`
|
||||
}
|
||||
|
||||
type DateHistogram struct {
|
||||
Field string `json:"field,omitempty"`
|
||||
Interval string `json:"interval,omitempty"`
|
||||
}
|
||||
|
||||
func NewAggregations() *Aggregations {
|
||||
return &Aggregations{}
|
||||
}
|
||||
|
||||
func (a *Aggregations) WithCardinalityAggregation(field string) *Aggregations {
|
||||
|
||||
a.CardinalityAggregation = &CardinalityAggregation{
|
||||
&Cardinality{
|
||||
Field: field,
|
||||
},
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *Aggregations) WithDateHistogramAggregation(field string, interval string) *Aggregations {
|
||||
|
||||
a.DateHistogramAggregation = &DateHistogramAggregation{
|
||||
&DateHistogram{
|
||||
Field: field,
|
||||
Interval: interval,
|
||||
},
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// Item is a single query clause (match_phrase, range, wildcard, terms, ...)
// that can be embedded in a bool query once it is valid.
type Item interface {
	IsValid() bool
}

// Example:
// {bool: {filter: <[]Match>}}
// {bool: {should: <[]Match>, minimum_should_match: 1}}
type Bool struct {
	*Parameter `json:"bool,omitempty"`
}

// Parameter holds the occurrence clauses of a bool query.
type Parameter struct {
	Filter             []interface{} `json:"filter,omitempty"`
	Should             []interface{} `json:"should,omitempty"`
	MustNot            []interface{} `json:"must_not,omitempty"`
	MinimumShouldMatch int32         `json:"minimum_should_match,omitempty"`
}

// NewBool returns a bool query with a non-nil Parameter so the Append*/With*
// helpers below are immediately safe to chain.
func NewBool() *Bool {
	return &Bool{
		&Parameter{},
	}
}

// isNilItem reports whether item is nil, either as an untyped nil interface
// or as an interface holding a typed nil pointer. reflect.Value.IsNil panics
// on the zero Value (untyped nil) and on non-nilable kinds, so both cases
// are guarded here before it is called.
func isNilItem(item Item) bool {
	if item == nil {
		return true
	}
	v := reflect.ValueOf(item)
	switch v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		return v.IsNil()
	}
	return false
}

// IsValid reports whether the bool query contains at least one clause.
func (b *Bool) IsValid() bool {
	if b.Parameter == nil {
		// Zero-value Bool (not built with NewBool): nothing to serialize.
		return false
	}
	// len on a nil slice is 0, so no separate nil checks are needed.
	return len(b.Filter) > 0 || len(b.Should) > 0 || len(b.MustNot) > 0
}

// AppendFilter adds item to the filter context; nil or invalid items are
// silently ignored.
func (b *Bool) AppendFilter(item Item) *Bool {
	if isNilItem(item) || !item.IsValid() {
		return b
	}

	b.Filter = append(b.Filter, item)
	return b
}

// AppendMultiFilter adds every non-nil, valid item to the filter context.
func (b *Bool) AppendMultiFilter(items []Item) *Bool {
	for _, item := range items {
		if !isNilItem(item) && item.IsValid() {
			b.Filter = append(b.Filter, item)
		}
	}
	return b
}

// AppendShould adds item to the should context; nil or invalid items are
// silently ignored.
func (b *Bool) AppendShould(item Item) *Bool {
	if isNilItem(item) || !item.IsValid() {
		return b
	}

	b.Should = append(b.Should, item)
	return b
}

// AppendMultiShould adds every non-nil, valid item to the should context.
func (b *Bool) AppendMultiShould(items []Item) *Bool {
	for _, item := range items {
		if !isNilItem(item) && item.IsValid() {
			b.Should = append(b.Should, item)
		}
	}
	return b
}

// AppendMustNot adds item to the must_not context; nil or invalid items are
// silently ignored.
func (b *Bool) AppendMustNot(item Item) *Bool {
	if isNilItem(item) || !item.IsValid() {
		return b
	}

	b.MustNot = append(b.MustNot, item)
	return b
}

// AppendMultiMustNot adds every non-nil, valid item to the must_not context.
func (b *Bool) AppendMultiMustNot(items []Item) *Bool {
	for _, item := range items {
		if !isNilItem(item) && item.IsValid() {
			b.MustNot = append(b.MustNot, item)
		}
	}
	return b
}

// WithMinimumShouldMatch sets minimum_should_match for the should clauses.
// The receiver must have a non-nil Parameter (use NewBool).
func (b *Bool) WithMinimumShouldMatch(min int32) *Bool {
	b.MinimumShouldMatch = min
	return b
}
|
||||
|
||||
// MatchPhrase is an ES "match_phrase" clause: {match_phrase: {<field>: <phrase>}}.
type MatchPhrase struct {
	MatchPhrase map[string]string `json:"match_phrase,omitempty"`
}

// IsValid reports whether the clause carries at least one field/phrase pair.
func (m *MatchPhrase) IsValid() bool {
	// len on a nil map is 0, so no separate nil check is needed.
	return len(m.MatchPhrase) > 0
}

// NewMatchPhrase builds a match_phrase clause for a single field/value pair.
func NewMatchPhrase(key, val string) *MatchPhrase {
	return &MatchPhrase{
		MatchPhrase: map[string]string{
			key: val,
		},
	}
}
|
||||
|
||||
func NewMultiMatchPhrase(key string, val []string) []Item {
|
||||
|
||||
var array []Item
|
||||
|
||||
if len(val) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, v := range val {
|
||||
array = append(array, &MatchPhrase{
|
||||
MatchPhrase: map[string]string{
|
||||
key: v,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return array
|
||||
}
|
||||
|
||||
// MatchPhrasePrefix is an ES "match_phrase_prefix" clause:
// {match_phrase_prefix: {<field>: <prefix>}}.
type MatchPhrasePrefix struct {
	MatchPhrasePrefix map[string]string `json:"match_phrase_prefix,omitempty"`
}

// IsValid reports whether the clause carries at least one field/prefix pair.
func (m *MatchPhrasePrefix) IsValid() bool {
	// len on a nil map is 0, so no separate nil check is needed.
	return len(m.MatchPhrasePrefix) > 0
}

// NewMatchPhrasePrefix builds a match_phrase_prefix clause for one
// field/value pair.
func NewMatchPhrasePrefix(key, val string) *MatchPhrasePrefix {
	return &MatchPhrasePrefix{
		MatchPhrasePrefix: map[string]string{
			key: val,
		},
	}
}
|
||||
|
||||
func NewMultiMatchPhrasePrefix(key string, val []string) []Item {
|
||||
|
||||
var array []Item
|
||||
|
||||
if len(val) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, v := range val {
|
||||
array = append(array, &MatchPhrasePrefix{
|
||||
MatchPhrasePrefix: map[string]string{
|
||||
key: v,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return array
|
||||
}
|
||||
|
||||
// Regexp is an ES "regexp" clause: {regexp: {<field>: <pattern>}}.
type Regexp struct {
	Regexp map[string]string `json:"regexp,omitempty"`
}

// IsValid reports whether the clause carries at least one field/pattern pair.
func (m *Regexp) IsValid() bool {
	// len on a nil map is 0, so no separate nil check is needed.
	return len(m.Regexp) > 0
}

// NewRegex builds a regexp clause for a single field/pattern pair.
func NewRegex(key, val string) *Regexp {
	return &Regexp{
		Regexp: map[string]string{
			key: val,
		},
	}
}
|
||||
|
||||
// Range is an ES "range" clause: {range: {<field>: {gt: ..., lte: ...}}}.
type Range struct {
	Range map[string]map[string]interface{} `json:"range,omitempty"`
}

// NewRange builds an empty range clause for the given field; it stays
// invalid until at least one bound (WithGT/WithGTE/WithLT/WithLTE) is set.
func NewRange(key string) *Range {
	return &Range{
		Range: map[string]map[string]interface{}{
			key: make(map[string]interface{}),
		},
	}
}

// WithGT sets an exclusive lower bound.
func (r *Range) WithGT(val interface{}) *Range {
	r.withRange("gt", val)
	return r
}

// WithGTE sets an inclusive lower bound.
func (r *Range) WithGTE(val interface{}) *Range {
	r.withRange("gte", val)
	return r
}

// WithLT sets an exclusive upper bound.
func (r *Range) WithLT(val interface{}) *Range {
	r.withRange("lt", val)
	return r
}

// WithLTE sets an inclusive upper bound.
func (r *Range) WithLTE(val interface{}) *Range {
	r.withRange("lte", val)
	return r
}

// IsValid reports whether any field has at least one bound set. Ranging
// over a nil or empty map simply yields nothing, so no separate nil/empty
// checks are needed.
func (r *Range) IsValid() bool {
	for _, operators := range r.Range {
		if len(operators) != 0 {
			return true
		}
	}
	return false
}

// withRange records operator=val on every field in the clause (in practice
// there is exactly one field, set by NewRange). Safe on a nil map: the
// range loop is then a no-op.
func (r *Range) withRange(operator string, val interface{}) {
	for _, operators := range r.Range {
		operators[operator] = val
	}
}
|
||||
|
||||
// Wildcard is an ES "wildcard" clause: {wildcard: {<field>: <pattern>}}.
type Wildcard struct {
	Wildcard map[string]string `json:"wildcard,omitempty"`
}

// IsValid reports whether the clause carries at least one field/pattern pair.
func (m *Wildcard) IsValid() bool {
	// len on a nil map is 0, so no separate nil check is needed.
	return len(m.Wildcard) > 0
}

// NewWildcard builds a wildcard clause for a single field/pattern pair.
func NewWildcard(key, val string) *Wildcard {
	return &Wildcard{
		Wildcard: map[string]string{
			key: val,
		},
	}
}
|
||||
|
||||
func NewMultiWildcard(key string, val []string) []Item {
|
||||
|
||||
var array []Item
|
||||
|
||||
if len(val) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, v := range val {
|
||||
array = append(array, &Wildcard{
|
||||
Wildcard: map[string]string{
|
||||
key: v,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return array
|
||||
}
|
||||
|
||||
// Terms is an ES "terms" clause: {terms: {<field>: [<v1>, <v2>, ...]}}.
type Terms struct {
	Terms map[string]interface{} `json:"terms,omitempty"`
}

// IsValid reports whether the clause carries at least one field.
func (m *Terms) IsValid() bool {
	// len on a nil map is 0, so no separate nil check is needed.
	return len(m.Terms) > 0
}

// NewTerms builds a terms clause for the given field. It returns nil when
// val is nil — as an untyped nil or a typed nil slice/map/pointer — so the
// result can be fed straight into Bool.Append*.
func NewTerms(key string, val interface{}) *Terms {
	if val == nil {
		// reflect.ValueOf(nil) is the zero Value; IsNil would panic on it.
		return nil
	}
	rv := reflect.ValueOf(val)
	switch rv.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		// Only these kinds can be nil; IsNil panics on any other kind
		// (e.g. string or int values).
		if rv.IsNil() {
			return nil
		}
	}

	return &Terms{
		Terms: map[string]interface{}{
			key: val,
		},
	}
}
|
||||
|
||||
// Exists is an ES "exists" clause: {exists: {field: <name>}}.
type Exists struct {
	Exists map[string]string `json:"exists,omitempty"`
}

// IsValid reports whether the clause carries at least one entry.
func (m *Exists) IsValid() bool {
	// len on a nil map is 0, so no separate nil check is needed.
	return len(m.Exists) > 0
}

// NewExists builds an exists clause; key is normally "field" and val the
// field name to test for presence.
func NewExists(key, val string) *Exists {
	return &Exists{
		Exists: map[string]string{
			key: val,
		},
	}
}
|
||||
@@ -1,50 +0,0 @@
|
||||
package es
|
||||
|
||||
import (
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// Response is the subset of an Elasticsearch/OpenSearch search response
// body that this client consumes.
type Response struct {
	// ScrollId is the cursor for fetching the next page of a scroll search.
	ScrollId string `json:"_scroll_id,omitempty"`
	Hits     `json:"hits,omitempty"`
	Aggregations `json:"aggregations,omitempty"`
}

// Hits holds the matched documents and the total-hit count.
type Hits struct {
	// Total is interface{} because its wire shape differs across versions:
	// a bare integer before v7, an object {value, relation} from v7 on.
	Total interface{} `json:"total,omitempty"` // `As of Elasticsearch v7.x, hits.total is changed incompatibly
	AllHits []Hit `json:"hits,omitempty"`
}

// Hit is a single matched document.
type Hit struct {
	// Source is the original document body; its schema is index-specific.
	Source interface{} `json:"_source,omitempty"`
	// Sort carries the sort values used for search_after-style paging.
	Sort []int64 `json:"sort,omitempty"`
}

// Aggregations mirrors the named aggregations this client requests.
type Aggregations struct {
	CardinalityAggregation `json:"cardinality_aggregation,omitempty"`
	DateHistogramAggregation `json:"date_histogram_aggregation,omitempty"`
}

// CardinalityAggregation is the distinct-count result.
type CardinalityAggregation struct {
	Value int64 `json:"value,omitempty"`
}

// DateHistogramAggregation is the list of time buckets.
type DateHistogramAggregation struct {
	Buckets []Bucket `json:"buckets,omitempty"`
}

// Bucket is one time bucket of a date histogram.
type Bucket struct {
	// Key is the bucket's timestamp in epoch milliseconds (as emitted by ES).
	Key int64 `json:"key,omitempty"`
	Count int64 `json:"doc_count,omitempty"`
}
|
||||
|
||||
func parseResponse(body []byte) (*Response, error) {
|
||||
var res Response
|
||||
err := jsoniter.Unmarshal(body, &res)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
return &res, nil
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"name" : "elasticsearch-logging-data-0",
|
||||
"cluster_name" : "elasticsearch",
|
||||
"cluster_uuid" : "uLm0838MSd60T1XEh5P2Qg",
|
||||
"version" : {
|
||||
"number" : "6.7.0",
|
||||
"build_flavor" : "oss",
|
||||
"build_type" : "docker",
|
||||
"build_hash" : "8453f77",
|
||||
"build_date" : "2019-03-21T15:32:29.844721Z",
|
||||
"build_snapshot" : false,
|
||||
"lucene_version" : "7.7.0",
|
||||
"minimum_wire_compatibility_version" : "5.6.0",
|
||||
"minimum_index_compatibility_version" : "5.0.0"
|
||||
},
|
||||
"tagline" : "You Know, for Search"
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"name" : "elasticsearch-master-2",
|
||||
"cluster_name" : "elasticsearch",
|
||||
"cluster_uuid" : "_A-3shR0R0i-2M9CzOWP8g",
|
||||
"version" : {
|
||||
"number" : "7.7.0",
|
||||
"build_flavor" : "default",
|
||||
"build_type" : "docker",
|
||||
"build_hash" : "81a1e9eda8e6183f5237786246f6dced26a10eaf",
|
||||
"build_date" : "2020-05-12T02:01:37.602180Z",
|
||||
"build_snapshot" : false,
|
||||
"lucene_version" : "8.5.1",
|
||||
"minimum_wire_compatibility_version" : "6.8.0",
|
||||
"minimum_index_compatibility_version" : "6.0.0-beta1"
|
||||
},
|
||||
"tagline" : "You Know, for Search"
|
||||
}
|
||||
@@ -1,76 +0,0 @@
|
||||
{
|
||||
"took": 772,
|
||||
"timed_out": false,
|
||||
"_shards": {
|
||||
"total": 2,
|
||||
"successful": 2,
|
||||
"skipped": 0,
|
||||
"failed": 0
|
||||
},
|
||||
"hits": {
|
||||
"total": {
|
||||
"value": 10000,
|
||||
"relation": "gte"
|
||||
},
|
||||
"max_score": 1.0,
|
||||
"hits": [
|
||||
{
|
||||
"_index": "ks-logstash-log-2020.05.16",
|
||||
"_type": "flb_type",
|
||||
"_id": "tRt2MXIBlcWZ594bqIUO",
|
||||
"_score": 1.0,
|
||||
"_source": {
|
||||
"@timestamp": "2020-05-16T16:00:42.608Z",
|
||||
"log": "10.233.30.76 redis-ha-announce-0.kubesphere-system.svc.cluster.local\n",
|
||||
"time": "2020-05-16T16:00:42.608962452Z",
|
||||
"kubernetes": {
|
||||
"pod_name": "redis-ha-haproxy-ffb8d889d-8x9kj",
|
||||
"namespace_name": "kubesphere-system",
|
||||
"host": "master0",
|
||||
"container_name": "config-init",
|
||||
"docker_id": "a673327e5e3dfefca3e773273e69eca64baaa4499fdc04e6eb9d621ad8688ad0",
|
||||
"container_hash": "cd4b3d4d27ae5931dc96b9632188590b7a6880469bcf07f478a3280dd0955336"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index": "ks-logstash-log-2020.05.16",
|
||||
"_type": "flb_type",
|
||||
"_id": "tht2MXIBlcWZ594bqIUO",
|
||||
"_score": 1.0,
|
||||
"_source": {
|
||||
"@timestamp": "2020-05-16T16:00:42.670Z",
|
||||
"log": "10.233.30.204 redis-ha-announce-1.kubesphere-system.svc.cluster.local\n",
|
||||
"time": "2020-05-16T16:00:42.670430525Z",
|
||||
"kubernetes": {
|
||||
"pod_name": "redis-ha-haproxy-ffb8d889d-8x9kj",
|
||||
"namespace_name": "kubesphere-system",
|
||||
"host": "master0",
|
||||
"container_name": "config-init",
|
||||
"docker_id": "a673327e5e3dfefca3e773273e69eca64baaa4499fdc04e6eb9d621ad8688ad0",
|
||||
"container_hash": "cd4b3d4d27ae5931dc96b9632188590b7a6880469bcf07f478a3280dd0955336"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index": "ks-logstash-log-2020.05.16",
|
||||
"_type": "flb_type",
|
||||
"_id": "txt2MXIBlcWZ594bqIUO",
|
||||
"_score": 1.0,
|
||||
"_source": {
|
||||
"@timestamp": "2020-05-16T16:00:42.731Z",
|
||||
"log": "scvg14005: inuse: 16, idle: 42, sys: 58, released: 40, consumed: 17 (MB)\n",
|
||||
"time": "2020-05-16T16:00:42.731865428Z",
|
||||
"kubernetes": {
|
||||
"pod_name": "redis-ha-haproxy-ffb8d889d-8x9kj",
|
||||
"namespace_name": "istio-system",
|
||||
"host": "node0",
|
||||
"container_name": "mixer",
|
||||
"docker_id": "a673327e5e3dfefca3e773273e69eca64baaa4499fdc04e6eb9d621ad8688ad0",
|
||||
"container_hash": "cd4b3d4d27ae5931dc96b9632188590b7a6880469bcf07f478a3280dd0955336"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,55 +0,0 @@
|
||||
{
|
||||
"hits": {
|
||||
"total": {
|
||||
"value": 10000,
|
||||
"relation": "gte"
|
||||
},
|
||||
"hits": [
|
||||
{
|
||||
"_source": {
|
||||
"time": "2020-05-16T16:00:42.608962452Z",
|
||||
"kubernetes": {
|
||||
"container_hash": "cd4b3d4d27ae5931dc96b9632188590b7a6880469bcf07f478a3280dd0955336",
|
||||
"pod_name": "redis-ha-haproxy-ffb8d889d-8x9kj",
|
||||
"namespace_name": "kubesphere-system",
|
||||
"host": "master0",
|
||||
"container_name": "config-init",
|
||||
"docker_id": "a673327e5e3dfefca3e773273e69eca64baaa4499fdc04e6eb9d621ad8688ad0"
|
||||
},
|
||||
"@timestamp": "2020-05-16T16:00:42.608Z",
|
||||
"log": "10.233.30.76 redis-ha-announce-0.kubesphere-system.svc.cluster.local\n"
|
||||
}
|
||||
},
|
||||
{
|
||||
"_source": {
|
||||
"@timestamp": "2020-05-16T16:00:42.670Z",
|
||||
"log": "10.233.30.204 redis-ha-announce-1.kubesphere-system.svc.cluster.local\n",
|
||||
"time": "2020-05-16T16:00:42.670430525Z",
|
||||
"kubernetes": {
|
||||
"pod_name": "redis-ha-haproxy-ffb8d889d-8x9kj",
|
||||
"namespace_name": "kubesphere-system",
|
||||
"host": "master0",
|
||||
"container_name": "config-init",
|
||||
"docker_id": "a673327e5e3dfefca3e773273e69eca64baaa4499fdc04e6eb9d621ad8688ad0",
|
||||
"container_hash": "cd4b3d4d27ae5931dc96b9632188590b7a6880469bcf07f478a3280dd0955336"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_source": {
|
||||
"@timestamp": "2020-05-16T16:00:42.731Z",
|
||||
"log": "scvg14005: inuse: 16, idle: 42, sys: 58, released: 40, consumed: 17 (MB)\n",
|
||||
"time": "2020-05-16T16:00:42.731865428Z",
|
||||
"kubernetes": {
|
||||
"container_hash": "cd4b3d4d27ae5931dc96b9632188590b7a6880469bcf07f478a3280dd0955336",
|
||||
"pod_name": "redis-ha-haproxy-ffb8d889d-8x9kj",
|
||||
"namespace_name": "istio-system",
|
||||
"host": "node0",
|
||||
"container_name": "mixer",
|
||||
"docker_id": "a673327e5e3dfefca3e773273e69eca64baaa4499fdc04e6eb9d621ad8688ad0"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"name" : "opensearch-cluster-master-1",
|
||||
"cluster_name" : "opensearch-cluster",
|
||||
"cluster_uuid" : "Tyj3pwftSLqC195T-TJ46A",
|
||||
"version" : {
|
||||
"distribution" : "opensearch",
|
||||
"number" : "2.0.1",
|
||||
"build_type" : "tar",
|
||||
"build_hash" : "6462a546240f6d7a158519499729bce12dc1058b",
|
||||
"build_date" : "2022-06-15T08:47:42.243126494Z",
|
||||
"build_snapshot" : false,
|
||||
"lucene_version" : "9.1.0",
|
||||
"minimum_wire_compatibility_version" : "7.10.0",
|
||||
"minimum_index_compatibility_version" : "7.0.0"
|
||||
},
|
||||
"tagline" : "The OpenSearch Project: https://opensearch.org/"
|
||||
}
|
||||
@@ -1,189 +0,0 @@
|
||||
{
|
||||
"took" : 8,
|
||||
"timed_out" : false,
|
||||
"_shards" : {
|
||||
"total" : 1,
|
||||
"successful" : 1,
|
||||
"skipped" : 0,
|
||||
"failed" : 0
|
||||
},
|
||||
"hits" : {
|
||||
"total" : {
|
||||
"value" : 10000,
|
||||
"relation" : "gte"
|
||||
},
|
||||
"max_score" : 1.0,
|
||||
"hits" : [
|
||||
{
|
||||
"_index" : "ks-logstash-log-2022.07.05",
|
||||
"_id" : "bG3czYEBJ4hVKmXbxLgk",
|
||||
"_score" : 1.0,
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:45.982Z",
|
||||
"log" : "[2022-07-05T10:16:45,982][INFO ][o.o.a.u.d.DestinationMigrationCoordinator] [opensearch-cluster-master-1] Detected cluster change event for destination migration\n",
|
||||
"time" : "2022-07-05T10:16:45.982530956Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "opensearch-cluster-master-1",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "opensearch",
|
||||
"docker_id" : "a8f3ac4772ae75ff151742398a4b2750746d43cafcec3f54732c8927a71bffe0",
|
||||
"container_image" : "opensearchproject/opensearch:2.0.1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index" : "ks-logstash-log-2022.07.05",
|
||||
"_id" : "bW3czYEBJ4hVKmXbxLgk",
|
||||
"_score" : 1.0,
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:46.103Z",
|
||||
"log" : "[2022-07-05T10:16:46,102][INFO ][o.o.a.u.d.DestinationMigrationCoordinator] [opensearch-cluster-master-1] Detected cluster change event for destination migration\n",
|
||||
"time" : "2022-07-05T10:16:46.103075119Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "opensearch-cluster-master-1",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "opensearch",
|
||||
"docker_id" : "a8f3ac4772ae75ff151742398a4b2750746d43cafcec3f54732c8927a71bffe0",
|
||||
"container_image" : "opensearchproject/opensearch:2.0.1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index" : "ks-logstash-log-2022.07.05",
|
||||
"_id" : "723czYEBJ4hVKmXbw7d7",
|
||||
"_score" : 1.0,
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:43.156Z",
|
||||
"log" : "level=info msg=\"Killed Fluent Bit\"\n",
|
||||
"time" : "2022-07-05T10:16:43.15685138Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "fluent-bit-spc9q",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "fluent-bit",
|
||||
"docker_id" : "0a179f94962ccdec532f386e9bb6aa090707aa528b396a3209bb5988adb2bceb",
|
||||
"container_image" : "kubesphere/fluent-bit:v1.9.4"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index" : "ks-logstash-log-2022.07.05",
|
||||
"_id" : "BdTczYEBDEKcFrNwxtcs",
|
||||
"_score" : 1.0,
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:46.154Z",
|
||||
"log" : "[2022-07-05T10:16:46,154][INFO ][o.o.a.u.d.DestinationMigrationCoordinator] [opensearch-cluster-master-2] Detected cluster change event for destination migration\n",
|
||||
"time" : "2022-07-05T10:16:46.154654969Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "opensearch-cluster-master-2",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "opensearch",
|
||||
"docker_id" : "80ef8f6428d9231d131776e7f8a827a9c27a5d2bcf8959b40494659597e0902f",
|
||||
"container_image" : "opensearchproject/opensearch:2.0.1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index" : "ks-logstash-log-2022.07.05",
|
||||
"_id" : "BtTczYEBDEKcFrNwxtcs",
|
||||
"_score" : 1.0,
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:46.199Z",
|
||||
"log" : "[2022-07-05T10:16:46,199][INFO ][o.o.a.u.d.DestinationMigrationCoordinator] [opensearch-cluster-master-2] Detected cluster change event for destination migration\n",
|
||||
"time" : "2022-07-05T10:16:46.199409801Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "opensearch-cluster-master-2",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "opensearch",
|
||||
"docker_id" : "80ef8f6428d9231d131776e7f8a827a9c27a5d2bcf8959b40494659597e0902f",
|
||||
"container_image" : "opensearchproject/opensearch:2.0.1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index" : "ks-logstash-log-2022.07.05",
|
||||
"_id" : "bm3czYEBJ4hVKmXbxLgk",
|
||||
"_score" : 1.0,
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:46.150Z",
|
||||
"log" : "[2022-07-05T10:16:46,150][INFO ][o.o.a.u.d.DestinationMigrationCoordinator] [opensearch-cluster-master-1] Detected cluster change event for destination migration\n",
|
||||
"time" : "2022-07-05T10:16:46.150653446Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "opensearch-cluster-master-1",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "opensearch",
|
||||
"docker_id" : "a8f3ac4772ae75ff151742398a4b2750746d43cafcec3f54732c8927a71bffe0",
|
||||
"container_image" : "opensearchproject/opensearch:2.0.1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index" : "ks-logstash-log-2022.07.05",
|
||||
"_id" : "8G3czYEBJ4hVKmXbw7d7",
|
||||
"_score" : 1.0,
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:43.156Z",
|
||||
"log" : "level=info msg=\"Config file changed, stopped Fluent Bit\"\n",
|
||||
"time" : "2022-07-05T10:16:43.156857206Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "fluent-bit-spc9q",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "fluent-bit",
|
||||
"docker_id" : "0a179f94962ccdec532f386e9bb6aa090707aa528b396a3209bb5988adb2bceb",
|
||||
"container_image" : "kubesphere/fluent-bit:v1.9.4"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index" : "ks-logstash-log-2022.07.05",
|
||||
"_id" : "8W3czYEBJ4hVKmXbw7d7",
|
||||
"_score" : 1.0,
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:43.156Z",
|
||||
"log" : "[2022/07/05 10:16:43] [engine] caught signal (SIGTERM)\n",
|
||||
"time" : "2022-07-05T10:16:43.156864029Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "fluent-bit-spc9q",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "fluent-bit",
|
||||
"docker_id" : "0a179f94962ccdec532f386e9bb6aa090707aa528b396a3209bb5988adb2bceb",
|
||||
"container_image" : "kubesphere/fluent-bit:v1.9.4"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index" : "ks-logstash-log-2022.07.05",
|
||||
"_id" : "b23czYEBJ4hVKmXbxLgk",
|
||||
"_score" : 1.0,
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:46.194Z",
|
||||
"log" : "[2022-07-05T10:16:46,193][INFO ][o.o.a.u.d.DestinationMigrationCoordinator] [opensearch-cluster-master-1] Detected cluster change event for destination migration\n",
|
||||
"time" : "2022-07-05T10:16:46.194249241Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "opensearch-cluster-master-1",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "opensearch",
|
||||
"docker_id" : "a8f3ac4772ae75ff151742398a4b2750746d43cafcec3f54732c8927a71bffe0",
|
||||
"container_image" : "opensearchproject/opensearch:2.0.1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index" : "ks-logstash-log-2022.07.05",
|
||||
"_id" : "8m3czYEBJ4hVKmXbw7d7",
|
||||
"_score" : 1.0,
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:43.156Z",
|
||||
"log" : "[2022/07/05 10:16:43] [ info] [input] pausing systemd.0\n",
|
||||
"time" : "2022-07-05T10:16:43.15689069Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "fluent-bit-spc9q",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "fluent-bit",
|
||||
"docker_id" : "0a179f94962ccdec532f386e9bb6aa090707aa528b396a3209bb5988adb2bceb",
|
||||
"container_image" : "kubesphere/fluent-bit:v1.9.4"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,150 +0,0 @@
|
||||
{
|
||||
"hits" : {
|
||||
"total" : {
|
||||
"value" : 10000,
|
||||
"relation" : "gte"
|
||||
},
|
||||
"hits" : [
|
||||
{
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:45.982Z",
|
||||
"log" : "[2022-07-05T10:16:45,982][INFO ][o.o.a.u.d.DestinationMigrationCoordinator] [opensearch-cluster-master-1] Detected cluster change event for destination migration\n",
|
||||
"time" : "2022-07-05T10:16:45.982530956Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "opensearch-cluster-master-1",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "opensearch",
|
||||
"docker_id" : "a8f3ac4772ae75ff151742398a4b2750746d43cafcec3f54732c8927a71bffe0",
|
||||
"container_image" : "opensearchproject/opensearch:2.0.1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:46.103Z",
|
||||
"log" : "[2022-07-05T10:16:46,102][INFO ][o.o.a.u.d.DestinationMigrationCoordinator] [opensearch-cluster-master-1] Detected cluster change event for destination migration\n",
|
||||
"time" : "2022-07-05T10:16:46.103075119Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "opensearch-cluster-master-1",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "opensearch",
|
||||
"docker_id" : "a8f3ac4772ae75ff151742398a4b2750746d43cafcec3f54732c8927a71bffe0",
|
||||
"container_image" : "opensearchproject/opensearch:2.0.1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:43.156Z",
|
||||
"log" : "level=info msg=\"Killed Fluent Bit\"\n",
|
||||
"time" : "2022-07-05T10:16:43.15685138Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "fluent-bit-spc9q",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "fluent-bit",
|
||||
"docker_id" : "0a179f94962ccdec532f386e9bb6aa090707aa528b396a3209bb5988adb2bceb",
|
||||
"container_image" : "kubesphere/fluent-bit:v1.9.4"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:46.154Z",
|
||||
"log" : "[2022-07-05T10:16:46,154][INFO ][o.o.a.u.d.DestinationMigrationCoordinator] [opensearch-cluster-master-2] Detected cluster change event for destination migration\n",
|
||||
"time" : "2022-07-05T10:16:46.154654969Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "opensearch-cluster-master-2",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "opensearch",
|
||||
"docker_id" : "80ef8f6428d9231d131776e7f8a827a9c27a5d2bcf8959b40494659597e0902f",
|
||||
"container_image" : "opensearchproject/opensearch:2.0.1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:46.199Z",
|
||||
"log" : "[2022-07-05T10:16:46,199][INFO ][o.o.a.u.d.DestinationMigrationCoordinator] [opensearch-cluster-master-2] Detected cluster change event for destination migration\n",
|
||||
"time" : "2022-07-05T10:16:46.199409801Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "opensearch-cluster-master-2",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "opensearch",
|
||||
"docker_id" : "80ef8f6428d9231d131776e7f8a827a9c27a5d2bcf8959b40494659597e0902f",
|
||||
"container_image" : "opensearchproject/opensearch:2.0.1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:46.150Z",
|
||||
"log" : "[2022-07-05T10:16:46,150][INFO ][o.o.a.u.d.DestinationMigrationCoordinator] [opensearch-cluster-master-1] Detected cluster change event for destination migration\n",
|
||||
"time" : "2022-07-05T10:16:46.150653446Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "opensearch-cluster-master-1",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "opensearch",
|
||||
"docker_id" : "a8f3ac4772ae75ff151742398a4b2750746d43cafcec3f54732c8927a71bffe0",
|
||||
"container_image" : "opensearchproject/opensearch:2.0.1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:43.156Z",
|
||||
"log" : "level=info msg=\"Config file changed, stopped Fluent Bit\"\n",
|
||||
"time" : "2022-07-05T10:16:43.156857206Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "fluent-bit-spc9q",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "fluent-bit",
|
||||
"docker_id" : "0a179f94962ccdec532f386e9bb6aa090707aa528b396a3209bb5988adb2bceb",
|
||||
"container_image" : "kubesphere/fluent-bit:v1.9.4"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:43.156Z",
|
||||
"log" : "[2022/07/05 10:16:43] [engine] caught signal (SIGTERM)\n",
|
||||
"time" : "2022-07-05T10:16:43.156864029Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "fluent-bit-spc9q",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "fluent-bit",
|
||||
"docker_id" : "0a179f94962ccdec532f386e9bb6aa090707aa528b396a3209bb5988adb2bceb",
|
||||
"container_image" : "kubesphere/fluent-bit:v1.9.4"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:46.194Z",
|
||||
"log" : "[2022-07-05T10:16:46,193][INFO ][o.o.a.u.d.DestinationMigrationCoordinator] [opensearch-cluster-master-1] Detected cluster change event for destination migration\n",
|
||||
"time" : "2022-07-05T10:16:46.194249241Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "opensearch-cluster-master-1",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "opensearch",
|
||||
"docker_id" : "a8f3ac4772ae75ff151742398a4b2750746d43cafcec3f54732c8927a71bffe0",
|
||||
"container_image" : "opensearchproject/opensearch:2.0.1"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"_source" : {
|
||||
"@timestamp" : "2022-07-05T10:16:43.156Z",
|
||||
"log" : "[2022/07/05 10:16:43] [ info] [input] pausing systemd.0\n",
|
||||
"time" : "2022-07-05T10:16:43.15689069Z",
|
||||
"kubernetes" : {
|
||||
"pod_name" : "fluent-bit-spc9q",
|
||||
"namespace_name" : "fluent",
|
||||
"container_name" : "fluent-bit",
|
||||
"docker_id" : "0a179f94962ccdec532f386e9bb6aa090707aa528b396a3209bb5988adb2bceb",
|
||||
"container_image" : "kubesphere/fluent-bit:v1.9.4"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
package versions
|
||||
|
||||
// Error is the top-level error envelope returned by Elasticsearch/OpenSearch.
type Error struct {
	// Status is the HTTP status code echoed in the response body.
	Status int `json:"status"`
	Details *ErrorDetails `json:"error,omitempty"`
}

// ErrorDetails carries the structured error payload; RootCause may nest
// further ErrorDetails describing the underlying failures.
type ErrorDetails struct {
	Type string `json:"type"`
	Reason string `json:"reason"`
	ResourceType string `json:"resource.type,omitempty"`
	ResourceId string `json:"resource.id,omitempty"`
	Index string `json:"index,omitempty"`
	Phase string `json:"phase,omitempty"`
	Grouped bool `json:"grouped,omitempty"`
	CausedBy map[string]interface{} `json:"caused_by,omitempty"`
	RootCause []*ErrorDetails `json:"root_cause,omitempty"`
	FailedShards []map[string]interface{} `json:"failed_shards,omitempty"`
}
|
||||
@@ -1,9 +0,0 @@
|
||||
package versions
|
||||
|
||||
// Client is the version-agnostic search interface implemented by each
// versioned Elasticsearch/OpenSearch client.
type Client interface {
	// Search runs the given request body against the indices; when scroll
	// is true the response includes a scroll cursor for paging.
	Search(indices string, body []byte, scroll bool) ([]byte, error)
	// Scroll fetches the next page for the given scroll id.
	Scroll(id string) ([]byte, error)
	// ClearScroll releases server-side resources held by the scroll id.
	ClearScroll(id string)
	// GetTotalHitCount extracts the total-hit count from a decoded
	// hits.total value, whose wire shape varies across versions.
	GetTotalHitCount(v interface{}) int64
}
|
||||
@@ -1,137 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/opensearch-project/opensearch-go"
|
||||
"github.com/opensearch-project/opensearch-go/opensearchapi"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es/versions"
|
||||
)
|
||||
|
||||
// OpenSearch is the versioned client wrapper around the official
// opensearch-go client, scoped to a default index.
type OpenSearch struct {
	// client is the underlying opensearch-go client.
	client *opensearch.Client
	// index is the default index (or index pattern) for searches.
	index string
}
|
||||
|
||||
func New(address string, basicAuth bool, username, password, index string) (*OpenSearch, error) {
|
||||
var client *opensearch.Client
|
||||
var err error
|
||||
|
||||
if !basicAuth {
|
||||
username = ""
|
||||
password = ""
|
||||
}
|
||||
|
||||
client, err = opensearch.NewClient(opensearch.Config{
|
||||
Addresses: []string{address},
|
||||
Username: username,
|
||||
Password: password,
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
return &OpenSearch{client: client, index: index}, err
|
||||
}
|
||||
|
||||
func (o *OpenSearch) Search(indices string, body []byte, scroll bool) ([]byte, error) {
|
||||
opts := []func(*opensearchapi.SearchRequest){
|
||||
o.client.Search.WithContext(context.Background()),
|
||||
o.client.Search.WithIndex(indices),
|
||||
o.client.Search.WithRestTotalHitsAsInt(true),
|
||||
o.client.Search.WithIgnoreUnavailable(true),
|
||||
o.client.Search.WithBody(bytes.NewBuffer(body)),
|
||||
}
|
||||
if scroll {
|
||||
opts = append(opts, o.client.Search.WithScroll(time.Minute))
|
||||
}
|
||||
|
||||
response, err := o.client.Search(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
if response.IsError() {
|
||||
return nil, parseError(response)
|
||||
}
|
||||
|
||||
return io.ReadAll(response.Body)
|
||||
}
|
||||
|
||||
func (o *OpenSearch) Scroll(id string) ([]byte, error) {
|
||||
body, err := jsoniter.Marshal(map[string]string{
|
||||
"scroll_id": id,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response, err := o.client.Scroll(
|
||||
o.client.Scroll.WithContext(context.Background()),
|
||||
o.client.Scroll.WithBody(bytes.NewBuffer(body)),
|
||||
o.client.Scroll.WithScroll(time.Minute))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
if response.IsError() {
|
||||
return nil, parseError(response)
|
||||
}
|
||||
|
||||
return io.ReadAll(response.Body)
|
||||
}
|
||||
|
||||
func (o *OpenSearch) ClearScroll(scrollId string) {
|
||||
response, _ := o.client.ClearScroll(
|
||||
o.client.ClearScroll.WithContext(context.Background()),
|
||||
o.client.ClearScroll.WithScrollID(scrollId))
|
||||
defer response.Body.Close()
|
||||
}
|
||||
|
||||
func (o *OpenSearch) GetTotalHitCount(v interface{}) int64 {
|
||||
f, _ := v.(float64)
|
||||
return int64(f)
|
||||
}
|
||||
|
||||
func parseError(response *opensearchapi.Response) error {
|
||||
var e versions.Error
|
||||
if err := json.NewDecoder(response.Body).Decode(&e); err != nil {
|
||||
return err
|
||||
} else {
|
||||
// Print the response status and error information.
|
||||
if len(e.Details.RootCause) != 0 {
|
||||
return fmt.Errorf("type: %v, reason: %v", e.Details.Type, e.Details.RootCause[0].Reason)
|
||||
} else {
|
||||
return fmt.Errorf("type: %v, reason: %v", e.Details.Type, e.Details.Reason)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,137 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/opensearch-project/opensearch-go/v2"
|
||||
"github.com/opensearch-project/opensearch-go/v2/opensearchapi"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es/versions"
|
||||
)
|
||||
|
||||
// OpenSearch wraps an OpenSearch v2 API client together with an index
// name. NOTE(review): the methods in this file take indices per call and
// never read the index field — presumably callers elsewhere use it; confirm.
type OpenSearch struct {
	client *opensearch.Client
	index  string
}
|
||||
|
||||
// New constructs an OpenSearch v2 client for the given address and index.
// When basicAuth is false the supplied credentials are discarded.
// NOTE(review): TLS server-certificate verification is disabled
// (InsecureSkipVerify) — acceptable only for trusted in-cluster endpoints.
func New(address string, basicAuth bool, username, password, index string) (*OpenSearch, error) {
	var client *opensearch.Client
	var err error

	if !basicAuth {
		username = ""
		password = ""
	}

	client, err = opensearch.NewClient(opensearch.Config{
		Addresses: []string{address},
		Username:  username,
		Password:  password,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	})

	return &OpenSearch{client: client, index: index}, err
}
|
||||
|
||||
// Search executes the query in body against the given indices. Total hit
// counts are requested as plain integers (RestTotalHitsAsInt) so that
// GetTotalHitCount can decode them as numbers. When scroll is true a
// one-minute scroll context is opened.
func (o *OpenSearch) Search(indices string, body []byte, scroll bool) ([]byte, error) {
	opts := []func(*opensearchapi.SearchRequest){
		o.client.Search.WithContext(context.Background()),
		o.client.Search.WithRestTotalHitsAsInt(true),
		o.client.Search.WithIndex(indices),
		o.client.Search.WithIgnoreUnavailable(true),
		o.client.Search.WithBody(bytes.NewBuffer(body)),
	}
	if scroll {
		opts = append(opts, o.client.Search.WithScroll(time.Minute))
	}

	response, err := o.client.Search(opts...)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	if response.IsError() {
		return nil, parseError(response)
	}

	return io.ReadAll(response.Body)
}
|
||||
|
||||
// Scroll retrieves the next batch of results for the scroll context id,
// extending the scroll keep-alive by one minute.
func (o *OpenSearch) Scroll(id string) ([]byte, error) {
	body, err := jsoniter.Marshal(map[string]string{
		"scroll_id": id,
	})
	if err != nil {
		return nil, err
	}

	response, err := o.client.Scroll(
		o.client.Scroll.WithContext(context.Background()),
		o.client.Scroll.WithBody(bytes.NewBuffer(body)),
		o.client.Scroll.WithScroll(time.Minute))
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	if response.IsError() {
		return nil, parseError(response)
	}

	return io.ReadAll(response.Body)
}
|
||||
|
||||
func (o *OpenSearch) ClearScroll(scrollId string) {
|
||||
response, _ := o.client.ClearScroll(
|
||||
o.client.ClearScroll.WithContext(context.Background()),
|
||||
o.client.ClearScroll.WithScrollID(scrollId))
|
||||
defer response.Body.Close()
|
||||
}
|
||||
|
||||
// GetTotalHitCount interprets a raw "hits.total" value as an int64. With
// RestTotalHitsAsInt enabled the JSON number decodes as float64; any
// other type yields 0 via the ignored type-assertion failure.
func (o *OpenSearch) GetTotalHitCount(v interface{}) int64 {
	f, _ := v.(float64)
	return int64(f)
}
|
||||
|
||||
func parseError(response *opensearchapi.Response) error {
|
||||
var e versions.Error
|
||||
if err := json.NewDecoder(response.Body).Decode(&e); err != nil {
|
||||
return err
|
||||
} else {
|
||||
// Print the response status and error information.
|
||||
if len(e.Details.RootCause) != 0 {
|
||||
return fmt.Errorf("type: %v, reason: %v", e.Details.Type, e.Details.RootCause[0].Reason)
|
||||
} else {
|
||||
return fmt.Errorf("type: %v, reason: %v", e.Details.Type, e.Details.Reason)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,136 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v5
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/elastic/go-elasticsearch/v5"
|
||||
"github.com/elastic/go-elasticsearch/v5/esapi"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es/versions"
|
||||
)
|
||||
|
||||
// Elastic wraps a go-elasticsearch v5 client together with an index name.
// NOTE(review): the methods in this file take indices per call and never
// read the index field — presumably callers elsewhere use it; confirm.
type Elastic struct {
	client *elasticsearch.Client
	index  string
}
|
||||
|
||||
// New constructs an Elasticsearch v5 client for the given address and
// index. When basicAuth is false the supplied credentials are discarded.
// NOTE(review): TLS server-certificate verification is disabled
// (InsecureSkipVerify) — acceptable only for trusted in-cluster endpoints.
func New(address string, basicAuth bool, username, password, index string) (*Elastic, error) {
	var client *elasticsearch.Client
	var err error

	if !basicAuth {
		username = ""
		password = ""
	}

	client, err = elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{address},
		Username:  username,
		Password:  password,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	})

	return &Elastic{client: client, index: index}, err
}
|
||||
|
||||
// Search executes the query in body against the given indices. When
// scroll is true a one-minute scroll context is opened. ES5 reports
// hits.total as a plain number, so no total-hits option is needed here.
func (e *Elastic) Search(indices string, body []byte, scroll bool) ([]byte, error) {
	opts := []func(*esapi.SearchRequest){
		e.client.Search.WithContext(context.Background()),
		e.client.Search.WithIndex(indices),
		e.client.Search.WithIgnoreUnavailable(true),
		e.client.Search.WithBody(bytes.NewBuffer(body)),
	}
	if scroll {
		opts = append(opts, e.client.Search.WithScroll(time.Minute))
	}

	response, err := e.client.Search(opts...)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	if response.IsError() {
		return nil, parseError(response)
	}

	return io.ReadAll(response.Body)
}
|
||||
|
||||
// Scroll retrieves the next batch of results for the scroll context id,
// extending the scroll keep-alive by one minute.
func (e *Elastic) Scroll(id string) ([]byte, error) {
	body, err := jsoniter.Marshal(map[string]string{
		"scroll_id": id,
	})
	if err != nil {
		return nil, err
	}

	response, err := e.client.Scroll(
		e.client.Scroll.WithContext(context.Background()),
		e.client.Scroll.WithBody(bytes.NewBuffer(body)),
		e.client.Scroll.WithScroll(time.Minute))
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	if response.IsError() {
		return nil, parseError(response)
	}

	return io.ReadAll(response.Body)
}
|
||||
|
||||
func (e *Elastic) ClearScroll(scrollId string) {
|
||||
response, _ := e.client.ClearScroll(
|
||||
e.client.ClearScroll.WithContext(context.Background()),
|
||||
e.client.ClearScroll.WithScrollID(scrollId))
|
||||
defer response.Body.Close()
|
||||
}
|
||||
|
||||
// GetTotalHitCount interprets a raw "hits.total" value as an int64. ES5
// reports the total as a plain number, which decodes as float64; any
// other type yields 0 via the ignored type-assertion failure.
func (e *Elastic) GetTotalHitCount(v interface{}) int64 {
	f, _ := v.(float64)
	return int64(f)
}
|
||||
|
||||
func parseError(response *esapi.Response) error {
|
||||
var e versions.Error
|
||||
if err := json.NewDecoder(response.Body).Decode(&e); err != nil {
|
||||
return err
|
||||
} else {
|
||||
// Print the response status and error information.
|
||||
if len(e.Details.RootCause) != 0 {
|
||||
return fmt.Errorf("type: %v, reason: %v", e.Details.Type, e.Details.RootCause[0].Reason)
|
||||
} else {
|
||||
return fmt.Errorf("type: %v, reason: %v", e.Details.Type, e.Details.Reason)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,136 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v6
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/elastic/go-elasticsearch/v6"
|
||||
"github.com/elastic/go-elasticsearch/v6/esapi"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es/versions"
|
||||
)
|
||||
|
||||
// Elastic wraps a go-elasticsearch v6 client together with an index name.
// NOTE(review): the Client field is exported here while the sibling
// version packages keep it unexported; presumably some caller reaches in
// directly — confirm before unexporting for consistency.
type Elastic struct {
	Client *elasticsearch.Client
	index  string
}
|
||||
|
||||
// New constructs an Elasticsearch v6 client for the given address and
// index. When basicAuth is false the supplied credentials are discarded.
// NOTE(review): TLS server-certificate verification is disabled
// (InsecureSkipVerify) — acceptable only for trusted in-cluster endpoints.
func New(address string, basicAuth bool, username, password, index string) (*Elastic, error) {
	var client *elasticsearch.Client
	var err error

	if !basicAuth {
		username = ""
		password = ""
	}

	client, err = elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{address},
		Username:  username,
		Password:  password,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	})

	return &Elastic{Client: client, index: index}, err
}
|
||||
|
||||
// Search executes the query in body against the given indices. When
// scroll is true a one-minute scroll context is opened. ES6 reports
// hits.total as a plain number, so no total-hits option is needed here.
func (e *Elastic) Search(indices string, body []byte, scroll bool) ([]byte, error) {
	opts := []func(*esapi.SearchRequest){
		e.Client.Search.WithContext(context.Background()),
		e.Client.Search.WithIndex(indices),
		e.Client.Search.WithIgnoreUnavailable(true),
		e.Client.Search.WithBody(bytes.NewBuffer(body)),
	}
	if scroll {
		opts = append(opts, e.Client.Search.WithScroll(time.Minute))
	}

	response, err := e.Client.Search(opts...)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	if response.IsError() {
		return nil, parseError(response)
	}

	return io.ReadAll(response.Body)
}
|
||||
|
||||
// Scroll retrieves the next batch of results for the scroll context id,
// extending the scroll keep-alive by one minute.
func (e *Elastic) Scroll(id string) ([]byte, error) {
	body, err := jsoniter.Marshal(map[string]string{
		"scroll_id": id,
	})
	if err != nil {
		return nil, err
	}

	response, err := e.Client.Scroll(
		e.Client.Scroll.WithContext(context.Background()),
		e.Client.Scroll.WithBody(bytes.NewBuffer(body)),
		e.Client.Scroll.WithScroll(time.Minute))
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	if response.IsError() {
		return nil, parseError(response)
	}

	return io.ReadAll(response.Body)
}
|
||||
|
||||
func (e *Elastic) ClearScroll(scrollId string) {
|
||||
response, _ := e.Client.ClearScroll(
|
||||
e.Client.ClearScroll.WithContext(context.Background()),
|
||||
e.Client.ClearScroll.WithScrollID(scrollId))
|
||||
defer response.Body.Close()
|
||||
}
|
||||
|
||||
// GetTotalHitCount interprets a raw "hits.total" value as an int64. ES6
// reports the total as a plain number, which decodes as float64; any
// other type yields 0 via the ignored type-assertion failure.
func (e *Elastic) GetTotalHitCount(v interface{}) int64 {
	f, _ := v.(float64)
	return int64(f)
}
|
||||
|
||||
func parseError(response *esapi.Response) error {
|
||||
var e versions.Error
|
||||
if err := json.NewDecoder(response.Body).Decode(&e); err != nil {
|
||||
return err
|
||||
} else {
|
||||
// Print the response status and error information.
|
||||
if len(e.Details.RootCause) != 0 {
|
||||
return fmt.Errorf("type: %v, reason: %v", e.Details.Type, e.Details.RootCause[0].Reason)
|
||||
} else {
|
||||
return fmt.Errorf("type: %v, reason: %v", e.Details.Type, e.Details.Reason)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,139 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v7
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/elastic/go-elasticsearch/v7"
|
||||
"github.com/elastic/go-elasticsearch/v7/esapi"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es/versions"
|
||||
)
|
||||
|
||||
// Elastic wraps a go-elasticsearch v7 client together with an index name.
// NOTE(review): the methods in this file take indices per call and never
// read the index field — presumably callers elsewhere use it; confirm.
type Elastic struct {
	client *elasticsearch.Client
	index  string
}
|
||||
|
||||
// New constructs an Elasticsearch v7 client for the given address and
// index. When basicAuth is false the supplied credentials are discarded.
// NOTE(review): TLS server-certificate verification is disabled
// (InsecureSkipVerify) — acceptable only for trusted in-cluster endpoints.
func New(address string, basicAuth bool, username, password, index string) (*Elastic, error) {
	var client *elasticsearch.Client
	var err error

	if !basicAuth {
		username = ""
		password = ""
	}

	client, err = elasticsearch.NewClient(elasticsearch.Config{
		Addresses: []string{address},
		Username:  username,
		Password:  password,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true,
			},
		},
	})

	return &Elastic{client: client, index: index}, err
}
|
||||
|
||||
// Search executes the query in body against the given indices. ES7 only
// returns an accurate total when track_total_hits is set, hence
// WithTrackTotalHits(true); the total then arrives as an object handled
// by GetTotalHitCount. When scroll is true a one-minute scroll context
// is opened.
func (e *Elastic) Search(indices string, body []byte, scroll bool) ([]byte, error) {
	opts := []func(*esapi.SearchRequest){
		e.client.Search.WithContext(context.Background()),
		e.client.Search.WithIndex(indices),
		e.client.Search.WithTrackTotalHits(true),
		e.client.Search.WithIgnoreUnavailable(true),
		e.client.Search.WithBody(bytes.NewBuffer(body)),
	}
	if scroll {
		opts = append(opts, e.client.Search.WithScroll(time.Minute))
	}

	response, err := e.client.Search(opts...)
	if err != nil {
		return nil, err
	}
	defer response.Body.Close()

	if response.IsError() {
		return nil, parseError(response)
	}

	return io.ReadAll(response.Body)
}
|
||||
|
||||
func (e *Elastic) Scroll(id string) ([]byte, error) {
|
||||
body, err := jsoniter.Marshal(map[string]string{
|
||||
"scroll_id": id,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response, err := e.client.Scroll(
|
||||
e.client.Scroll.WithContext(context.Background()),
|
||||
e.client.Scroll.WithBody(bytes.NewBuffer(body)),
|
||||
e.client.Scroll.WithScroll(time.Minute))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
if response.IsError() {
|
||||
return nil, parseError(response)
|
||||
}
|
||||
|
||||
b, err := io.ReadAll(response.Body)
|
||||
return b, err
|
||||
}
|
||||
|
||||
func (e *Elastic) ClearScroll(scrollId string) {
|
||||
response, _ := e.client.ClearScroll(
|
||||
e.client.ClearScroll.WithContext(context.Background()),
|
||||
e.client.ClearScroll.WithScrollID(scrollId))
|
||||
defer response.Body.Close()
|
||||
}
|
||||
|
||||
// GetTotalHitCount converts the ES7-style "hits.total" object
// ({"value": N, "relation": ...}) to int64. Malformed input yields 0
// via the ignored type-assertion failures.
func (e *Elastic) GetTotalHitCount(v interface{}) int64 {
	m, _ := v.(map[string]interface{})
	f, _ := m["value"].(float64)
	return int64(f)
}
|
||||
|
||||
func parseError(response *esapi.Response) error {
|
||||
var e versions.Error
|
||||
if err := json.NewDecoder(response.Body).Decode(&e); err != nil {
|
||||
return err
|
||||
} else {
|
||||
// Print the response status and error information.
|
||||
if len(e.Details.RootCause) != 0 {
|
||||
return fmt.Errorf("type: %v, reason: %v", e.Details.Type, e.Details.RootCause[0].Reason)
|
||||
} else {
|
||||
return fmt.Errorf("type: %v, reason: %v", e.Details.Type, e.Details.Reason)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,170 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es/query"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/events"
|
||||
)
|
||||
|
||||
// client implements events.Client on top of a version-dispatching
// Elasticsearch/OpenSearch client.
type client struct {
	c *es.Client
}
|
||||
|
||||
func NewClient(options *events.Options) (events.Client, error) {
|
||||
c := &client{}
|
||||
|
||||
var err error
|
||||
c.c, err = es.NewClient(options.Host, options.BasicAuth, options.Username, options.Password, options.IndexPrefix, options.Version)
|
||||
return c, err
|
||||
}
|
||||
|
||||
func (c *client) SearchEvents(filter *events.Filter, from, size int64,
|
||||
sort string) (*events.Events, error) {
|
||||
|
||||
b := query.NewBuilder().
|
||||
WithQuery(parseToQueryPart(filter)).
|
||||
WithSort("lastTimestamp", sort).
|
||||
WithFrom(from).
|
||||
WithSize(size)
|
||||
|
||||
resp, err := c.c.Search(b, filter.StartTime, filter.EndTime, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp == nil || len(resp.AllHits) == 0 {
|
||||
return &events.Events{}, nil
|
||||
}
|
||||
|
||||
evts := events.Events{Total: c.c.GetTotalHitCount(resp.Total)}
|
||||
for _, hit := range resp.AllHits {
|
||||
evts.Records = append(evts.Records, hit.Source)
|
||||
}
|
||||
return &evts, nil
|
||||
}
|
||||
|
||||
// CountOverTime buckets the events matching filter into a date histogram
// on lastTimestamp. An empty interval defaults to 15 minutes.
func (c *client) CountOverTime(filter *events.Filter, interval string) (*events.Histogram, error) {
	if interval == "" {
		interval = "15m"
	}

	b := query.NewBuilder().
		WithQuery(parseToQueryPart(filter)).
		WithAggregations(query.NewAggregations().
			WithDateHistogramAggregation("lastTimestamp", interval)).
		WithSize(0) // aggregation-only query; no hits requested

	resp, err := c.c.Search(b, filter.StartTime, filter.EndTime, false)
	if err != nil {
		return nil, err
	}

	histo := events.Histogram{Total: c.c.GetTotalHitCount(resp.Total)}
	for _, bucket := range resp.Buckets {
		histo.Buckets = append(histo.Buckets,
			events.Bucket{Time: bucket.Key, Count: bucket.Count})
	}
	return &histo, nil
}
|
||||
|
||||
// StatisticsOnResources counts the events matching filter and the number
// of distinct involved objects (via a cardinality aggregation on
// involvedObject.uid.keyword).
func (c *client) StatisticsOnResources(filter *events.Filter) (*events.Statistics, error) {

	b := query.NewBuilder().
		WithQuery(parseToQueryPart(filter)).
		WithAggregations(query.NewAggregations().
			WithCardinalityAggregation("involvedObject.uid.keyword")).
		WithSize(0) // aggregation-only query; no hits requested

	resp, err := c.c.Search(b, filter.StartTime, filter.EndTime, false)
	if err != nil {
		return nil, err
	}

	return &events.Statistics{
		Resources: resp.Value,
		Events:    c.c.GetTotalHitCount(resp.Total),
	}, nil
}
|
||||
|
||||
func parseToQueryPart(f *events.Filter) *query.Query {
|
||||
if f == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var mini int32 = 1
|
||||
b := query.NewBool()
|
||||
|
||||
bi := query.NewBool().WithMinimumShouldMatch(mini)
|
||||
for k, v := range f.InvolvedObjectNamespaceMap {
|
||||
if k == "" {
|
||||
bi.AppendShould(query.NewBool().
|
||||
AppendMustNot(query.NewExists("field", "involvedObject.namespace")))
|
||||
} else {
|
||||
bi.AppendShould(query.NewBool().
|
||||
AppendFilter(query.NewMatchPhrase("involvedObject.namespace.keyword", k)).
|
||||
AppendFilter(query.NewRange("lastTimestamp").
|
||||
WithGTE(v)))
|
||||
}
|
||||
}
|
||||
b.AppendFilter(bi)
|
||||
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrase("involvedObject.name.keyword", f.InvolvedObjectNames)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrasePrefix("involvedObject.name", f.InvolvedObjectNameFuzzy)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrase("involvedObject.kind", f.InvolvedObjectkinds)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrase("reason", f.Reasons)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
|
||||
bi = query.NewBool().WithMinimumShouldMatch(mini)
|
||||
for _, r := range f.ReasonFuzzy {
|
||||
bi.AppendShould(query.NewWildcard("reason.keyword", fmt.Sprintf("*"+r+"*")))
|
||||
}
|
||||
b.AppendFilter(bi)
|
||||
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendMultiShould(query.NewMultiMatchPhrasePrefix("message", f.MessageFuzzy)).
|
||||
WithMinimumShouldMatch(mini))
|
||||
|
||||
if f.Type != "" {
|
||||
b.AppendFilter(query.NewBool().
|
||||
AppendShould(query.NewMatchPhrase("type", f.Type)))
|
||||
}
|
||||
|
||||
r := query.NewRange("lastTimestamp")
|
||||
if !f.StartTime.IsZero() {
|
||||
r.WithGTE(f.StartTime)
|
||||
}
|
||||
if !f.EndTime.IsZero() {
|
||||
r.WithLTE(f.EndTime)
|
||||
}
|
||||
|
||||
b.AppendFilter(r)
|
||||
|
||||
return query.NewQuery().WithBool(b)
|
||||
}
|
||||
@@ -1,244 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/events"
|
||||
)
|
||||
|
||||
func MockElasticsearchService(pattern string, fakeCode int, fakeResp string) *httptest.Server {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc(pattern, func(res http.ResponseWriter, req *http.Request) {
|
||||
res.WriteHeader(fakeCode)
|
||||
res.Write([]byte(fakeResp))
|
||||
})
|
||||
return httptest.NewServer(mux)
|
||||
}
|
||||
|
||||
// TestStatisticsOnResources exercises StatisticsOnResources against a
// mock ES6 endpoint: one case with a valid aggregation response and one
// where the index is missing (404), which must surface as an error
// mentioning index_not_found_exception.
func TestStatisticsOnResources(t *testing.T) {
	var tests = []struct {
		description   string
		filter        events.Filter
		fakeVersion   string
		fakeCode      int
		fakeResp      string
		expected      events.Statistics
		expectedError bool
	}{{
		description: "ES index exists",
		filter:      events.Filter{},
		fakeVersion: "6",
		fakeCode:    200,
		fakeResp: `
{
	"took": 16,
	"timed_out": false,
	"_shards": {
		"total": 1,
		"successful": 1,
		"skipped": 0,
		"failed": 0
	},
	"hits": {
		"total": 10000,
		"max_score": null,
		"hits": [

		]
	},
	"aggregations": {
		"cardinality_aggregation": {
			"value": 100
		}
	}
}
`,
		expected: events.Statistics{
			Events:    10000,
			Resources: 100,
		},
		expectedError: false,
	}, {
		description: "ES index not exists",
		filter:      events.Filter{},
		fakeVersion: "6",
		fakeCode:    404,
		fakeResp: `
{
	"error": {
		"root_cause": [
			{
				"type": "index_not_found_exception",
				"reason": "no such index [events]",
				"resource.type": "index_or_alias",
				"resource.id": "events",
				"index_uuid": "_na_",
				"index": "events"
			}
		],
		"type": "index_not_found_exception",
		"reason": "no such index [events]",
		"resource.type": "index_or_alias",
		"resource.id": "events",
		"index_uuid": "_na_",
		"index": "events"
	},
	"status": 404
}
`,
		expectedError: true,
	}}

	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			mes := MockElasticsearchService("/", test.fakeCode, test.fakeResp)
			defer mes.Close()

			c, err := NewClient(&events.Options{
				Host:        mes.URL,
				IndexPrefix: "ks-logstash-events",
				Version:     test.fakeVersion,
			})
			if err != nil {
				t.Fatalf("create client error, %s", err)
			}

			stats, err := c.StatisticsOnResources(&test.filter)

			if test.expectedError {
				if err == nil {
					t.Fatalf("expected err like %s", test.fakeResp)
				} else if !strings.Contains(err.Error(), "index_not_found_exception") {
					t.Fatalf("err does not contain expected code: %d", test.fakeCode)
				}
			} else {
				if err != nil {
					t.Fatal(err)
				} else if diff := cmp.Diff(stats, &test.expected); diff != "" {
					t.Fatalf("%T differ (-got, +want): %s", test.expected, diff)
				}
			}
		})
	}
}
|
||||
|
||||
// TestParseToQueryPart checks that a Filter combining a namespace map
// (with creation-time lower bound), a fuzzy object name, an exact reason
// and a global start time serializes to the expected bool query JSON.
// Both sides are unmarshalled into maps so the comparison ignores key
// order and formatting.
func TestParseToQueryPart(t *testing.T) {
	q := `
{
	"query":{
		"bool":{
			"filter":[
				{
					"bool":{
						"should":[
							{
								"bool":{
									"filter":[
										{
											"match_phrase":{
												"involvedObject.namespace.keyword":"kubesphere-system"
											}
										},
										{
											"range":{
												"lastTimestamp":{
													"gte":"2020-01-01T01:01:01.000000001Z"
												}
											}
										}
									]
								}
							}
						],
						"minimum_should_match":1
					}
				},
				{
					"bool":{
						"should":[
							{
								"match_phrase_prefix":{
									"involvedObject.name":"istio"
								}
							}
						],
						"minimum_should_match":1
					}
				},
				{
					"bool":{
						"should":[
							{
								"match_phrase":{
									"reason":"unhealthy"
								}
							}
						],
						"minimum_should_match":1
					}
				},
				{
					"range":{
						"lastTimestamp":{
							"gte":"2019-12-01T01:01:01.000000001Z"
						}
					}
				}
			]
		}
	}
}
`
	nsCreateTime := time.Date(2020, time.Month(1), 1, 1, 1, 1, 1, time.UTC)
	startTime := nsCreateTime.AddDate(0, -1, 0)

	filter := &events.Filter{
		InvolvedObjectNamespaceMap: map[string]time.Time{
			"kubesphere-system": nsCreateTime,
		},
		InvolvedObjectNameFuzzy: []string{"istio"},
		Reasons:                 []string{"unhealthy"},
		StartTime:               startTime,
	}

	qp := parseToQueryPart(filter)
	bs, err := json.Marshal(qp)
	if err != nil {
		panic(err)
	}

	queryPart := &map[string]interface{}{}
	if err := json.Unmarshal(bs, queryPart); err != nil {
		panic(err)
	}
	expectedQueryPart := &map[string]interface{}{}
	if err := json.Unmarshal([]byte(q), expectedQueryPart); err != nil {
		panic(err)
	}

	assert.Equal(t, expectedQueryPart, queryPart)
}
|
||||
@@ -1,59 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package events
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type Client interface {
|
||||
SearchEvents(filter *Filter, from, size int64, sort string) (*Events, error)
|
||||
CountOverTime(filter *Filter, interval string) (*Histogram, error)
|
||||
StatisticsOnResources(filter *Filter) (*Statistics, error)
|
||||
}
|
||||
|
||||
type Filter struct {
|
||||
InvolvedObjectNamespaceMap map[string]time.Time
|
||||
InvolvedObjectNames []string
|
||||
InvolvedObjectNameFuzzy []string
|
||||
InvolvedObjectkinds []string
|
||||
Reasons []string
|
||||
ReasonFuzzy []string
|
||||
MessageFuzzy []string
|
||||
Type string
|
||||
StartTime time.Time
|
||||
EndTime time.Time
|
||||
}
|
||||
|
||||
type Events struct {
|
||||
Total int64 `json:"total" description:"total number of matched results"`
|
||||
Records []interface{} `json:"records" description:"actual array of results"`
|
||||
}
|
||||
|
||||
type Histogram struct {
|
||||
Total int64 `json:"total" description:"total number of events"`
|
||||
Buckets []Bucket `json:"buckets" description:"actual array of histogram results"`
|
||||
}
|
||||
type Bucket struct {
|
||||
Time int64 `json:"time" description:"timestamp"`
|
||||
Count int64 `json:"count" description:"total number of events at intervals"`
|
||||
}
|
||||
|
||||
type Statistics struct {
|
||||
Resources int64 `json:"resources" description:"total number of resources"`
|
||||
Events int64 `json:"events" description:"total number of events"`
|
||||
}
|
||||
@@ -1,80 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package events
|
||||
|
||||
import (
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reflectutils"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
Host string `json:"host" yaml:"host"`
|
||||
BasicAuth bool `json:"basicAuth" yaml:"basicAuth"`
|
||||
Username string `json:"username" yaml:"username"`
|
||||
Password string `json:"password" yaml:"password"`
|
||||
IndexPrefix string `json:"indexPrefix,omitempty" yaml:"indexPrefix,omitempty"`
|
||||
Version string `json:"version" yaml:"version"`
|
||||
}
|
||||
|
||||
func NewEventsOptions() *Options {
|
||||
return &Options{
|
||||
Host: "",
|
||||
IndexPrefix: "ks-logstash-events",
|
||||
Version: "",
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Options) ApplyTo(options *Options) {
|
||||
if s.Host != "" {
|
||||
reflectutils.Override(options, s)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Options) Validate() []error {
|
||||
errs := make([]error, 0)
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func (s *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
|
||||
fs.StringVar(&s.Host, "events-elasticsearch-host", c.Host, ""+
|
||||
"Elasticsearch service host. KubeSphere is using elastic as event store, "+
|
||||
"if this filed left blank, KubeSphere will use kubernetes builtin event API instead, and"+
|
||||
" the following elastic search options will be ignored.")
|
||||
|
||||
fs.BoolVar(&s.BasicAuth, "events-elasticsearch-basicAuth", c.BasicAuth, ""+
|
||||
"Elasticsearch events service basic auth enabled. KubeSphere is using elastic as events store, "+
|
||||
"if it is set to true, KubeSphere will connect to ElasticSearch using provided username and password by "+
|
||||
"events-elasticsearch-username and events-elasticsearch-username. Otherwise, KubeSphere will "+
|
||||
"anonymously access the Elasticsearch.")
|
||||
|
||||
fs.StringVar(&s.Username, "events-elasticsearch-username", c.Username, ""+
|
||||
"ElasticSearch authentication username, only needed when events-elasticsearch-basicAuth is"+
|
||||
"set to true. ")
|
||||
|
||||
fs.StringVar(&s.Password, "events-elasticsearch-password", c.Password, ""+
|
||||
"ElasticSearch authentication password, only needed when events-elasticsearch-basicAuth is"+
|
||||
"set to true. ")
|
||||
|
||||
fs.StringVar(&s.IndexPrefix, "events-index-prefix", c.IndexPrefix, ""+
|
||||
"Index name prefix. KubeSphere will retrieve events against indices matching the prefix.")
|
||||
|
||||
fs.StringVar(&s.Version, "events-elasticsearch-version", c.Version, ""+
|
||||
"Elasticsearch major version, e.g. 5/6/7, if left blank, will detect automatically."+
|
||||
"Currently, minimum supported version is 5.x")
|
||||
}
|
||||
@@ -1,68 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reflectutils"
|
||||
)
|
||||
|
||||
// Options contains configuration of the default Gateway
|
||||
type Options struct {
|
||||
WatchesPath string `json:"watchesPath,omitempty" yaml:"watchesPath,omitempty"`
|
||||
Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
|
||||
Repository string `json:"repository,omitempty" yaml:"repository,omitempty"`
|
||||
Tag string `json:"tag,omitempty" yaml:"tag,omitempty"`
|
||||
}
|
||||
|
||||
// NewGatewayOptions creates a default Gateway Option
|
||||
func NewGatewayOptions() *Options {
|
||||
return &Options{
|
||||
WatchesPath: "",
|
||||
Namespace: "", // constants.KubeSphereControlNamespace
|
||||
Repository: "",
|
||||
Tag: "",
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Options) IsEmpty() bool {
|
||||
return s.WatchesPath == ""
|
||||
}
|
||||
|
||||
// Validate check options values
|
||||
func (s *Options) Validate() []error {
|
||||
var errors []error
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
// ApplyTo overrides options if it's valid, which watchesPath is not empty
|
||||
func (s *Options) ApplyTo(options *Options) {
|
||||
if s.WatchesPath != "" {
|
||||
reflectutils.Override(options, s)
|
||||
}
|
||||
}
|
||||
|
||||
// AddFlags add options flags to command line flags,
|
||||
// if watchesPath left empty, following options will be ignored
|
||||
func (s *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
|
||||
fs.StringVar(&s.WatchesPath, "watches-path", c.WatchesPath, "Path to the watches file to use.")
|
||||
fs.StringVar(&s.Namespace, "namespace", c.Namespace, "Working Namespace of the Gateway's Ingress Controller.")
|
||||
fs.StringVar(&s.Repository, "repository", c.Repository, "The Gateway Controller's image repository")
|
||||
fs.StringVar(&s.Tag, "tag", c.Tag, "The Gateway Controller's image tag")
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
package gpu
|
||||
|
||||
import "github.com/spf13/pflag"
|
||||
|
||||
type GPUKind struct {
|
||||
ResourceName string `json:"resourceName,omitempty" yaml:"resourceName,omitempty"`
|
||||
ResourceType string `json:"resourceType,omitempty" yaml:"resourceType,omitempty"`
|
||||
Default bool `json:"default,omitempty" yaml:"default,omitempty"`
|
||||
}
|
||||
|
||||
type Options struct {
|
||||
Kinds []GPUKind `json:"kinds,omitempty" yaml:"kinds,omitempty"`
|
||||
}
|
||||
|
||||
func NewGPUOptions() *Options {
|
||||
return &Options{
|
||||
Kinds: []GPUKind{},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Options) Validate() []error {
|
||||
var errs []error
|
||||
return errs
|
||||
}
|
||||
|
||||
func (s *Options) ApplyTo(options *Options) {
|
||||
if s != nil && len(s.Kinds) > 0 {
|
||||
options.Kinds = s.Kinds
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
|
||||
|
||||
}
|
||||
@@ -1,106 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package k8s
|
||||
|
||||
import (
|
||||
snapshotclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
|
||||
promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
|
||||
istioclient "istio.io/client-go/pkg/clientset/versioned"
|
||||
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
)
|
||||
|
||||
type FakeClient struct {
|
||||
// kubernetes client interface
|
||||
K8sClient kubernetes.Interface
|
||||
|
||||
// discovery client
|
||||
DiscoveryClient *discovery.DiscoveryClient
|
||||
|
||||
// generated clientset
|
||||
KubeSphereClient kubesphere.Interface
|
||||
|
||||
IstioClient istioclient.Interface
|
||||
|
||||
SnapshotClient snapshotclient.Interface
|
||||
|
||||
ApiExtensionClient apiextensionsclient.Interface
|
||||
|
||||
prometheusClient promresourcesclient.Interface
|
||||
|
||||
MasterURL string
|
||||
|
||||
KubeConfig *rest.Config
|
||||
}
|
||||
|
||||
func NewFakeClientSets(k8sClient kubernetes.Interface, discoveryClient *discovery.DiscoveryClient,
|
||||
kubeSphereClient kubesphere.Interface,
|
||||
istioClient istioclient.Interface, snapshotClient snapshotclient.Interface,
|
||||
apiextensionsclient apiextensionsclient.Interface, prometheusClient promresourcesclient.Interface,
|
||||
masterURL string, kubeConfig *rest.Config) Client {
|
||||
return &FakeClient{
|
||||
K8sClient: k8sClient,
|
||||
DiscoveryClient: discoveryClient,
|
||||
KubeSphereClient: kubeSphereClient,
|
||||
IstioClient: istioClient,
|
||||
SnapshotClient: snapshotClient,
|
||||
ApiExtensionClient: apiextensionsclient,
|
||||
prometheusClient: prometheusClient,
|
||||
MasterURL: masterURL,
|
||||
KubeConfig: kubeConfig,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *FakeClient) Kubernetes() kubernetes.Interface {
|
||||
return n.K8sClient
|
||||
}
|
||||
|
||||
func (n *FakeClient) KubeSphere() kubesphere.Interface {
|
||||
return n.KubeSphereClient
|
||||
}
|
||||
|
||||
func (n *FakeClient) Istio() istioclient.Interface {
|
||||
return n.IstioClient
|
||||
}
|
||||
|
||||
func (n *FakeClient) Snapshot() snapshotclient.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *FakeClient) ApiExtensions() apiextensionsclient.Interface {
|
||||
return n.ApiExtensionClient
|
||||
}
|
||||
|
||||
func (n *FakeClient) Discovery() discovery.DiscoveryInterface {
|
||||
return n.DiscoveryClient
|
||||
}
|
||||
|
||||
func (n *FakeClient) Prometheus() promresourcesclient.Interface {
|
||||
return n.prometheusClient
|
||||
}
|
||||
|
||||
func (n *FakeClient) Master() string {
|
||||
return n.MasterURL
|
||||
}
|
||||
|
||||
func (n *FakeClient) Config() *rest.Config {
|
||||
return n.KubeConfig
|
||||
}
|
||||
@@ -1,68 +1,27 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package k8s
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
snapshotclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
|
||||
promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
|
||||
istioclient "istio.io/client-go/pkg/clientset/versioned"
|
||||
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
)
|
||||
|
||||
type Client interface {
|
||||
Kubernetes() kubernetes.Interface
|
||||
KubeSphere() kubesphere.Interface
|
||||
Istio() istioclient.Interface
|
||||
Snapshot() snapshotclient.Interface
|
||||
ApiExtensions() apiextensionsclient.Interface
|
||||
Prometheus() promresourcesclient.Interface
|
||||
kubernetes.Interface
|
||||
Master() string
|
||||
Config() *rest.Config
|
||||
}
|
||||
|
||||
type kubernetesClient struct {
|
||||
// kubernetes client interface
|
||||
k8s kubernetes.Interface
|
||||
|
||||
// generated clientset
|
||||
ks kubesphere.Interface
|
||||
|
||||
istio istioclient.Interface
|
||||
|
||||
snapshot snapshotclient.Interface
|
||||
|
||||
apiextensions apiextensionsclient.Interface
|
||||
|
||||
prometheus promresourcesclient.Interface
|
||||
|
||||
kubernetes.Interface
|
||||
master string
|
||||
|
||||
config *rest.Config
|
||||
}
|
||||
|
||||
// NewKubernetesClientOrDie creates KubernetesClient and panic if there is an error
|
||||
func NewKubernetesClientOrDie(options *KubernetesOptions) Client {
|
||||
func NewKubernetesClientOrDie(options *Options) Client {
|
||||
config, err := clientcmd.BuildConfigFromFlags("", options.KubeConfig)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -72,14 +31,9 @@ func NewKubernetesClientOrDie(options *KubernetesOptions) Client {
|
||||
config.Burst = options.Burst
|
||||
|
||||
k := &kubernetesClient{
|
||||
k8s: kubernetes.NewForConfigOrDie(config),
|
||||
ks: kubesphere.NewForConfigOrDie(config),
|
||||
istio: istioclient.NewForConfigOrDie(config),
|
||||
snapshot: snapshotclient.NewForConfigOrDie(config),
|
||||
apiextensions: apiextensionsclient.NewForConfigOrDie(config),
|
||||
prometheus: promresourcesclient.NewForConfigOrDie(config),
|
||||
master: config.Host,
|
||||
config: config,
|
||||
Interface: kubernetes.NewForConfigOrDie(config),
|
||||
master: config.Host,
|
||||
config: config,
|
||||
}
|
||||
|
||||
if options.Master != "" {
|
||||
@@ -95,78 +49,24 @@ func NewKubernetesClientOrDie(options *KubernetesOptions) Client {
|
||||
}
|
||||
|
||||
// NewKubernetesClient creates a KubernetesClient
|
||||
func NewKubernetesClient(options *KubernetesOptions) (Client, error) {
|
||||
config, err := clientcmd.BuildConfigFromFlags("", options.KubeConfig)
|
||||
func NewKubernetesClient(options *Options) (Client, error) {
|
||||
config, err := clientcmd.BuildConfigFromFlags(options.Master, options.KubeConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config.QPS = options.QPS
|
||||
config.Burst = options.Burst
|
||||
|
||||
var k kubernetesClient
|
||||
k.k8s, err = kubernetes.NewForConfig(config)
|
||||
var client kubernetesClient
|
||||
client.Interface, err = kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k.ks, err = kubesphere.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k.istio, err = istioclient.NewForConfig(config)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k.snapshot, err = snapshotclient.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k.apiextensions, err = apiextensionsclient.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k.prometheus, err = promresourcesclient.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
k.master = options.Master
|
||||
k.config = config
|
||||
|
||||
return &k, nil
|
||||
client.master = options.Master
|
||||
client.config = config
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
func (k *kubernetesClient) Kubernetes() kubernetes.Interface {
|
||||
return k.k8s
|
||||
}
|
||||
|
||||
func (k *kubernetesClient) KubeSphere() kubesphere.Interface {
|
||||
return k.ks
|
||||
}
|
||||
|
||||
func (k *kubernetesClient) Istio() istioclient.Interface {
|
||||
return k.istio
|
||||
}
|
||||
|
||||
func (k *kubernetesClient) Snapshot() snapshotclient.Interface {
|
||||
return k.snapshot
|
||||
}
|
||||
|
||||
func (k *kubernetesClient) ApiExtensions() apiextensionsclient.Interface {
|
||||
return k.apiextensions
|
||||
}
|
||||
|
||||
func (k *kubernetesClient) Prometheus() promresourcesclient.Interface {
|
||||
return k.prometheus
|
||||
}
|
||||
|
||||
// master address used to generate kubeconfig for downloading
|
||||
// Master address used to generate kubeconfig for downloading
|
||||
func (k *kubernetesClient) Master() string {
|
||||
return k.master
|
||||
}
|
||||
|
||||
@@ -1,68 +1,18 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package k8s
|
||||
|
||||
import (
|
||||
snapshotclient "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned"
|
||||
promresourcesclient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
|
||||
istio "istio.io/client-go/pkg/clientset/versioned"
|
||||
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
|
||||
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
)
|
||||
|
||||
type nullClient struct {
|
||||
kubernetes.Interface
|
||||
}
|
||||
|
||||
func NewNullClient() Client {
|
||||
return &nullClient{}
|
||||
}
|
||||
|
||||
func (n nullClient) Kubernetes() kubernetes.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n nullClient) KubeSphere() kubesphere.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n nullClient) Istio() istio.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n nullClient) Snapshot() snapshotclient.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n nullClient) ApiExtensions() apiextensionsclient.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n nullClient) Discovery() discovery.DiscoveryInterface {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *nullClient) Prometheus() promresourcesclient.Interface {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n nullClient) Master() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -1,19 +1,3 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package k8s
|
||||
|
||||
import (
|
||||
@@ -24,11 +8,9 @@ import (
|
||||
"k8s.io/client-go/util/homedir"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reflectutils"
|
||||
)
|
||||
|
||||
type KubernetesOptions struct {
|
||||
type Options struct {
|
||||
// kubeconfig path, if not specified, will use
|
||||
// in cluster way to create clientset
|
||||
KubeConfig string `json:"kubeconfig" yaml:"kubeconfig"`
|
||||
@@ -48,8 +30,8 @@ type KubernetesOptions struct {
|
||||
}
|
||||
|
||||
// NewKubernetesOptions returns a `zero` instance
|
||||
func NewKubernetesOptions() (option *KubernetesOptions) {
|
||||
option = &KubernetesOptions{
|
||||
func NewKubernetesOptions() (option *Options) {
|
||||
option = &Options{
|
||||
QPS: 1e6,
|
||||
Burst: 1e6,
|
||||
}
|
||||
@@ -70,8 +52,8 @@ func NewKubernetesOptions() (option *KubernetesOptions) {
|
||||
return
|
||||
}
|
||||
|
||||
func (k *KubernetesOptions) Validate() []error {
|
||||
errors := []error{}
|
||||
func (k *Options) Validate() []error {
|
||||
var errors []error
|
||||
|
||||
if k.KubeConfig != "" {
|
||||
if _, err := os.Stat(k.KubeConfig); err != nil {
|
||||
@@ -81,11 +63,7 @@ func (k *KubernetesOptions) Validate() []error {
|
||||
return errors
|
||||
}
|
||||
|
||||
func (k *KubernetesOptions) ApplyTo(options *KubernetesOptions) {
|
||||
reflectutils.Override(options, k)
|
||||
}
|
||||
|
||||
func (k *KubernetesOptions) AddFlags(fs *pflag.FlagSet, c *KubernetesOptions) {
|
||||
func (k *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
|
||||
fs.StringVar(&k.KubeConfig, "kubeconfig", c.KubeConfig, ""+
|
||||
"Path for kubernetes kubeconfig file, if left blank, will use "+
|
||||
"in cluster way.")
|
||||
|
||||
@@ -1,179 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kiali
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/cache"
|
||||
)
|
||||
|
||||
// Kiali token Response
|
||||
type TokenResponse struct {
|
||||
// The username for the token
|
||||
Username string `json:"username"`
|
||||
// The authentication token
|
||||
Token string `json:"token"`
|
||||
// The expired time for the token
|
||||
ExpiresOn string `json:"expiresOn"`
|
||||
}
|
||||
|
||||
// Kiali Authentication Strategy
|
||||
type Strategy string
|
||||
|
||||
const (
|
||||
AuthStrategyToken Strategy = "token"
|
||||
AuthStrategyAnonymous Strategy = "anonymous"
|
||||
)
|
||||
|
||||
const (
|
||||
AuthURL = "%s/kiali/api/authenticate"
|
||||
KialiTokenCacheKey = "kubesphere:kubesphere:kiali"
|
||||
)
|
||||
|
||||
type HttpClient interface {
|
||||
// Do is an interface of http client Do method,
|
||||
// that sends an HTTP request and returns an HTTP response.
|
||||
Do(req *http.Request) (*http.Response, error)
|
||||
|
||||
// PostForm is an interface of http client PostForm method,
|
||||
// that issues a POST to the specified URL.
|
||||
PostForm(url string, data url.Values) (resp *http.Response, err error)
|
||||
}
|
||||
|
||||
// Kiali Client
|
||||
type Client struct {
|
||||
Strategy Strategy
|
||||
cache cache.Interface
|
||||
client HttpClient
|
||||
ServiceToken string
|
||||
Host string
|
||||
}
|
||||
|
||||
// NewClient creates an instance of Kiali Client.
|
||||
func NewClient(strategy Strategy,
|
||||
cache cache.Interface,
|
||||
client HttpClient,
|
||||
serviceToken string,
|
||||
host string) *Client {
|
||||
|
||||
return &Client{
|
||||
Strategy: strategy,
|
||||
cache: cache,
|
||||
client: client,
|
||||
ServiceToken: serviceToken,
|
||||
Host: host,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// NewDefaultClient creates an instance of Kiali Client with default http settings.
|
||||
func NewDefaultClient(
|
||||
cache cache.Interface,
|
||||
serviceToken string,
|
||||
host string) *Client {
|
||||
return &Client{
|
||||
Strategy: AuthStrategyToken,
|
||||
cache: cache,
|
||||
client: &http.Client{},
|
||||
ServiceToken: serviceToken,
|
||||
Host: host,
|
||||
}
|
||||
}
|
||||
|
||||
// authenticate sends auth request with Kubernetes token and
|
||||
// get Kiali token from the response.
|
||||
func (c *Client) authenticate() (*TokenResponse, error) {
|
||||
resp, err := c.client.PostForm(fmt.Sprintf(AuthURL, c.Host), url.Values{
|
||||
"token": {c.ServiceToken},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
token := TokenResponse{}
|
||||
err = json.Unmarshal(body, &token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// token strategy in kiali:v1.46 writes the token in the cookie
|
||||
// Related issue: https://github.com/kiali/kiali/issues/4682
|
||||
token.Token = resp.Header.Get("Set-Cookie")
|
||||
|
||||
return &token, nil
|
||||
}
|
||||
|
||||
// Get issues a GET to the Kiali server with the url.
|
||||
func (c *Client) Get(url string) (resp *http.Response, err error) {
|
||||
|
||||
if req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s%s", c.Host, url), nil); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
if c.Strategy == AuthStrategyToken {
|
||||
err := c.SetToken(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
resp, err := c.client.Do(req)
|
||||
|
||||
if err != nil {
|
||||
c.clearTokenCache(err)
|
||||
}
|
||||
|
||||
return resp, err
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) clearTokenCache(err error) {
|
||||
if c.cache != nil && err != nil {
|
||||
c.cache.Del(KialiTokenCacheKey)
|
||||
}
|
||||
}
|
||||
|
||||
// SetToken gets token from the Kiali server/cache and sets Bearer token to the request header.
|
||||
func (c *Client) SetToken(req *http.Request) error {
|
||||
if c.cache != nil {
|
||||
token, err := c.cache.Get(KialiTokenCacheKey)
|
||||
if err == nil {
|
||||
req.Header.Set("Cookie", token)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
token, err := c.authenticate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// token strategy in kiali:v1.46 writes the token in the cookie.
|
||||
// https://github.com/kiali/kiali-operator/blob/v1.50.1/molecule/asserts/token-test/assert-token-access.yml#L47-L56
|
||||
req.Header.Set("Cookie", token.Token)
|
||||
|
||||
if c.cache != nil {
|
||||
c.cache.Set(KialiTokenCacheKey, token.Token, time.Hour)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,136 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kiali
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/cache"
|
||||
)
|
||||
|
||||
func TestClient_Get(t *testing.T) {
|
||||
type fields struct {
|
||||
Strategy Strategy
|
||||
cache cache.Interface
|
||||
client HttpClient
|
||||
ServiceToken string
|
||||
Host string
|
||||
}
|
||||
type args struct {
|
||||
url string
|
||||
}
|
||||
|
||||
inMemoryCache, err := cache.NewInMemoryCache(nil, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
token, _ := json.Marshal(
|
||||
&TokenResponse{
|
||||
Username: "test",
|
||||
Token: "test",
|
||||
},
|
||||
)
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
wantResp *http.Response
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Anonymous",
|
||||
fields: fields{
|
||||
Strategy: AuthStrategyAnonymous,
|
||||
cache: nil,
|
||||
client: &MockClient{
|
||||
RequestResult: "fake",
|
||||
},
|
||||
ServiceToken: "token",
|
||||
Host: "http://kiali.istio-system.svc",
|
||||
},
|
||||
args: args{url: "http://kiali.istio-system.svc"},
|
||||
wantResp: &http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewReader([]byte("fake"))),
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Token",
|
||||
fields: fields{
|
||||
Strategy: AuthStrategyToken,
|
||||
cache: nil,
|
||||
client: &MockClient{
|
||||
TokenResult: token,
|
||||
RequestResult: "fake",
|
||||
},
|
||||
ServiceToken: "token",
|
||||
Host: "http://kiali.istio-system.svc",
|
||||
},
|
||||
args: args{url: "http://kiali.istio-system.svc"},
|
||||
wantResp: &http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewReader([]byte("fake"))),
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "Token",
|
||||
fields: fields{
|
||||
Strategy: AuthStrategyToken,
|
||||
cache: inMemoryCache,
|
||||
client: &MockClient{
|
||||
TokenResult: token,
|
||||
RequestResult: "fake",
|
||||
},
|
||||
ServiceToken: "token",
|
||||
Host: "http://kiali.istio-system.svc",
|
||||
},
|
||||
args: args{url: "http://kiali.istio-system.svc"},
|
||||
wantResp: &http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewReader([]byte("fake"))),
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c := NewClient(
|
||||
tt.fields.Strategy,
|
||||
tt.fields.cache,
|
||||
tt.fields.client,
|
||||
tt.fields.ServiceToken,
|
||||
tt.fields.Host,
|
||||
)
|
||||
//nolint:bodyclose
|
||||
gotResp, err := c.Get(tt.args.url)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("Client.Get() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(gotResp, tt.wantResp) {
|
||||
t.Errorf("Client.Get() = %v, want %v", gotResp, tt.wantResp)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
package kiali
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// MockClient is a test double for the kiali HTTP client. Do always answers
// with RequestResult as the body; PostForm always answers with TokenResult.
type MockClient struct {
	TokenResult   []byte
	RequestResult string
}

// Do returns a canned 200 response whose body is RequestResult.
func (c *MockClient) Do(req *http.Request) (*http.Response, error) {
	body := io.NopCloser(bytes.NewReader([]byte(c.RequestResult)))
	return &http.Response{
		StatusCode: 200,
		Body:       body,
	}, nil
}

// PostForm returns a canned 200 response whose body is TokenResult.
func (c *MockClient) PostForm(url string, data url.Values) (resp *http.Response, err error) {
	body := io.NopCloser(bytes.NewReader(c.TokenResult))
	return &http.Response{
		StatusCode: 200,
		Body:       body,
	}, nil
}
|
||||
@@ -1,48 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kubeedge
|
||||
|
||||
import (
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reflectutils"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
Endpoint string `json:"endpoint" yaml:"endpoint"`
|
||||
}
|
||||
|
||||
func NewKubeEdgeOptions() *Options {
|
||||
return &Options{
|
||||
Endpoint: "",
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Options) ApplyTo(options *Options) {
|
||||
reflectutils.Override(options, o)
|
||||
}
|
||||
|
||||
func (o *Options) Validate() []error {
|
||||
errs := []error{}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func (o *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
|
||||
fs.StringVar(&o.Endpoint, "edge-watcher-endpoint", c.Endpoint,
|
||||
"edge watcher endpoint for kubeedge v1alpha1.")
|
||||
}
|
||||
@@ -1,187 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"sync"
|
||||
|
||||
"github.com/go-ldap/ldap"
|
||||
)
|
||||
|
||||
// channelPool implements the Pool interface based on buffered channels.
|
||||
type channelPool struct {
|
||||
// storage for our net.Conn connections
|
||||
mu sync.Mutex
|
||||
conns chan ldap.Client
|
||||
|
||||
name string
|
||||
aliveChecks bool
|
||||
|
||||
// net.Conn generator
|
||||
factory PoolFactory
|
||||
closeAt []uint16
|
||||
}
|
||||
|
||||
// PoolFactory is a function to create new connections.
|
||||
type PoolFactory func(string) (ldap.Client, error)
|
||||
|
||||
// newChannelPool returns a new pool based on buffered channels with an initial
|
||||
// capacity and maximum capacity. Factory is used when initial capacity is
|
||||
// greater than zero to fill the pool. A zero initialCap doesn't fill the Pool
|
||||
// until a new Get() is called. During a Get(), If there is no new connection
|
||||
// available in the pool, a new connection will be created via the Factory()
|
||||
// method.
|
||||
//
|
||||
// closeAt will automagically mark the connection as unusable if the return code
|
||||
// of the call is one of those passed, most likely you want to set this to something
|
||||
// like
|
||||
//
|
||||
// []uint8{ldap.LDAPResultTimeLimitExceeded, ldap.ErrorNetwork}
|
||||
func newChannelPool(initialCap, maxCap int, name string, factory PoolFactory, closeAt []uint16) (Pool, error) {
|
||||
if initialCap < 0 || maxCap <= 0 || initialCap > maxCap {
|
||||
return nil, errors.New("invalid capacity settings")
|
||||
}
|
||||
|
||||
c := &channelPool{
|
||||
conns: make(chan ldap.Client, maxCap),
|
||||
name: name,
|
||||
factory: factory,
|
||||
closeAt: closeAt,
|
||||
aliveChecks: true,
|
||||
}
|
||||
|
||||
// create initial connections, if something goes wrong,
|
||||
// just close the pool error out.
|
||||
for i := 0; i < initialCap; i++ {
|
||||
conn, err := factory(c.name)
|
||||
if err != nil {
|
||||
c.Close()
|
||||
return nil, errors.New("factory is not able to fill the pool: " + err.Error())
|
||||
}
|
||||
c.conns <- conn
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *channelPool) AliveChecks(on bool) {
|
||||
c.mu.Lock()
|
||||
c.aliveChecks = on
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelPool) getConns() chan ldap.Client {
|
||||
c.mu.Lock()
|
||||
conns := c.conns
|
||||
c.mu.Unlock()
|
||||
return conns
|
||||
}
|
||||
|
||||
// Get implements the Pool interfaces Get() method. If there is no new
|
||||
// connection available in the pool, a new connection will be created via the
|
||||
// Factory() method.
|
||||
func (c *channelPool) Get() (*PoolConn, error) {
|
||||
conns := c.getConns()
|
||||
if conns == nil {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
|
||||
// wrap our connections with our ldap.Client implementation (wrapConn
|
||||
// method) that puts the connection back to the pool if it's closed.
|
||||
select {
|
||||
case conn := <-conns:
|
||||
if conn == nil {
|
||||
return nil, ErrClosed
|
||||
}
|
||||
if !c.aliveChecks || isAlive(conn) {
|
||||
return c.wrapConn(conn, c.closeAt), nil
|
||||
}
|
||||
conn.Close()
|
||||
return c.NewConn()
|
||||
default:
|
||||
return c.NewConn()
|
||||
}
|
||||
}
|
||||
|
||||
func isAlive(conn ldap.Client) bool {
|
||||
_, err := conn.Search(&ldap.SearchRequest{BaseDN: "", Scope: ldap.ScopeBaseObject, Filter: "(&)", Attributes: []string{"1.1"}})
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (c *channelPool) NewConn() (*PoolConn, error) {
|
||||
conn, err := c.factory(c.name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.wrapConn(conn, c.closeAt), nil
|
||||
}
|
||||
|
||||
// put puts the connection back to the pool. If the pool is full or closed,
|
||||
// conn is simply closed. A nil conn will be rejected.
|
||||
func (c *channelPool) put(conn ldap.Client) {
|
||||
if conn == nil {
|
||||
log.Printf("connection is nil. rejecting")
|
||||
return
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.conns == nil {
|
||||
// pool is closed, close passed connection
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
// put the resource back into the pool. If the pool is full, this will
|
||||
// block and the default case will be executed.
|
||||
select {
|
||||
case c.conns <- conn:
|
||||
return
|
||||
default:
|
||||
// pool is full, close passed connection
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channelPool) Close() {
|
||||
c.mu.Lock()
|
||||
conns := c.conns
|
||||
c.conns = nil
|
||||
c.factory = nil
|
||||
c.mu.Unlock()
|
||||
|
||||
if conns == nil {
|
||||
return
|
||||
}
|
||||
|
||||
close(conns)
|
||||
for conn := range conns {
|
||||
conn.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channelPool) Len() int { return len(c.getConns()) }
|
||||
|
||||
func (c *channelPool) wrapConn(conn ldap.Client, closeAt []uint16) *PoolConn {
|
||||
p := &PoolConn{c: c, closeAt: closeAt}
|
||||
p.Conn = conn
|
||||
return p
|
||||
}
|
||||
@@ -1,113 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/go-ldap/ldap"
|
||||
)
|
||||
|
||||
// PoolConn implements Client to override the Close() method
|
||||
type PoolConn struct {
|
||||
Conn ldap.Client
|
||||
c *channelPool
|
||||
unusable bool
|
||||
closeAt []uint16
|
||||
}
|
||||
|
||||
func (p *PoolConn) Start() {
|
||||
p.Conn.Start()
|
||||
}
|
||||
|
||||
func (p *PoolConn) StartTLS(config *tls.Config) error {
|
||||
// FIXME - check if already TLS and then ignore?
|
||||
return p.Conn.StartTLS(config)
|
||||
}
|
||||
|
||||
// Close() puts the given connects back to the pool instead of closing it.
|
||||
func (p *PoolConn) Close() {
|
||||
if p.unusable {
|
||||
log.Printf("Closing unusable connection")
|
||||
if p.Conn != nil {
|
||||
p.Conn.Close()
|
||||
}
|
||||
return
|
||||
}
|
||||
p.c.put(p.Conn)
|
||||
}
|
||||
|
||||
func (p *PoolConn) SimpleBind(simpleBindRequest *ldap.SimpleBindRequest) (*ldap.SimpleBindResult, error) {
|
||||
return p.Conn.SimpleBind(simpleBindRequest)
|
||||
}
|
||||
|
||||
func (p *PoolConn) Bind(username, password string) error {
|
||||
return p.Conn.Bind(username, password)
|
||||
}
|
||||
|
||||
func (p *PoolConn) ModifyDN(modifyDNRequest *ldap.ModifyDNRequest) error {
|
||||
return p.Conn.ModifyDN(modifyDNRequest)
|
||||
}
|
||||
|
||||
// MarkUnusable() marks the connection not usable any more, to let the pool close it
|
||||
// instead of returning it to pool.
|
||||
func (p *PoolConn) MarkUnusable() {
|
||||
p.unusable = true
|
||||
}
|
||||
|
||||
//nolint:unused
|
||||
func (p *PoolConn) autoClose(err error) {
|
||||
for _, code := range p.closeAt {
|
||||
if ldap.IsErrorWithCode(err, code) {
|
||||
p.MarkUnusable()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PoolConn) SetTimeout(t time.Duration) {
|
||||
p.Conn.SetTimeout(t)
|
||||
}
|
||||
|
||||
func (p *PoolConn) Add(addRequest *ldap.AddRequest) error {
|
||||
return p.Conn.Add(addRequest)
|
||||
}
|
||||
|
||||
func (p *PoolConn) Del(delRequest *ldap.DelRequest) error {
|
||||
return p.Conn.Del(delRequest)
|
||||
}
|
||||
|
||||
func (p *PoolConn) Modify(modifyRequest *ldap.ModifyRequest) error {
|
||||
return p.Conn.Modify(modifyRequest)
|
||||
}
|
||||
|
||||
func (p *PoolConn) Compare(dn, attribute, value string) (bool, error) {
|
||||
return p.Conn.Compare(dn, attribute, value)
|
||||
}
|
||||
|
||||
func (p *PoolConn) PasswordModify(passwordModifyRequest *ldap.PasswordModifyRequest) (*ldap.PasswordModifyResult, error) {
|
||||
return p.Conn.PasswordModify(passwordModifyRequest)
|
||||
}
|
||||
|
||||
func (p *PoolConn) Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) {
|
||||
return p.Conn.Search(searchRequest)
|
||||
}
|
||||
func (p *PoolConn) SearchWithPaging(searchRequest *ldap.SearchRequest, pagingSize uint32) (*ldap.SearchResult, error) {
|
||||
return p.Conn.SearchWithPaging(searchRequest, pagingSize)
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
)
|
||||
|
||||
// Interface defines CRUD behaviors of manipulating users
|
||||
type Interface interface {
|
||||
// Create create a new user in ldap
|
||||
Create(user *iamv1alpha2.User) error
|
||||
|
||||
// Update updates a user information, return error if user not exists
|
||||
Update(user *iamv1alpha2.User) error
|
||||
|
||||
// Delete deletes a user from ldap, return nil if user not exists
|
||||
Delete(name string) error
|
||||
|
||||
// Get gets a user by its username from ldap, return ErrUserNotExists if user not exists
|
||||
Get(name string) (*iamv1alpha2.User, error)
|
||||
|
||||
// Authenticate checks if (name, password) is valid, return ErrInvalidCredentials if not
|
||||
Authenticate(name string, password string) error
|
||||
|
||||
List(query *query.Query) (*api.ListResult, error)
|
||||
}
|
||||
@@ -1,405 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-ldap/ldap"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
"kubesphere.io/kubesphere/pkg/server/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
ldapAttributeObjectClass = "objectClass"
|
||||
ldapAttributeCommonName = "cn"
|
||||
ldapAttributeSerialNumber = "sn"
|
||||
ldapAttributeUserID = "uid"
|
||||
ldapAttributeMail = "mail"
|
||||
ldapAttributeUserPassword = "userPassword"
|
||||
ldapAttributeDescription = "description"
|
||||
ldapAttributeCreateTimestamp = "createTimestamp"
|
||||
ldapAttributeOrganizationUnit = "ou"
|
||||
|
||||
// ldap create timestamp attribute layout
|
||||
ldapAttributeCreateTimestampLayout = "20060102150405Z"
|
||||
)
|
||||
|
||||
var ErrUserAlreadyExisted = errors.New("user already existed")
|
||||
var ErrUserNotExists = errors.New("user not exists")
|
||||
var ErrInvalidCredentials = errors.New("invalid credentials")
|
||||
|
||||
type ldapInterfaceImpl struct {
|
||||
pool Pool
|
||||
userSearchBase string
|
||||
groupSearchBase string
|
||||
managerDN string
|
||||
managerPassword string
|
||||
}
|
||||
|
||||
var _ Interface = &ldapInterfaceImpl{}
|
||||
|
||||
func NewLdapClient(options *Options, stopCh <-chan struct{}) (Interface, error) {
|
||||
|
||||
poolFactory := func(s string) (ldap.Client, error) {
|
||||
conn, err := ldap.Dial("tcp", options.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
pool, err := newChannelPool(options.InitialCap,
|
||||
options.MaxCap,
|
||||
options.PoolName,
|
||||
poolFactory,
|
||||
[]uint16{ldap.LDAPResultAdminLimitExceeded, ldap.ErrorNetwork})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := &ldapInterfaceImpl{
|
||||
pool: pool,
|
||||
userSearchBase: options.UserSearchBase,
|
||||
groupSearchBase: options.GroupSearchBase,
|
||||
managerDN: options.ManagerDN,
|
||||
managerPassword: options.ManagerPassword,
|
||||
}
|
||||
|
||||
go func() {
|
||||
<-stopCh
|
||||
client.close()
|
||||
}()
|
||||
|
||||
_ = client.createSearchBase()
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (l *ldapInterfaceImpl) createSearchBase() error {
|
||||
conn, err := l.newConn()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
createIfNotExistsFunc := func(request *ldap.AddRequest) error {
|
||||
searchRequest := &ldap.SearchRequest{
|
||||
BaseDN: request.DN,
|
||||
Scope: ldap.ScopeWholeSubtree,
|
||||
DerefAliases: ldap.NeverDerefAliases,
|
||||
SizeLimit: 0,
|
||||
TimeLimit: 0,
|
||||
TypesOnly: false,
|
||||
Filter: "(objectClass=*)",
|
||||
}
|
||||
|
||||
_, err = conn.Search(searchRequest)
|
||||
if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) {
|
||||
return conn.Add(request)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
userSearchBaseAddRequest := &ldap.AddRequest{
|
||||
DN: l.userSearchBase,
|
||||
Attributes: []ldap.Attribute{
|
||||
{
|
||||
Type: ldapAttributeObjectClass,
|
||||
Vals: []string{"organizationalUnit", "top"},
|
||||
},
|
||||
{
|
||||
Type: ldapAttributeOrganizationUnit,
|
||||
Vals: []string{"Users"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err = createIfNotExistsFunc(userSearchBaseAddRequest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
groupSearchBaseAddRequest := *userSearchBaseAddRequest
|
||||
groupSearchBaseAddRequest.DN = l.groupSearchBase
|
||||
|
||||
return createIfNotExistsFunc(&groupSearchBaseAddRequest)
|
||||
|
||||
}
|
||||
|
||||
func (l *ldapInterfaceImpl) close() {
|
||||
if l.pool != nil {
|
||||
l.pool.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (l *ldapInterfaceImpl) newConn() (ldap.Client, error) {
|
||||
conn, err := l.pool.Get()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = conn.Bind(l.managerDN, l.managerPassword)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func (l *ldapInterfaceImpl) dnForUsername(username string) string {
|
||||
return fmt.Sprintf("uid=%s,%s", username, l.userSearchBase)
|
||||
}
|
||||
|
||||
func (l *ldapInterfaceImpl) filterForUsername(username string) string {
|
||||
return fmt.Sprintf("(&(objectClass=inetOrgPerson)(|(%s=%s)(%s=%s)))", ldapAttributeUserID, username, ldapAttributeMail, username)
|
||||
}
|
||||
|
||||
func (l *ldapInterfaceImpl) Get(name string) (*iamv1alpha2.User, error) {
|
||||
conn, err := l.newConn()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
searchRequest := &ldap.SearchRequest{
|
||||
BaseDN: l.userSearchBase,
|
||||
Scope: ldap.ScopeWholeSubtree,
|
||||
DerefAliases: ldap.NeverDerefAliases,
|
||||
SizeLimit: 0,
|
||||
TimeLimit: 0,
|
||||
TypesOnly: false,
|
||||
Filter: l.filterForUsername(name),
|
||||
Attributes: []string{
|
||||
ldapAttributeMail,
|
||||
ldapAttributeDescription,
|
||||
ldapAttributeCreateTimestamp,
|
||||
},
|
||||
}
|
||||
|
||||
searchResults, err := conn.Search(searchRequest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(searchResults.Entries) == 0 {
|
||||
return nil, ErrUserNotExists
|
||||
}
|
||||
|
||||
userEntry := searchResults.Entries[0]
|
||||
|
||||
user := &iamv1alpha2.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: userEntry.GetAttributeValue(ldapAttributeUserID),
|
||||
},
|
||||
Spec: iamv1alpha2.UserSpec{
|
||||
Email: userEntry.GetAttributeValue(ldapAttributeMail),
|
||||
Description: userEntry.GetAttributeValue(ldapAttributeDescription),
|
||||
},
|
||||
}
|
||||
|
||||
createTimestamp, _ := time.Parse(ldapAttributeCreateTimestampLayout, userEntry.GetAttributeValue(ldapAttributeCreateTimestamp))
|
||||
user.ObjectMeta.CreationTimestamp.Time = createTimestamp
|
||||
return user, nil
|
||||
}
|
||||
|
||||
func (l *ldapInterfaceImpl) Create(user *iamv1alpha2.User) error {
|
||||
createRequest := &ldap.AddRequest{
|
||||
DN: l.dnForUsername(user.Name),
|
||||
Attributes: []ldap.Attribute{
|
||||
{
|
||||
Type: ldapAttributeObjectClass,
|
||||
Vals: []string{"inetOrgPerson", "top"},
|
||||
},
|
||||
{
|
||||
Type: ldapAttributeCommonName,
|
||||
Vals: []string{user.Name},
|
||||
},
|
||||
{
|
||||
Type: ldapAttributeSerialNumber,
|
||||
Vals: []string{user.Name},
|
||||
},
|
||||
{
|
||||
Type: ldapAttributeUserPassword,
|
||||
Vals: []string{user.Spec.EncryptedPassword},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
conn, err := l.newConn()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
return conn.Add(createRequest)
|
||||
}
|
||||
|
||||
func (l *ldapInterfaceImpl) Delete(name string) error {
|
||||
conn, err := l.newConn()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
_, err = l.Get(name)
|
||||
if err != nil {
|
||||
if err == ErrUserNotExists {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
deleteRequest := &ldap.DelRequest{
|
||||
DN: l.dnForUsername(name),
|
||||
}
|
||||
|
||||
return conn.Del(deleteRequest)
|
||||
}
|
||||
|
||||
func (l *ldapInterfaceImpl) Update(newUser *iamv1alpha2.User) error {
|
||||
conn, err := l.newConn()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// check user existed
|
||||
_, err = l.Get(newUser.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
modifyRequest := &ldap.ModifyRequest{
|
||||
DN: l.dnForUsername(newUser.Name),
|
||||
}
|
||||
|
||||
if newUser.Spec.EncryptedPassword != "" {
|
||||
modifyRequest.Replace(ldapAttributeUserPassword, []string{newUser.Spec.EncryptedPassword})
|
||||
}
|
||||
|
||||
return conn.Modify(modifyRequest)
|
||||
|
||||
}
|
||||
|
||||
func (l *ldapInterfaceImpl) Authenticate(username, password string) error {
|
||||
conn, err := l.newConn()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
dn := l.dnForUsername(username)
|
||||
err = conn.Bind(dn, password)
|
||||
if ldap.IsErrorWithCode(err, ldap.LDAPResultInvalidCredentials) {
|
||||
return ErrInvalidCredentials
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (l *ldapInterfaceImpl) List(query *query.Query) (*api.ListResult, error) {
|
||||
conn, err := l.newConn()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
pageControl := ldap.NewControlPaging(1000)
|
||||
|
||||
users := make([]iamv1alpha2.User, 0)
|
||||
|
||||
filter := "(&(objectClass=inetOrgPerson))"
|
||||
|
||||
for {
|
||||
userSearchRequest := ldap.NewSearchRequest(
|
||||
l.userSearchBase,
|
||||
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
|
||||
filter,
|
||||
[]string{ldapAttributeUserID, ldapAttributeMail, ldapAttributeDescription, ldapAttributeCreateTimestamp},
|
||||
[]ldap.Control{pageControl},
|
||||
)
|
||||
|
||||
response, err := conn.Search(userSearchRequest)
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, entry := range response.Entries {
|
||||
|
||||
uid := entry.GetAttributeValue(ldapAttributeUserID)
|
||||
email := entry.GetAttributeValue(ldapAttributeMail)
|
||||
description := entry.GetAttributeValue(ldapAttributeDescription)
|
||||
createTimestamp, _ := time.Parse(ldapAttributeCreateTimestampLayout, entry.GetAttributeValue(ldapAttributeCreateTimestamp))
|
||||
|
||||
user := iamv1alpha2.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: uid,
|
||||
CreationTimestamp: metav1.Time{Time: createTimestamp}},
|
||||
Spec: iamv1alpha2.UserSpec{
|
||||
Email: email,
|
||||
Description: description,
|
||||
}}
|
||||
|
||||
users = append(users, user)
|
||||
}
|
||||
|
||||
updatedControl := ldap.FindControl(response.Controls, ldap.ControlTypePaging)
|
||||
if ctrl, ok := updatedControl.(*ldap.ControlPaging); ctrl != nil && ok && len(ctrl.Cookie) != 0 {
|
||||
pageControl.SetCookie(ctrl.Cookie)
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
sort.Slice(users, func(i, j int) bool {
|
||||
if !query.Ascending {
|
||||
i, j = j, i
|
||||
}
|
||||
switch query.SortBy {
|
||||
case "username":
|
||||
return strings.Compare(users[i].Name, users[j].Name) <= 0
|
||||
case "createTime":
|
||||
fallthrough
|
||||
default:
|
||||
return users[i].CreationTimestamp.Before(&users[j].CreationTimestamp)
|
||||
}
|
||||
})
|
||||
|
||||
items := make([]interface{}, 0)
|
||||
|
||||
for i, user := range users {
|
||||
if i >= query.Pagination.Offset && len(items) < query.Pagination.Limit {
|
||||
items = append(items, user)
|
||||
}
|
||||
}
|
||||
|
||||
return &api.ListResult{
|
||||
Items: items,
|
||||
TotalItems: len(users),
|
||||
}, nil
|
||||
}
|
||||
@@ -1,78 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/reflectutils"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
Host string `json:"host,omitempty" yaml:"host,omitempty"`
|
||||
ManagerDN string `json:"managerDN,omitempty" yaml:"managerDN,omitempty"`
|
||||
ManagerPassword string `json:"managerPassword,omitempty" yaml:"managerPassword,omitempty"`
|
||||
UserSearchBase string `json:"userSearchBase,omitempty" yaml:"userSearchBase,omitempty"`
|
||||
GroupSearchBase string `json:"groupSearchBase,omitempty" yaml:"groupSearchBase,omitempty"`
|
||||
InitialCap int `json:"initialCap,omitempty" yaml:"initialCap,omitempty"`
|
||||
MaxCap int `json:"maxCap,omitempty" yaml:"maxCap,omitempty"`
|
||||
PoolName string `json:"poolName,omitempty" yaml:"poolName,omitempty"`
|
||||
}
|
||||
|
||||
// NewOptions return a default option
|
||||
// which host field point to nowhere.
|
||||
func NewOptions() *Options {
|
||||
return &Options{
|
||||
Host: "",
|
||||
ManagerDN: "cn=admin,dc=example,dc=org",
|
||||
UserSearchBase: "ou=Users,dc=example,dc=org",
|
||||
GroupSearchBase: "ou=Groups,dc=example,dc=org",
|
||||
InitialCap: 10,
|
||||
MaxCap: 100,
|
||||
PoolName: "ldap",
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Options) Validate() []error {
|
||||
var errors []error
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
func (l *Options) ApplyTo(options *Options) {
|
||||
if l.Host != "" {
|
||||
reflectutils.Override(options, l)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Options) AddFlags(fs *pflag.FlagSet, s *Options) {
|
||||
fs.StringVar(&l.Host, "ldap-host", s.Host, ""+
|
||||
"Ldap service host, if left blank, all of the following ldap options will "+
|
||||
"be ignored and ldap will be disabled.")
|
||||
|
||||
fs.StringVar(&l.ManagerDN, "ldap-manager-dn", s.ManagerDN, ""+
|
||||
"Ldap manager account domain name.")
|
||||
|
||||
fs.StringVar(&l.ManagerPassword, "ldap-manager-password", s.ManagerPassword, ""+
|
||||
"Ldap manager account password.")
|
||||
|
||||
fs.StringVar(&l.UserSearchBase, "ldap-user-search-base", s.UserSearchBase, ""+
|
||||
"Ldap user search base.")
|
||||
|
||||
fs.StringVar(&l.GroupSearchBase, "ldap-group-search-base", s.GroupSearchBase, ""+
|
||||
"Ldap group search base.")
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrClosed is the error resulting if the pool is closed via pool.Close().
|
||||
ErrClosed = errors.New("pool is closed")
|
||||
)
|
||||
|
||||
// Pool interface describes a pool implementation. A pool should have maximum
|
||||
// capacity. An ideal pool is threadsafe and easy to use.
|
||||
type Pool interface {
|
||||
// Get returns a new connection from the pool. Closing the connections puts
|
||||
// it back to the Pool. Closing it when the pool is destroyed or full will
|
||||
// be counted as an error.
|
||||
Get() (*PoolConn, error)
|
||||
|
||||
// Close closes the pool and all its connections. After Close() the pool is
|
||||
// no longer usable.
|
||||
Close()
|
||||
|
||||
// Len returns the current number of connections of the pool.
|
||||
Len() int
|
||||
}
|
||||
@@ -1,111 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/api"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
)
|
||||
|
||||
const FAKE_HOST string = "FAKE"
|
||||
|
||||
// simpleLdap is a implementation of ldap.Interface, you should never use this in production env!
|
||||
type simpleLdap struct {
|
||||
store map[string]*iamv1alpha2.User
|
||||
}
|
||||
|
||||
func NewSimpleLdap() Interface {
|
||||
sl := &simpleLdap{
|
||||
store: map[string]*iamv1alpha2.User{},
|
||||
}
|
||||
|
||||
// initialize with a admin user
|
||||
admin := &iamv1alpha2.User{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "admin",
|
||||
},
|
||||
Spec: iamv1alpha2.UserSpec{
|
||||
Email: "admin@kubesphere.io",
|
||||
Lang: "eng",
|
||||
Description: "administrator",
|
||||
Groups: nil,
|
||||
EncryptedPassword: "P@88w0rd",
|
||||
},
|
||||
}
|
||||
sl.store[admin.Name] = admin
|
||||
return sl
|
||||
}
|
||||
|
||||
func (s simpleLdap) Create(user *iamv1alpha2.User) error {
|
||||
s.store[user.Name] = user
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s simpleLdap) Update(user *iamv1alpha2.User) error {
|
||||
_, err := s.Get(user.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.store[user.Name] = user
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s simpleLdap) Delete(name string) error {
|
||||
_, err := s.Get(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
delete(s.store, name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s simpleLdap) Get(name string) (*iamv1alpha2.User, error) {
|
||||
if user, ok := s.store[name]; !ok {
|
||||
return nil, ErrUserNotExists
|
||||
} else {
|
||||
return user, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s simpleLdap) Authenticate(name string, password string) error {
|
||||
if user, err := s.Get(name); err != nil {
|
||||
return err
|
||||
} else {
|
||||
if user.Spec.EncryptedPassword != password {
|
||||
return ErrInvalidCredentials
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *simpleLdap) List(query *query.Query) (*api.ListResult, error) {
|
||||
items := make([]interface{}, 0)
|
||||
|
||||
for _, user := range l.store {
|
||||
items = append(items, user)
|
||||
}
|
||||
|
||||
return &api.ListResult{
|
||||
Items: items,
|
||||
TotalItems: len(items),
|
||||
}, nil
|
||||
}
|
||||
@@ -1,120 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ldap
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
|
||||
)
|
||||
|
||||
func TestSimpleLdap(t *testing.T) {
|
||||
ldapClient := NewSimpleLdap()
|
||||
|
||||
foo := &iamv1alpha2.User{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: iamv1alpha2.SchemeGroupVersion.String()},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "jerry",
|
||||
},
|
||||
Spec: iamv1alpha2.UserSpec{
|
||||
Email: "jerry@kubesphere.io",
|
||||
Lang: "en",
|
||||
Description: "Jerry is kind and gentle.",
|
||||
Groups: []string{},
|
||||
EncryptedPassword: "P@88w0rd",
|
||||
},
|
||||
}
|
||||
|
||||
t.Run("should create user", func(t *testing.T) {
|
||||
err := ldapClient.Create(foo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// check if user really created
|
||||
user, err := ldapClient.Get(foo.Name)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if diff := cmp.Diff(user, foo); len(diff) != 0 {
|
||||
t.Fatalf("%T differ (-got, +want): %s", user, diff)
|
||||
}
|
||||
|
||||
_ = ldapClient.Delete(foo.Name)
|
||||
})
|
||||
|
||||
t.Run("should update user", func(t *testing.T) {
|
||||
err := ldapClient.Create(foo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
foo.Spec.Description = "Jerry needs some drinks."
|
||||
err = ldapClient.Update(foo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// check if user really created
|
||||
user, err := ldapClient.Get(foo.Name)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if diff := cmp.Diff(user, foo); len(diff) != 0 {
|
||||
t.Fatalf("%T differ (-got, +want): %s", user, diff)
|
||||
}
|
||||
|
||||
_ = ldapClient.Delete(foo.Name)
|
||||
})
|
||||
|
||||
t.Run("should delete user", func(t *testing.T) {
|
||||
err := ldapClient.Create(foo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = ldapClient.Delete(foo.Name)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = ldapClient.Get(foo.Name)
|
||||
if err == nil || err != ErrUserNotExists {
|
||||
t.Fatalf("expected ErrUserNotExists error, got %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("should verify username and password", func(t *testing.T) {
|
||||
err := ldapClient.Create(foo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = ldapClient.Authenticate(foo.Name, foo.Spec.EncryptedPassword)
|
||||
if err != nil {
|
||||
t.Fatalf("should pass but got an error %v", err)
|
||||
}
|
||||
|
||||
err = ldapClient.Authenticate(foo.Name, "gibberish")
|
||||
if err == nil || err != ErrInvalidCredentials {
|
||||
t.Fatalf("expected error ErrInvalidCrenentials but got %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,313 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es/query"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/logging"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/utils/stringutils"
|
||||
)
|
||||
|
||||
const (
	// podNameMaxLength is the Kubernetes DNS-label cap on a pod name.
	podNameMaxLength    = 63
	podNameSuffixLength = 6 // 5 characters + 1 hyphen
	// replicaSetSuffixMaxLength bounds the ReplicaSet hash a Deployment
	// inserts between its name and the pod suffix.
	replicaSetSuffixMaxLength = 11 // max 10 characters + 1 hyphen
)
|
||||
|
||||
// Source is the shape of one log document as indexed in Elasticsearch
// (a single collected log record with its Kubernetes metadata embedded).
type Source struct {
	Log        string `json:"log"`  // raw log line
	Time       string `json:"time"` // record timestamp, as stored in the index
	Kubernetes `json:"kubernetes"`  // embedded pod/namespace/container metadata
}
|
||||
|
||||
// Kubernetes carries the pod metadata attached to each log document.
type Kubernetes struct {
	Namespace string `json:"namespace_name"`
	Pod       string `json:"pod_name"`
	Container string `json:"container_name"`
	Host      string `json:"host"`
}
|
||||
|
||||
// client implements the logging interface on top of Elasticsearch.
type client struct {
	// c is the underlying version-aware Elasticsearch client.
	c *es.Client
	// ExportLogsLimit caps how many log lines ExportLogs will write out.
	ExportLogsLimit int
}
|
||||
|
||||
func NewClient(options *logging.Options) (logging.Client, error) {
|
||||
|
||||
c := &client{
|
||||
ExportLogsLimit: options.ExportLogsLimit,
|
||||
}
|
||||
|
||||
var err error
|
||||
c.c, err = es.NewClient(options.Host, options.BasicAuth, options.Username, options.Password, options.IndexPrefix, options.Version)
|
||||
return c, err
|
||||
}
|
||||
|
||||
func (c *client) GetCurrentStats(sf logging.SearchFilter) (logging.Statistics, error) {
|
||||
var err error
|
||||
|
||||
b := query.NewBuilder().
|
||||
WithQuery(parseToQueryPart(sf)).
|
||||
WithAggregations(query.NewAggregations().
|
||||
WithCardinalityAggregation("kubernetes.docker_id.keyword")).
|
||||
WithSize(0)
|
||||
|
||||
resp, err := c.c.Search(b, sf.Starttime, sf.Endtime, false)
|
||||
if err != nil {
|
||||
return logging.Statistics{}, err
|
||||
}
|
||||
|
||||
return logging.Statistics{
|
||||
Containers: resp.Value,
|
||||
Logs: c.c.GetTotalHitCount(resp.Total),
|
||||
},
|
||||
nil
|
||||
}
|
||||
|
||||
func (c *client) CountLogsByInterval(sf logging.SearchFilter, interval string) (logging.Histogram, error) {
|
||||
|
||||
b := query.NewBuilder().
|
||||
WithQuery(parseToQueryPart(sf)).
|
||||
WithAggregations(query.NewAggregations().
|
||||
WithDateHistogramAggregation("time", interval)).
|
||||
WithSize(0)
|
||||
|
||||
resp, err := c.c.Search(b, sf.Starttime, sf.Endtime, false)
|
||||
if err != nil {
|
||||
return logging.Histogram{}, err
|
||||
}
|
||||
|
||||
h := logging.Histogram{
|
||||
Total: c.c.GetTotalHitCount(resp.Total),
|
||||
}
|
||||
for _, bucket := range resp.Buckets {
|
||||
h.Buckets = append(h.Buckets, logging.Bucket{
|
||||
Time: bucket.Key,
|
||||
Count: bucket.Count,
|
||||
})
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (c *client) SearchLogs(sf logging.SearchFilter, f, s int64, o string) (logging.Logs, error) {
|
||||
|
||||
b := query.NewBuilder().
|
||||
WithQuery(parseToQueryPart(sf)).
|
||||
WithSort("time", o).
|
||||
WithFrom(f).
|
||||
WithSize(s)
|
||||
|
||||
resp, err := c.c.Search(b, sf.Starttime, sf.Endtime, false)
|
||||
if err != nil {
|
||||
return logging.Logs{}, err
|
||||
}
|
||||
|
||||
l := logging.Logs{
|
||||
Total: c.c.GetTotalHitCount(resp.Total),
|
||||
}
|
||||
|
||||
for _, hit := range resp.AllHits {
|
||||
s := c.getSource(hit.Source)
|
||||
l.Records = append(l.Records, logging.Record{
|
||||
Log: s.Log,
|
||||
Time: s.Time,
|
||||
Namespace: s.Namespace,
|
||||
Pod: s.Pod,
|
||||
Container: s.Container,
|
||||
})
|
||||
}
|
||||
return l, nil
|
||||
}
|
||||
|
||||
func (c *client) ExportLogs(sf logging.SearchFilter, w io.Writer) error {
|
||||
|
||||
var id string
|
||||
var data []string
|
||||
|
||||
b := query.NewBuilder().
|
||||
WithQuery(parseToQueryPart(sf)).
|
||||
WithSort("time", "desc").
|
||||
WithFrom(0).
|
||||
WithSize(1000)
|
||||
|
||||
resp, err := c.c.Search(b, sf.Starttime, sf.Endtime, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer c.c.ClearScroll(id)
|
||||
|
||||
id = resp.ScrollId
|
||||
for _, hit := range resp.AllHits {
|
||||
data = append(data, c.getSource(hit.Source).Log)
|
||||
}
|
||||
|
||||
size := 0
|
||||
for {
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
output := new(bytes.Buffer)
|
||||
for _, l := range data {
|
||||
output.WriteString(stringutils.StripAnsi(l))
|
||||
}
|
||||
_, err = io.Copy(w, output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
size = size + 1000
|
||||
if size >= c.ExportLogsLimit {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, id, err = c.scroll(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *client) scroll(id string) ([]string, string, error) {
|
||||
resp, err := c.c.Scroll(id)
|
||||
if err != nil {
|
||||
return nil, id, err
|
||||
}
|
||||
|
||||
var data []string
|
||||
for _, hit := range resp.AllHits {
|
||||
data = append(data, c.getSource(hit.Source).Log)
|
||||
}
|
||||
return data, resp.ScrollId, nil
|
||||
}
|
||||
|
||||
func (c *client) getSource(val interface{}) Source {
|
||||
|
||||
s := Source{}
|
||||
|
||||
bs, err := json.Marshal(val)
|
||||
if err != nil {
|
||||
return s
|
||||
}
|
||||
|
||||
err = json.Unmarshal(bs, &s)
|
||||
if err != nil {
|
||||
return s
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// parseToQueryPart translates a logging.SearchFilter into the Elasticsearch
// bool query used by all search/aggregation calls in this package. Exact
// filters use match_phrase on .keyword fields; search terms use
// match_phrase_prefix for fuzzy matching; workloads are matched via a pod
// name regex. Each group is OR-ed internally (minimum_should_match = 1) and
// the groups are AND-ed together as filters.
func parseToQueryPart(sf logging.SearchFilter) *query.Query {

	var mini int32 = 1
	b := query.NewBool()

	// Per-namespace clause: namespace match AND time >= the namespace's
	// creation time (zero time when no creation time was supplied).
	bi := query.NewBool().WithMinimumShouldMatch(mini)
	for ns, t := range sf.NamespaceFilter {
		ct := time.Time{}
		if t != nil {
			ct = *t
		}

		bi.AppendShould(query.NewBool().
			AppendFilter(query.NewMatchPhrase("kubernetes.namespace_name.keyword", ns)).
			AppendFilter(query.NewRange("time").WithGTE(ct)))
	}
	b.AppendFilter(bi)

	// Workloads are matched by regex on the pod name (see podNameRegex).
	if sf.WorkloadFilter != nil {
		bi := query.NewBool().WithMinimumShouldMatch(mini)
		for _, wk := range sf.WorkloadFilter {
			bi.AppendShould(query.NewRegex("kubernetes.pod_name.keyword", podNameRegex(wk)))
		}

		b.AppendFilter(bi)
	}

	// Exact pod / container name filters.
	b.AppendFilter(query.NewBool().
		AppendMultiShould(query.NewMultiMatchPhrase("kubernetes.pod_name.keyword", sf.PodFilter)).
		WithMinimumShouldMatch(mini))

	b.AppendFilter(query.NewBool().
		AppendMultiShould(query.NewMultiMatchPhrase("kubernetes.container_name.keyword", sf.ContainerFilter)).
		WithMinimumShouldMatch(mini))

	// fuzzy matching
	b.AppendFilter(query.NewBool().
		AppendMultiShould(query.NewMultiMatchPhrasePrefix("kubernetes.pod_name", sf.WorkloadSearch)).
		WithMinimumShouldMatch(mini))

	b.AppendFilter(query.NewBool().
		AppendMultiShould(query.NewMultiMatchPhrasePrefix("kubernetes.pod_name", sf.PodSearch)).
		WithMinimumShouldMatch(mini))

	b.AppendFilter(query.NewBool().
		AppendMultiShould(query.NewMultiMatchPhrasePrefix("kubernetes.container_name", sf.ContainerSearch)).
		WithMinimumShouldMatch(mini))

	b.AppendFilter(query.NewBool().
		AppendMultiShould(query.NewMultiMatchPhrasePrefix("log", sf.LogSearch)).
		WithMinimumShouldMatch(mini))

	// Overall time window; unset (zero) bounds are simply omitted.
	r := query.NewRange("time")
	if !sf.Starttime.IsZero() {
		r.WithGTE(sf.Starttime)
	}
	if !sf.Endtime.IsZero() {
		r.WithLTE(sf.Endtime)
	}

	b.AppendFilter(r)

	return query.NewQuery().WithBool(b)
}
|
||||
|
||||
func podNameRegex(workloadName string) string {
|
||||
var regex string
|
||||
if len(workloadName) <= podNameMaxLength-replicaSetSuffixMaxLength-podNameSuffixLength {
|
||||
// match deployment pods, eg. <deploy>-579dfbcddd-24znw
|
||||
// replicaset rand string is limited to vowels
|
||||
// https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/util/rand/rand.go#L83
|
||||
regex += workloadName + "-[bcdfghjklmnpqrstvwxz2456789]{1,10}-[a-z0-9]{5}|"
|
||||
// match statefulset pods, eg. <sts>-0
|
||||
regex += workloadName + "-[0-9]+|"
|
||||
// match pods of daemonset or job, eg. <ds>-29tdk, <job>-5xqvl
|
||||
regex += workloadName + "-[a-z0-9]{5}"
|
||||
} else if len(workloadName) <= podNameMaxLength-podNameSuffixLength {
|
||||
replicaSetSuffixLength := podNameMaxLength - podNameSuffixLength - len(workloadName)
|
||||
regex += fmt.Sprintf("%s%d%s", workloadName+"-[bcdfghjklmnpqrstvwxz2456789]{", replicaSetSuffixLength, "}[a-z0-9]{5}|")
|
||||
regex += workloadName + "-[0-9]+|"
|
||||
regex += workloadName + "-[a-z0-9]{5}"
|
||||
} else {
|
||||
// Rand suffix may overwrites the workload name if the name is too long
|
||||
// This won't happen for StatefulSet because long name will cause ReplicaSet fails during StatefulSet creation.
|
||||
regex += workloadName[:podNameMaxLength-podNameSuffixLength+1] + "[a-z0-9]{5}|"
|
||||
regex += workloadName + "-[0-9]+"
|
||||
}
|
||||
return regex
|
||||
}
|
||||
@@ -1,311 +0,0 @@
|
||||
/*
|
||||
Copyright 2020 KubeSphere Authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package elasticsearch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/es/query"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/logging"
|
||||
)
|
||||
|
||||
// TestGetCurrentStats verifies GetCurrentStats against canned Elasticsearch
// responses for both ES6 and ES7, covering the success path and the
// index-not-found error path.
func TestGetCurrentStats(t *testing.T) {
	var tests = []struct {
		fakeVersion string // Elasticsearch version the fake server emulates
		fakeResp    string // canned response file under ./testdata
		fakeCode    int    // HTTP status code the fake server returns
		expected    logging.Statistics
		expectedErr string
	}{
		{
			fakeVersion: es.ElasticV6,
			fakeResp:    "es6_get_current_stats_200.json",
			fakeCode:    http.StatusOK,
			expected: logging.Statistics{
				Containers: 93,
				Logs:       241222,
			},
		},
		{
			fakeVersion: es.ElasticV6,
			fakeResp:    "es6_get_current_stats_404.json",
			fakeCode:    http.StatusNotFound,
			expectedErr: "type: index_not_found_exception, reason: no such index",
		},
		{
			fakeVersion: es.ElasticV7,
			fakeResp:    "es7_get_current_stats_200.json",
			fakeCode:    http.StatusOK,
			expected: logging.Statistics{
				Containers: 48,
				Logs:       9726,
			},
		},
		{
			fakeVersion: es.ElasticV7,
			fakeResp:    "es7_get_current_stats_404.json",
			fakeCode:    http.StatusNotFound,
			expectedErr: "type: index_not_found_exception, reason: no such index [ks-logstash-log-2020.05.2]",
		},
	}

	for i, test := range tests {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			srv := mockElasticsearchService("/ks-logstash-log*/_search", test.fakeResp, test.fakeCode)
			defer srv.Close()

			client, err := NewClient(&logging.Options{
				Host:        srv.URL,
				IndexPrefix: "ks-logstash-log",
				Version:     test.fakeVersion,
			})
			if err != nil {
				t.Fatalf("create client error, %s", err)
			}

			result, err := client.GetCurrentStats(logging.SearchFilter{})
			if test.expectedErr != "" {
				if diff := cmp.Diff(fmt.Sprint(err), test.expectedErr); diff != "" {
					t.Fatalf("%T differ (-got, +want): %s", test.expectedErr, diff)
				}
			}
			// On error cases `expected` is the zero Statistics, so this
			// comparison also asserts that nothing was populated.
			if diff := cmp.Diff(result, test.expected); diff != "" {
				t.Fatalf("%T differ (-got, +want): %s", test.expected, diff)
			}
		})
	}
}
|
||||
|
||||
// TestCountLogsByInterval verifies CountLogsByInterval against canned ES7
// responses: a successful histogram, a bad-interval (400) error and a
// missing-index (404) error.
func TestCountLogsByInterval(t *testing.T) {
	var tests = []struct {
		fakeVersion string // Elasticsearch version the fake server emulates
		fakeResp    string // canned response file under ./testdata
		fakeCode    int    // HTTP status code the fake server returns
		expected    logging.Histogram
		expectedErr string
	}{
		{
			fakeVersion: es.ElasticV7,
			fakeResp:    "es7_count_logs_by_interval_200.json",
			fakeCode:    http.StatusOK,
			expected: logging.Histogram{
				Total: 10000,
				Buckets: []logging.Bucket{
					{
						Time:  1589644800000,
						Count: 410,
					},
					{
						Time:  1589646600000,
						Count: 7465,
					},
					{
						Time:  1589648400000,
						Count: 12790,
					},
				},
			},
		},
		{
			fakeVersion: es.ElasticV7,
			fakeResp:    "es7_count_logs_by_interval_400.json",
			fakeCode:    http.StatusBadRequest,
			expectedErr: "type: search_phase_execution_exception, reason: Unable to parse interval [30m0s]",
		},
		{
			fakeVersion: es.ElasticV7,
			fakeResp:    "es7_count_logs_by_interval_404.json",
			fakeCode:    http.StatusNotFound,
			expectedErr: "type: index_not_found_exception, reason: no such index [ks-logstash-log-20]",
		},
	}

	for i, test := range tests {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			srv := mockElasticsearchService("/ks-logstash-log*/_search", test.fakeResp, test.fakeCode)
			defer srv.Close()

			client, err := NewClient(&logging.Options{
				Host:        srv.URL,
				IndexPrefix: "ks-logstash-log",
				Version:     test.fakeVersion,
			})
			if err != nil {
				t.Fatalf("create client error, %s", err)
			}

			result, err := client.CountLogsByInterval(logging.SearchFilter{}, "15m")
			if test.expectedErr != "" {
				if diff := cmp.Diff(fmt.Sprint(err), test.expectedErr); diff != "" {
					t.Fatalf("%T differ (-got, +want): %s", test.expectedErr, diff)
				}
			}
			// On error cases `expected` is the zero Histogram, so this also
			// asserts nothing was populated.
			if diff := cmp.Diff(result, test.expected); diff != "" {
				t.Fatalf("%T differ (-got, +want): %s", test.expected, diff)
			}
		})
	}
}
|
||||
|
||||
// TestSearchLogs verifies SearchLogs against a canned ES7 response, with the
// expected decoded result loaded from a companion golden file.
func TestSearchLogs(t *testing.T) {
	var tests = []struct {
		fakeVersion string // Elasticsearch version the fake server emulates
		fakeResp    string // canned response file under ./testdata
		fakeCode    int    // HTTP status code the fake server returns
		expected    string // golden file holding the expected logging.Logs
		expectedErr string
	}{
		{
			fakeVersion: es.ElasticV7,
			fakeResp:    "es7_search_logs_200.json",
			fakeCode:    http.StatusOK,
			expected:    "es7_search_logs_200_result.json",
		},
	}

	for i, test := range tests {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			// Load the golden result before exercising the client.
			var expected logging.Logs
			err := JsonFromFile(test.expected, &expected)
			if err != nil {
				t.Fatal(err)
			}

			srv := mockElasticsearchService("/ks-logstash-log*/_search", test.fakeResp, test.fakeCode)
			defer srv.Close()

			client, err := NewClient(&logging.Options{
				Host:        srv.URL,
				IndexPrefix: "ks-logstash-log",
				Version:     test.fakeVersion,
			})
			if err != nil {
				t.Fatalf("create client error, %s", err)
			}

			result, err := client.SearchLogs(logging.SearchFilter{}, 0, 10, "asc")
			if test.expectedErr != "" {
				if diff := cmp.Diff(fmt.Sprint(err), test.expectedErr); diff != "" {
					t.Fatalf("%T differ (-got, +want): %s", test.expectedErr, diff)
				}
			}
			if diff := cmp.Diff(result, expected); diff != "" {
				t.Fatalf("%T differ (-got, +want): %s", expected, diff)
			}
		})
	}
}
|
||||
|
||||
func TestParseToQueryPart(t *testing.T) {
|
||||
var tests = []struct {
|
||||
filter logging.SearchFilter
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
filter: logging.SearchFilter{
|
||||
NamespaceFilter: map[string]*time.Time{
|
||||
"default": func() *time.Time { t := time.Unix(1589981934, 0); return &t }(),
|
||||
},
|
||||
},
|
||||
expected: "api_body_1.json",
|
||||
},
|
||||
{
|
||||
filter: logging.SearchFilter{
|
||||
WorkloadFilter: []string{"mysql"},
|
||||
Starttime: time.Unix(1589980934, 0),
|
||||
Endtime: time.Unix(1589981934, 0),
|
||||
},
|
||||
expected: "api_body_2.json",
|
||||
},
|
||||
{
|
||||
filter: logging.SearchFilter{
|
||||
PodFilter: []string{"mysql"},
|
||||
PodSearch: []string{"mysql-a8w3s-10945j"},
|
||||
LogSearch: []string{"info"},
|
||||
},
|
||||
expected: "api_body_3.json",
|
||||
},
|
||||
{
|
||||
filter: logging.SearchFilter{
|
||||
ContainerFilter: []string{"mysql-1"},
|
||||
ContainerSearch: []string{"mysql-3"},
|
||||
},
|
||||
expected: "api_body_4.json",
|
||||
},
|
||||
{
|
||||
filter: logging.SearchFilter{
|
||||
Starttime: time.Unix(1590744676, 0),
|
||||
},
|
||||
expected: "api_body_7.json",
|
||||
},
|
||||
{
|
||||
filter: logging.SearchFilter{
|
||||
NamespaceFilter: map[string]*time.Time{
|
||||
"default": nil,
|
||||
},
|
||||
},
|
||||
expected: "api_body_8.json",
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
|
||||
expected, err := os.ReadFile(fmt.Sprintf("./testdata/%s", test.expected))
|
||||
if err != nil {
|
||||
t.Fatalf("read expected error, %s", err.Error())
|
||||
}
|
||||
|
||||
result, _ := query.NewBuilder().WithQuery(parseToQueryPart(test.filter)).Bytes()
|
||||
if diff := cmp.Diff(string(result), string(result)); diff != "" {
|
||||
t.Fatalf("%T differ (-got, +want): %s", expected, diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// mockElasticsearchService starts a test HTTP server that answers requests
// matching pattern with the given status code and the contents of
// ./testdata/<fakeResp> (an empty body if the file cannot be read).
// Callers must Close the returned server.
func mockElasticsearchService(pattern, fakeResp string, fakeCode int) *httptest.Server {
	handler := func(w http.ResponseWriter, _ *http.Request) {
		// Best effort: a missing fixture file just yields an empty body.
		body, _ := os.ReadFile(fmt.Sprintf("./testdata/%s", fakeResp))
		w.WriteHeader(fakeCode)
		_, _ = w.Write(body)
	}

	mux := http.NewServeMux()
	mux.HandleFunc(pattern, handler)
	return httptest.NewServer(mux)
}
|
||||
|
||||
func JsonFromFile(expectedFile string, expectedJsonPtr interface{}) error {
|
||||
json, err := os.ReadFile(fmt.Sprintf("./testdata/%s", expectedFile))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = jsoniter.Unmarshal(json, expectedJsonPtr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user