Compare commits

...

12 Commits

Author SHA1 Message Date
KubeSphere CI Bot
5b9c357160 [release-3.3] Fix: when placement is empty return error (#5218)
Fix: when placement is empty return error

Co-authored-by: Wenhao Zhou <wenhaozhou@yunify.com>
2022-09-15 19:38:47 +08:00
KubeSphere CI Bot
c385dd92e4 [release-3.3] Add authorization control for patching workspacetemplates (#5217)
* update patch workspacetemplate to support patching with JsonPatchType and change the authorization processing

Signed-off-by: Wenhao Zhou <wenhaozhou@yunify.com>

* make goimports

* Fix: if the type is not a string it will lead to a panic

Signed-off-by: Wenhao Zhou <wenhaozhou@yunify.com>

* Add jsonpatchutil for handling json patch data

Signed-off-by: Wenhao Zhou <wenhaozhou@yunify.com>

* Updated patch workspacetemplate to make the code run more efficiently

* fix: multiple clusterrolebindings cannot authorize

* Correct wrong spelling

Signed-off-by: Wenhao Zhou <wenhaozhou@yunify.com>
Co-authored-by: Wenhao Zhou <wenhaozhou@yunify.com>
2022-09-15 19:32:47 +08:00
KubeSphere CI Bot
1e1b2bd594 [release-3.3] support recording disable and enable users in auditing (#5202)
support recording disable and enable users in auditing

Signed-off-by: wanjunlei <wanjunlei@kubesphere.io>

Signed-off-by: wanjunlei <wanjunlei@kubesphere.io>
Co-authored-by: wanjunlei <wanjunlei@kubesphere.io>
2022-09-08 10:25:41 +08:00
KubeSphere CI Bot
951b86648c [release-3.3] fix bug helm repo paging query (#5201)
* fix bug helmrepo paging query

* fix bug helmrepo paging query

* fix bug helm repo paging query

Co-authored-by: mayongxing <mayongxing@cmsr.chinamobile.com>
2022-09-08 10:17:41 +08:00
KubeSphere CI Bot
04433c139d [release-3.3] Fix: index out of range when merging two repo indexes (#5169)
Fix: index out of range when merging two repo indexes

Co-authored-by: LiHui <andrewli@kubesphere.io>
2022-08-25 16:06:36 +08:00
KubeSphere CI Bot
3b8c28d21e [release-3.3] Support for filtering workspace roles using labelSelector (#5162)
Support for filtering workspace roles using labelSelector

Signed-off-by: Wenhao Zhou <wenhaozhou@yunify.com>

Signed-off-by: Wenhao Zhou <wenhaozhou@yunify.com>
Co-authored-by: Wenhao Zhou <wenhaozhou@yunify.com>
2022-08-23 10:30:21 +08:00
KubeSphere CI Bot
9489718270 [release-3.3] fill field status of helmrepo in response (#5158)
fill field status of helmrepo in response

Signed-off-by: x893675 <x893675@icloud.com>

Signed-off-by: x893675 <x893675@icloud.com>
Co-authored-by: x893675 <x893675@icloud.com>
2022-08-22 16:15:00 +08:00
KubeSphere CI Bot
54df6b8c8c [release-3.3] fix cluster ready condition always true (#5137)
fix cluster ready condition always true

Signed-off-by: x893675 <x893675@icloud.com>

Signed-off-by: x893675 <x893675@icloud.com>
Co-authored-by: x893675 <x893675@icloud.com>
2022-08-16 14:12:46 +08:00
KubeSphere CI Bot
d917905529 [release-3.3] Fix ingress P95 delay time promql statement (#5132)
Fix ingress P95 delay time promql statement

Co-authored-by: Xinzhao Xu <z2d@jifangcheng.com>
2022-08-14 16:49:35 +08:00
KubeSphere CI Bot
cd6f940f1d [release-3.3] Adjust container terminal priority: bash, sh (#5076)
Adjust container terminal priority: bash, sh

Co-authored-by: tal66 <77445020+tal66@users.noreply.github.com>
2022-07-21 11:16:29 +08:00
KubeSphere CI Bot
921a8f068b [release-3.3] skip generated code when fmt code (#5079)
skip generated code when fmt code

Co-authored-by: LiHui <andrewli@kubesphere.io>
2022-07-21 11:16:14 +08:00
KubeSphere CI Bot
641aa1dfcf [release-3.3] close remote terminal.(#5023) (#5028)
close remote terminal.(kubesphere#5023)

Co-authored-by: lixueduan <li.xueduan@99cloud.net>
2022-07-06 18:08:34 +08:00
16 changed files with 394 additions and 23 deletions

View File

@@ -39,6 +39,7 @@ find_files() {
-o -wholename '*/third_party/*' \
-o -wholename '*/vendor/*' \
-o -wholename './staging/src/kubesphere.io/client-go/*vendor/*' \
-o -wholename './staging/src/kubesphere.io/api/*/zz_generated.deepcopy.go' \
\) -prune \
\) -name '*.go'
}

hack/verify-gofmt.sh Normal file → Executable file
View File

@@ -44,6 +44,7 @@ find_files() {
-o -wholename '*/third_party/*' \
-o -wholename '*/vendor/*' \
-o -wholename './staging/src/kubesphere.io/client-go/*vendor/*' \
-o -wholename './staging/src/kubesphere.io/api/*/zz_generated.deepcopy.go' \
-o -wholename '*/bindata.go' \
\) -prune \
\) -name '*.go'

View File

@@ -33,8 +33,8 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apiserver/pkg/apis/audit"
"k8s.io/klog"
devopsv1alpha3 "kubesphere.io/api/devops/v1alpha3"
"kubesphere.io/api/iam/v1alpha2"
auditv1alpha1 "kubesphere.io/kubesphere/pkg/apiserver/auditing/v1alpha1"
"kubesphere.io/kubesphere/pkg/apiserver/query"
@@ -192,7 +192,7 @@ func (a *auditing) LogRequestObject(req *http.Request, info *request.RequestInfo
}
}
-if (e.Level.GreaterOrEqual(audit.LevelRequest) || e.Verb == "create") && req.ContentLength > 0 {
+if a.needAnalyzeRequestBody(e, req) {
body, err := ioutil.ReadAll(req.Body)
if err != nil {
klog.Error(err)
@@ -212,11 +212,45 @@ func (a *auditing) LogRequestObject(req *http.Request, info *request.RequestInfo
e.ObjectRef.Name = obj.Name
}
}
// for recording user disable and enable operations
if e.ObjectRef.Resource == "users" && e.Verb == "update" {
u := &v1alpha2.User{}
if err := json.Unmarshal(body, u); err == nil {
if u.Status.State == v1alpha2.UserActive {
e.Verb = "enable"
} else if u.Status.State == v1alpha2.UserDisabled {
e.Verb = "disable"
}
}
}
}
return e
}
func (a *auditing) needAnalyzeRequestBody(e *auditv1alpha1.Event, req *http.Request) bool {
if req.ContentLength <= 0 {
return false
}
if e.Level.GreaterOrEqual(audit.LevelRequest) {
return true
}
if e.Verb == "create" {
return true
}
// for recording user disable and enable operations
if e.ObjectRef.Resource == "users" && e.Verb == "update" {
return true
}
return false
}
func (a *auditing) LogResponseObject(e *auditv1alpha1.Event, resp *ResponseCapture) {
e.StageTimestamp = metav1.NowMicro()

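The verb rewrite above is the core of this change. A minimal, self-contained sketch of the same idea follows; the User type and state constants are simplified stand-ins for the iam/v1alpha2 API types, not the project's actual definitions:

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for the iam/v1alpha2 User type used in the diff.
type User struct {
	Status struct {
		State string `json:"state"`
	} `json:"status"`
}

const (
	userActive   = "Active"   // assumed value of v1alpha2.UserActive
	userDisabled = "Disabled" // assumed value of v1alpha2.UserDisabled
)

// rewriteVerb mirrors the logic added to LogRequestObject: an "update" on the
// users resource is re-labelled "enable" or "disable" according to the state
// carried in the request body, so auditing records these as distinct events.
func rewriteVerb(verb, resource string, body []byte) string {
	if resource != "users" || verb != "update" {
		return verb
	}
	u := &User{}
	if err := json.Unmarshal(body, u); err != nil {
		return verb
	}
	switch u.Status.State {
	case userActive:
		return "enable"
	case userDisabled:
		return "disable"
	}
	return verb
}

func main() {
	fmt.Println(rewriteVerb("update", "users", []byte(`{"status":{"state":"Disabled"}}`))) // disable
}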
View File

@@ -418,6 +418,15 @@ func (c *clusterController) syncCluster(key string) error {
Message: "Cluster can not join federation control plane",
}
c.updateClusterCondition(cluster, federationNotReadyCondition)
notReadyCondition := clusterv1alpha1.ClusterCondition{
Type: clusterv1alpha1.ClusterReady,
Status: v1.ConditionFalse,
LastUpdateTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: "Cluster join federation control plane failed",
Message: "Cluster is Not Ready now",
}
c.updateClusterCondition(cluster, notReadyCondition)
_, err = c.ksClient.ClusterV1alpha1().Clusters().Update(context.TODO(), cluster, metav1.UpdateOptions{})
if err != nil {

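Before this change a failed federation join only set the federation condition, so ClusterReady could stay true indefinitely; the added block records an explicit not-ready condition as well. A sketch of the replace-or-append pattern that updateClusterCondition presumably implements (types simplified):

package main

import "fmt"

// Simplified stand-in for clusterv1alpha1.ClusterCondition.
type condition struct {
	Type, Status, Reason string
}

// setCondition replaces an existing condition of the same type or appends a
// new one, the standard Kubernetes status-condition pattern.
func setCondition(conds []condition, c condition) []condition {
	for i := range conds {
		if conds[i].Type == c.Type {
			conds[i] = c
			return conds
		}
	}
	return append(conds, c)
}

func main() {
	conds := []condition{{Type: "Ready", Status: "True"}}
	conds = setCondition(conds, condition{
		Type:   "Ready",
		Status: "False",
		Reason: "Cluster join federation control plane failed",
	})
	fmt.Println(conds) // [{Ready False Cluster join federation control plane failed}]
}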
View File

@@ -380,6 +380,7 @@ func (h *iamHandler) ListWorkspaceRoles(request *restful.Request, response *rest
queryParam.Filters[iamv1alpha2.ScopeWorkspace] = query.Value(workspace)
// shared workspace role template
if string(queryParam.Filters[query.FieldLabel]) == fmt.Sprintf("%s=%s", iamv1alpha2.RoleTemplateLabel, "true") ||
strings.Contains(queryParam.LabelSelector, iamv1alpha2.RoleTemplateLabel) ||
queryParam.Filters[iamv1alpha2.AggregateTo] != "" {
delete(queryParam.Filters, iamv1alpha2.ScopeWorkspace)
}

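The extra strings.Contains clause is what makes labelSelector-based filtering work: previously only the exact field-filter form was recognized, so a labelSelector query still had the workspace-scope filter applied. A tiny sketch of the decision; the label key is the assumed value of iamv1alpha2.RoleTemplateLabel:

package main

import (
	"fmt"
	"strings"
)

// Assumed value of iamv1alpha2.RoleTemplateLabel.
const roleTemplateLabel = "iam.kubesphere.io/role-template"

// isSharedRoleTemplateQuery mirrors the condition above: the workspace-scope
// filter is dropped when the request targets shared role templates, whether
// expressed as a field filter or as a raw labelSelector.
func isSharedRoleTemplateQuery(labelFilter, labelSelector string) bool {
	return labelFilter == fmt.Sprintf("%s=%s", roleTemplateLabel, "true") ||
		strings.Contains(labelSelector, roleTemplateLabel)
}

func main() {
	fmt.Println(isSharedRoleTemplateQuery("", "iam.kubesphere.io/role-template=true")) // true
	fmt.Println(isSharedRoleTemplateQuery("", "kubesphere.io/workspace=demo"))         // false
}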
View File

@@ -520,33 +520,44 @@ func (h *tenantHandler) PatchNamespace(request *restful.Request, response *restf
response.WriteEntity(patched)
}
-func (h *tenantHandler) PatchWorkspaceTemplate(request *restful.Request, response *restful.Response) {
-workspaceName := request.PathParameter("workspace")
+func (h *tenantHandler) PatchWorkspaceTemplate(req *restful.Request, resp *restful.Response) {
+workspaceName := req.PathParameter("workspace")
var data json.RawMessage
-err := request.ReadEntity(&data)
+err := req.ReadEntity(&data)
if err != nil {
klog.Error(err)
-api.HandleBadRequest(response, request, err)
+api.HandleBadRequest(resp, req, err)
return
}
-patched, err := h.tenant.PatchWorkspaceTemplate(workspaceName, data)
+requestUser, ok := request.UserFrom(req.Request.Context())
+if !ok {
+err := fmt.Errorf("cannot obtain user info")
+klog.Errorln(err)
+api.HandleForbidden(resp, req, err)
+return
+}
+patched, err := h.tenant.PatchWorkspaceTemplate(requestUser, workspaceName, data)
if err != nil {
klog.Error(err)
if errors.IsNotFound(err) {
-api.HandleNotFound(response, request, err)
+api.HandleNotFound(resp, req, err)
return
}
if errors.IsBadRequest(err) {
-api.HandleBadRequest(response, request, err)
+api.HandleBadRequest(resp, req, err)
return
}
-api.HandleInternalError(response, request, err)
+if errors.IsForbidden(err) {
+api.HandleForbidden(resp, req, err)
+return
+}
+api.HandleInternalError(resp, req, err)
return
}
-response.WriteEntity(patched)
+resp.WriteEntity(patched)
}
func (h *tenantHandler) ListClusters(r *restful.Request, response *restful.Response) {

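With the handler now reading the body as raw JSON and passing the request user along, the endpoint accepts JSON Patch documents. Two illustrative payload shapes, inferred from the placement handling in tenant.go further below (the cluster names are made up). Add one cluster to an existing placement list:

[{"op":"add","path":"/spec/placement/clusters/-","value":{"name":"member-a"}}]

Or, when the placement is still empty, fill the whole "clusters" field with the first patch:

[{"op":"add","path":"/spec/placement","value":{"clusters":[{"name":"member-a"},{"name":"member-b"}]}}]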
View File

@@ -302,7 +302,7 @@ func (c *repoOperator) ListRepos(conditions *params.Conditions, orderBy string,
start, end := (&query.Pagination{Limit: limit, Offset: offset}).GetValidPagination(totalCount)
repos = repos[start:end]
items := make([]interface{}, 0, len(repos))
-for i, j := offset, 0; i < len(repos) && j < limit; i, j = i+1, j+1 {
+for i := range repos {
items = append(items, convertRepo(repos[i]))
}
return &models.PageableResponse{Items: items, TotalCount: totalCount}, nil

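The old loop is an off-by-pagination bug: repos has already been sliced down to the requested page, yet the loop indexes into that slice starting at offset again, so any page with offset >= limit comes back short or empty. A minimal reproduction:

package main

import "fmt"

func main() {
	repos := []string{"a", "b", "c", "d", "e"}
	offset, limit := 2, 2
	page := repos[offset : offset+limit] // the requested page: [c d]

	// buggy: restarts at offset inside the already-sliced page and
	// immediately walks off its end
	var buggy []string
	for i, j := offset, 0; i < len(page) && j < limit; i, j = i+1, j+1 {
		buggy = append(buggy, page[i])
	}

	// fixed: range over the page itself
	var fixed []string
	for i := range page {
		fixed = append(fixed, page[i])
	}

	fmt.Println(buggy, fixed) // [] [c d]
}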
View File

@@ -713,7 +713,7 @@ type Repo struct {
// selectors
Selectors RepoSelectors `json:"selectors"`
-// status eg.[active|deleted]
+// status eg.[successful|failed|syncing]
Status string `json:"status,omitempty"`
// record status changed time

View File

@@ -431,6 +431,10 @@ func convertRepo(in *v1alpha1.HelmRepo) *Repo {
out.Name = in.GetTrueName()
out.Status = in.Status.State
// set default status `syncing` when the helmrepo has not been reconciled yet
if out.Status == "" {
out.Status = v1alpha1.RepoStateSyncing
}
date := strfmt.DateTime(time.Unix(in.CreationTimestamp.Unix(), 0))
out.CreateTime = &date

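A minimal sketch of the defaulting above; RepoStateSyncing stands in for v1alpha1.RepoStateSyncing, assumed to be the string "syncing" per the status enum noted above:

package main

import "fmt"

const RepoStateSyncing = "syncing" // assumed value of v1alpha1.RepoStateSyncing

// fillStatus mirrors convertRepo's defaulting: a repo whose controller has
// not reconciled it yet carries an empty state, which the API now reports
// as "syncing" instead of an empty string.
func fillStatus(state string) string {
	if state == "" {
		return RepoStateSyncing
	}
	return state
}

func main() {
	fmt.Println(fillStatus(""))           // syncing
	fmt.Println(fillStatus("successful")) // successful
}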
View File

@@ -24,7 +24,9 @@ import (
"strings"
"time"
"github.com/mitchellh/mapstructure"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -69,6 +71,8 @@ import (
loggingclient "kubesphere.io/kubesphere/pkg/simple/client/logging"
meteringclient "kubesphere.io/kubesphere/pkg/simple/client/metering"
monitoringclient "kubesphere.io/kubesphere/pkg/simple/client/monitoring"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
jsonpatchutil "kubesphere.io/kubesphere/pkg/utils/josnpatchutil"
"kubesphere.io/kubesphere/pkg/utils/stringutils"
)
@@ -81,7 +85,7 @@ type Interface interface {
CreateWorkspaceTemplate(workspace *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha2.WorkspaceTemplate, error)
DeleteWorkspaceTemplate(workspace string, opts metav1.DeleteOptions) error
UpdateWorkspaceTemplate(workspace *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha2.WorkspaceTemplate, error)
-PatchWorkspaceTemplate(workspace string, data json.RawMessage) (*tenantv1alpha2.WorkspaceTemplate, error)
+PatchWorkspaceTemplate(user user.Info, workspace string, data json.RawMessage) (*tenantv1alpha2.WorkspaceTemplate, error)
DescribeWorkspaceTemplate(workspace string) (*tenantv1alpha2.WorkspaceTemplate, error)
ListNamespaces(user user.Info, workspace string, query *query.Query) (*api.ListResult, error)
ListDevOpsProjects(user user.Info, workspace string, query *query.Query) (*api.ListResult, error)
@@ -117,6 +121,7 @@ type tenantOperator struct {
auditing auditing.Interface
mo monitoring.MonitoringOperator
opRelease openpitrix.ReleaseInterface
clusterClient clusterclient.ClusterClients
}
func New(informers informers.InformerFactory, k8sclient kubernetes.Interface, ksclient kubesphere.Interface, evtsClient eventsclient.Client, loggingClient loggingclient.Client, auditingclient auditingclient.Client, am am.AccessManagementInterface, im im.IdentityManagementInterface, authorizer authorizer.Authorizer, monitoringclient monitoringclient.Interface, resourceGetter *resourcev1alpha3.ResourceGetter, opClient openpitrix.Interface) Interface {
@@ -132,6 +137,7 @@ func New(informers informers.InformerFactory, k8sclient kubernetes.Interface, ks
auditing: auditing.NewEventsOperator(auditingclient),
mo: monitoring.NewMonitoringOperator(monitoringclient, nil, k8sclient, informers, resourceGetter, nil),
opRelease: opClient,
clusterClient: clusterclient.NewClusterClient(informers.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters()),
}
}
@@ -470,8 +476,132 @@ func (t *tenantOperator) PatchNamespace(workspace string, namespace *corev1.Name
return t.k8sclient.CoreV1().Namespaces().Patch(context.Background(), namespace.Name, types.MergePatchType, data, metav1.PatchOptions{})
}
-func (t *tenantOperator) PatchWorkspaceTemplate(workspace string, data json.RawMessage) (*tenantv1alpha2.WorkspaceTemplate, error) {
-return t.ksclient.TenantV1alpha2().WorkspaceTemplates().Patch(context.Background(), workspace, types.MergePatchType, data, metav1.PatchOptions{})
+func (t *tenantOperator) PatchWorkspaceTemplate(user user.Info, workspace string, data json.RawMessage) (*tenantv1alpha2.WorkspaceTemplate, error) {
var manageWorkspaceTemplateRequest bool
clusterNames := sets.NewString()
patchs, err := jsonpatchutil.Parse(data)
if err != nil {
klog.Error(err)
return nil, err
}
if len(patchs) > 0 {
for _, patch := range patchs {
path, err := patch.Path()
if err != nil {
klog.Error(err)
return nil, err
}
// If the request path is the placement, just collect the cluster names into a set and check cluster permissions later.
// Otherwise the request intends to manage the workspace template itself, so check whether the user has permission to manage workspace templates.
if strings.HasPrefix(path, "/spec/placement") {
if patch.Kind() != "add" && patch.Kind() != "remove" {
err := errors.NewBadRequest("unsupported operation type")
klog.Error(err)
return nil, err
}
clusterValue := make(map[string]interface{})
err := jsonpatchutil.GetValue(patch, &clusterValue)
if err != nil {
klog.Error(err)
return nil, err
}
// if the placement is empty, the first patch needs to fill the "clusters" field.
if cName := clusterValue["name"]; cName != nil {
cn, ok := cName.(string)
if ok {
clusterNames.Insert(cn)
}
} else if cluster := clusterValue["clusters"]; cluster != nil {
clusterRefrences := []typesv1beta1.GenericClusterReference{}
err := mapstructure.Decode(cluster, &clusterRefrences)
if err != nil {
klog.Error(err)
return nil, err
}
for _, v := range clusterRefrences {
clusterNames.Insert(v.Name)
}
}
} else {
manageWorkspaceTemplateRequest = true
}
}
}
if manageWorkspaceTemplateRequest {
deleteWST := authorizer.AttributesRecord{
User: user,
Verb: authorizer.VerbDelete,
APIGroup: tenantv1alpha2.SchemeGroupVersion.Group,
APIVersion: tenantv1alpha2.SchemeGroupVersion.Version,
Resource: tenantv1alpha2.ResourcePluralWorkspaceTemplate,
ResourceRequest: true,
ResourceScope: request.GlobalScope,
}
authorize, reason, err := t.authorizer.Authorize(deleteWST)
if err != nil {
klog.Error(err)
return nil, err
}
if authorize != authorizer.DecisionAllow {
err := errors.NewForbidden(tenantv1alpha2.Resource(tenantv1alpha2.ResourcePluralWorkspaceTemplate), workspace, fmt.Errorf(reason))
klog.Error(err)
return nil, err
}
}
// Checking whether the user can manage the cluster requires authorization from two aspects:
// first check whether the user has the relevant global permissions,
// then check whether the user has the relevant cluster permissions in the target cluster.
if clusterNames.Len() > 0 {
for _, clusterName := range clusterNames.List() {
deleteCluster := authorizer.AttributesRecord{
User: user,
Verb: authorizer.VerbDelete,
APIGroup: clusterv1alpha1.SchemeGroupVersion.Group,
APIVersion: clusterv1alpha1.SchemeGroupVersion.Version,
Resource: clusterv1alpha1.ResourcesPluralCluster,
Cluster: clusterName,
ResourceRequest: true,
ResourceScope: request.GlobalScope,
}
authorize, reason, err := t.authorizer.Authorize(deleteCluster)
if err != nil {
klog.Error(err)
return nil, err
}
if authorize == authorizer.DecisionAllow {
continue
}
list, err := t.getClusterRoleBindingsByUser(clusterName, user.GetName())
if err != nil {
klog.Error(err)
return nil, err
}
allowed := false
for _, clusterRolebinding := range list.Items {
if clusterRolebinding.RoleRef.Name == iamv1alpha2.ClusterAdmin {
allowed = true
break
}
}
if !allowed {
err = errors.NewForbidden(clusterv1alpha1.Resource(clusterv1alpha1.ResourcesPluralCluster), clusterName, fmt.Errorf(reason))
klog.Error(err)
return nil, err
}
}
}
return t.ksclient.TenantV1alpha2().WorkspaceTemplates().Patch(context.Background(), workspace, types.JSONPatchType, data, metav1.PatchOptions{})
}
func (t *tenantOperator) CreateWorkspaceTemplate(workspace *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha2.WorkspaceTemplate, error) {
@@ -1081,6 +1211,16 @@ func (t *tenantOperator) MeteringHierarchy(user user.Info, queryParam *meteringv
return resourceStats, nil
}
func (t *tenantOperator) getClusterRoleBindingsByUser(clusterName, user string) (*rbacv1.ClusterRoleBindingList, error) {
kubernetesClientSet, err := t.clusterClient.GetKubernetesClientSet(clusterName)
if err != nil {
return nil, err
}
return kubernetesClientSet.RbacV1().ClusterRoleBindings().
List(context.Background(),
metav1.ListOptions{LabelSelector: labels.FormatLabels(map[string]string{"iam.kubesphere.io/user-ref": user})})
}
func contains(objects []runtime.Object, object runtime.Object) bool {
for _, item := range objects {
if item == object {

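The flow added here splits authorization in two: patches touching anything outside /spec/placement require global delete permission on workspace templates, while placement changes require, per target cluster, either a global cluster permission or a cluster-admin binding inside that cluster. A compilable sketch of the fallback check; the role name "cluster-admin" stands in for iamv1alpha2.ClusterAdmin, and the label key matches the one used in getClusterRoleBindingsByUser above:

package main

import (
	"context"
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// isClusterAdmin lists the user's ClusterRoleBindings in a member cluster by
// the iam.kubesphere.io/user-ref label and looks for a cluster-admin role ref.
// Every binding is checked, not just the first, which is the point of the
// "multiple clusterrolebindings cannot authorize" fix in the commit log.
func isClusterAdmin(ctx context.Context, client kubernetes.Interface, user string) (bool, error) {
	selector := labels.FormatLabels(map[string]string{"iam.kubesphere.io/user-ref": user})
	list, err := client.RbacV1().ClusterRoleBindings().List(ctx, metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return false, err
	}
	for _, crb := range list.Items {
		if crb.RoleRef.Name == "cluster-admin" {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	client := fake.NewSimpleClientset(&rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "alice-admin",
			Labels: map[string]string{"iam.kubesphere.io/user-ref": "alice"},
		},
		RoleRef: rbacv1.RoleRef{Kind: "ClusterRole", Name: "cluster-admin"},
	})
	ok, err := isClusterAdmin(context.Background(), client, "alice")
	fmt.Println(ok, err) // true <nil>
}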
View File

@@ -44,6 +44,8 @@ import (
const (
// Time allowed to write a message to the peer.
writeWait = 10 * time.Second
// ctrl+d to close terminal.
endOfTransmission = "\u0004"
)
// PtyHandler is what remotecommand expects from a pty
@@ -76,7 +78,7 @@ type TerminalMessage struct {
Rows, Cols uint16
}
-// TerminalSize handles pty->process resize events
+// Next handles pty->process resize events
// Called in a loop from remotecommand as long as the process is running
func (t TerminalSession) Next() *remotecommand.TerminalSize {
select {
@@ -95,7 +97,7 @@ func (t TerminalSession) Read(p []byte) (int, error) {
var msg TerminalMessage
err := t.conn.ReadJSON(&msg)
if err != nil {
-return 0, err
+return copy(p, endOfTransmission), err
}
switch msg.Op {
@@ -105,7 +107,7 @@ func (t TerminalSession) Read(p []byte) (int, error) {
t.sizeChan <- remotecommand.TerminalSize{Width: msg.Cols, Height: msg.Rows}
return 0, nil
default:
-return 0, fmt.Errorf("unknown message type '%s'", msg.Op)
+return copy(p, endOfTransmission), fmt.Errorf("unknown message type '%s'", msg.Op)
}
}
@@ -215,7 +217,7 @@ func (n *NodeTerminaler) getNSEnterPod() (*v1.Pod, error) {
pod, err := n.client.CoreV1().Pods(n.Namespace).Get(context.Background(), n.PodName, metav1.GetOptions{})
if err != nil || (pod.Status.Phase != v1.PodRunning && pod.Status.Phase != v1.PodPending) {
-//pod has timed out, but has not been cleaned up
+// pod has timed out, but has not been cleaned up
if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
err := n.client.CoreV1().Pods(n.Namespace).Delete(context.Background(), n.PodName, metav1.DeleteOptions{})
if err != nil {
@@ -328,7 +330,7 @@ func isValidShell(validShells []string, shell string) bool {
func (t *terminaler) HandleSession(shell, namespace, podName, containerName string, conn *websocket.Conn) {
var err error
validShells := []string{"sh", "bash"}
validShells := []string{"bash", "sh"}
session := &TerminalSession{conn: conn, sizeChan: make(chan remotecommand.TerminalSize)}

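The Read changes matter for cleanup: remotecommand keeps the remote shell alive as long as Read blocks, so returning zero bytes on a websocket error used to leak the session. Returning an EOT byte (what ctrl+d sends) makes the remote shell exit. A sketch of the contract, with the message handling simplified:

package main

import "fmt"

// endOfTransmission mirrors the constant above: \u0004 is what ctrl+d sends.
const endOfTransmission = "\u0004"

// read sketches the fixed TerminalSession.Read behaviour: on a broken
// connection or an unknown message, feed the pty an EOT so the remote
// process terminates instead of lingering after the websocket is gone.
func read(p []byte, op string, connErr error) (int, error) {
	if connErr != nil {
		return copy(p, endOfTransmission), connErr
	}
	if op != "stdin" && op != "resize" {
		return copy(p, endOfTransmission), fmt.Errorf("unknown message type '%s'", op)
	}
	return 0, nil
}

func main() {
	buf := make([]byte, 8)
	n, err := read(buf, "bogus", nil)
	fmt.Printf("%d %q %v\n", n, buf[:n], err) // 1 "\x04" unknown message type 'bogus'
}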
View File

@@ -177,7 +177,7 @@ var promQLTemplates = map[string]string{
"ingress_success_rate": `sum(rate(nginx_ingress_controller_requests{$1,$2,status!~"[4-5].*"}[$3])) / sum(rate(nginx_ingress_controller_requests{$1,$2}[$3]))`,
"ingress_request_duration_average": `sum_over_time(nginx_ingress_controller_request_duration_seconds_sum{$1,$2}[$3])/sum_over_time(nginx_ingress_controller_request_duration_seconds_count{$1,$2}[$3])`,
"ingress_request_duration_50percentage": `histogram_quantile(0.50, sum by (le) (rate(nginx_ingress_controller_request_duration_seconds_bucket{$1,$2}[$3])))`,
"ingress_request_duration_95percentage": `histogram_quantile(0.90, sum by (le) (rate(nginx_ingress_controller_request_duration_seconds_bucket{$1,$2}[$3])))`,
"ingress_request_duration_95percentage": `histogram_quantile(0.95, sum by (le) (rate(nginx_ingress_controller_request_duration_seconds_bucket{$1,$2}[$3])))`,
"ingress_request_duration_99percentage": `histogram_quantile(0.99, sum by (le) (rate(nginx_ingress_controller_request_duration_seconds_bucket{$1,$2}[$3])))`,
"ingress_request_volume": `round(sum(irate(nginx_ingress_controller_requests{$1,$2}[$3])), 0.001)`,
"ingress_request_volume_by_ingress": `round(sum(irate(nginx_ingress_controller_requests{$1,$2}[$3])) by (ingress), 0.001)`,

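For reference, histogram_quantile(φ, ...) picks the bucket containing the φ·N-th observation and interpolates linearly inside it, so the first argument is the quantile itself:

P95 ≈ lo + (hi − lo) · (0.95·N − count(lo)) / (count(hi) − count(lo))

where lo and hi are the boundaries of that bucket, count(x) the cumulative count up to x, and N the total count. With 0.90 as the first argument the template silently reported P90 latency under the 95percentage metric name; only the quantile argument needed to change.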
View File

@@ -99,6 +99,9 @@ func MergeRepoIndex(repo *v1alpha1.HelmRepo, index *helmrepo.IndexFile, existsSa
allAppNames := make(map[string]struct{}, len(index.Entries))
for name, versions := range index.Entries {
if len(versions) == 0 {
continue
}
// add new applications
if application, exists := saved.Applications[name]; !exists {
application = &Application{

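The merge panicked because a repo index may contain a chart entry with an empty version list (see "apisix: []" in the test data below), and the code went on to index versions[0]. A minimal reproduction of the guard:

package main

import "fmt"

func main() {
	entries := map[string][]string{
		"apisix":           {}, // entry with no versions, as in indexData1 below
		"apisix-dashboard": {"0.3.0"},
	}
	for name, versions := range entries {
		if len(versions) == 0 {
			continue // the added guard; without it versions[0] panics with index out of range
		}
		fmt.Println(name, versions[0])
	}
}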
View File

@@ -50,5 +50,102 @@ func TestLoadRepo(t *testing.T) {
_ = chartData
break
}
}
var indexData1 = `
apiVersion: v1
entries:
apisix: []
apisix-dashboard:
- apiVersion: v2
appVersion: 2.9.0
created: "2021-11-15T08:23:00.343784368Z"
description: A Helm chart for Apache APISIX Dashboard
digest: 76f794b1300f7bfb756ede352fe71eb863b89f1995b495e8b683990709e310ad
icon: https://apache.org/logos/res/apisix/apisix.png
maintainers:
- email: zhangjintao@apache.org
name: tao12345666333
name: apisix-dashboard
type: application
urls:
- https://charts.kubesphere.io/main/apisix-dashboard-0.3.0.tgz
version: 0.3.0
`
var indexData2 = `
apiVersion: v1
entries:
apisix:
- apiVersion: v2
appVersion: 2.10.0
created: "2021-11-15T08:23:00.343234584Z"
dependencies:
- condition: etcd.enabled
name: etcd
repository: https://charts.bitnami.com/bitnami
version: 6.2.6
- alias: dashboard
condition: dashboard.enabled
name: apisix-dashboard
repository: https://charts.apiseven.com
version: 0.3.0
- alias: ingress-controller
condition: ingress-controller.enabled
name: apisix-ingress-controller
repository: https://charts.apiseven.com
version: 0.8.0
description: A Helm chart for Apache APISIX
digest: fed38a11c0fb54d385144767227e43cb2961d1b50d36ea207fdd122bddd3de28
icon: https://apache.org/logos/res/apisix/apisix.png
maintainers:
- email: zhangjintao@apache.org
name: tao12345666333
name: apisix
type: application
urls:
- https://charts.kubesphere.io/main/apisix-0.7.2.tgz
version: 0.7.2
apisix-dashboard:
- apiVersion: v2
appVersion: 2.9.0
created: "2021-11-15T08:23:00.343784368Z"
description: A Helm chart for Apache APISIX Dashboard
digest: 76f794b1300f7bfb756ede352fe71eb863b89f1995b495e8b683990709e310ad
icon: https://apache.org/logos/res/apisix/apisix.png
maintainers:
- email: zhangjintao@apache.org
name: tao12345666333
name: apisix-dashboard
type: application
urls:
- https://charts.kubesphere.io/main/apisix-dashboard-0.3.0.tgz
version: 0.3.0
`
func TestMergeRepo(t *testing.T) {
repoIndex1, err := loadIndex([]byte(indexData1))
if err != nil {
t.Errorf("failed to load repo index")
t.Failed()
}
existsSavedIndex := &SavedIndex{}
repoCR := &v1alpha1.HelmRepo{}
savedIndex1 := MergeRepoIndex(repoCR, repoIndex1, existsSavedIndex)
if len(savedIndex1.Applications) != 1 {
t.Errorf("faied to merge repo index with empty repo")
t.Failed()
}
repoIndex2, err := loadIndex([]byte(indexData2))
if err != nil {
t.Errorf("failed to load repo index")
t.Failed()
}
savedIndex2 := MergeRepoIndex(repoCR, repoIndex2, savedIndex1)
if len(savedIndex2.Applications) != 2 {
t.Errorf("faied to merge two repo index")
t.Failed()
}
}

View File

@@ -23,6 +23,7 @@ import (
"sync"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
@@ -30,6 +31,7 @@ import (
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
clusterinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/cluster/v1alpha1"
clusterlister "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1"
)
@@ -54,6 +56,8 @@ type ClusterClients interface {
GetClusterKubeconfig(string) (string, error)
Get(string) (*clusterv1alpha1.Cluster, error)
GetInnerCluster(string) *innerCluster
GetKubernetesClientSet(string) (*kubernetes.Clientset, error)
GetKubeSphereClientSet(string) (*kubesphere.Clientset, error)
}
func NewClusterClient(clusterInformer clusterinformer.ClusterInformer) ClusterClients {
@@ -182,3 +186,45 @@ func (c *clusterClients) IsHostCluster(cluster *clusterv1alpha1.Cluster) bool {
}
return false
}
func (c *clusterClients) GetKubeSphereClientSet(name string) (*kubesphere.Clientset, error) {
kubeconfig, err := c.GetClusterKubeconfig(name)
if err != nil {
return nil, err
}
restConfig, err := newRestConfigFromString(kubeconfig)
if err != nil {
return nil, err
}
clientSet, err := kubesphere.NewForConfig(restConfig)
if err != nil {
return nil, err
}
return clientSet, nil
}
func (c *clusterClients) GetKubernetesClientSet(name string) (*kubernetes.Clientset, error) {
kubeconfig, err := c.GetClusterKubeconfig(name)
if err != nil {
return nil, err
}
restConfig, err := newRestConfigFromString(kubeconfig)
if err != nil {
return nil, err
}
clientSet, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return nil, err
}
return clientSet, nil
}
func newRestConfigFromString(kubeconfig string) (*rest.Config, error) {
bytes, err := clientcmd.NewClientConfigFromBytes([]byte(kubeconfig))
if err != nil {
return nil, err
}
return bytes.ClientConfig()
}

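The two accessors turn a cluster's stored kubeconfig into typed clientsets on demand; this is what getClusterRoleBindingsByUser in tenant.go consumes. A sketch of the intended usage, assuming the package path from the diff (wiring a real ClusterClients instance is environment-specific and omitted):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"kubesphere.io/kubesphere/pkg/utils/clusterclient"
)

// listMemberClusterRoleBindings resolves a per-cluster clientset from the
// cluster's stored kubeconfig, then uses the ordinary typed API against
// the member cluster.
func listMemberClusterRoleBindings(clients clusterclient.ClusterClients, cluster string) error {
	cs, err := clients.GetKubernetesClientSet(cluster)
	if err != nil {
		return err
	}
	crbs, err := cs.RbacV1().ClusterRoleBindings().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%d bindings in %s\n", len(crbs.Items), cluster)
	return nil
}

func main() {}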
View File

@@ -0,0 +1,22 @@
package josnpatchutil
import (
jsonpatch "github.com/evanphx/json-patch"
"github.com/mitchellh/mapstructure"
)
func Parse(raw []byte) (jsonpatch.Patch, error) {
return jsonpatch.DecodePatch(raw)
}
func GetValue(patch jsonpatch.Operation, value interface{}) error {
valueInterface, err := patch.ValueInterface()
if err != nil {
return err
}
if err := mapstructure.Decode(valueInterface, value); err != nil {
return err
}
return nil
}
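These helpers wrap evanphx/json-patch's DecodePatch and Operation.ValueInterface plus a mapstructure decode. A small usage sketch tying them back to the placement patches handled in tenant.go (the payload is illustrative):

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
	"github.com/mitchellh/mapstructure"
)

func main() {
	raw := []byte(`[{"op":"add","path":"/spec/placement/clusters/-","value":{"name":"member-a"}}]`)

	patch, err := jsonpatch.DecodePatch(raw) // what Parse wraps
	if err != nil {
		panic(err)
	}
	for _, op := range patch {
		path, _ := op.Path()
		fmt.Println(op.Kind(), path) // add /spec/placement/clusters/-

		// what GetValue wraps: ValueInterface plus a mapstructure decode
		vi, err := op.ValueInterface()
		if err != nil {
			panic(err)
		}
		value := map[string]interface{}{}
		if err := mapstructure.Decode(vi, &value); err != nil {
			panic(err)
		}
		fmt.Println(value["name"]) // member-a
	}
}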