From 1342a9abe10d61d25cabf6279ed7484a787e180e Mon Sep 17 00:00:00 2001
From: lynxcat
Date: Mon, 27 Dec 2021 15:34:45 +0800
Subject: [PATCH 1/6] add shell access to node

Signed-off-by: lynxcat
---
 pkg/apiserver/apiserver.go              |   2 +-
 pkg/apiserver/config/config.go          |   3 +
 pkg/apiserver/config/config_test.go     |   6 +
 pkg/kapis/terminal/v1alpha2/handler.go  |  39 ++++-
 pkg/kapis/terminal/v1alpha2/register.go |  13 +-
 pkg/models/terminal/options.go          |  28 ++++
 pkg/models/terminal/terminal.go         | 180 +++++++++++++++++++++++-
 tools/cmd/doc-gen/main.go               |   2 +-
 8 files changed, 263 insertions(+), 10 deletions(-)
 create mode 100644 pkg/models/terminal/options.go

diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go
index 1354ef6ed..4620051eb 100644
--- a/pkg/apiserver/apiserver.go
+++ b/pkg/apiserver/apiserver.go
@@ -225,7 +225,7 @@ func (s *APIServer) installKubeSphereAPIs() {
 		s.KubernetesClient.Master()))
 	urlruntime.Must(tenantv1alpha2.AddToContainer(s.container, s.InformerFactory, s.KubernetesClient.Kubernetes(), s.KubernetesClient.KubeSphere(),
 		s.EventsClient, s.LoggingClient, s.AuditingClient, amOperator, rbacAuthorizer, s.MonitoringClient, s.RuntimeCache, s.Config.MeteringOptions))
-	urlruntime.Must(terminalv1alpha2.AddToContainer(s.container, s.KubernetesClient.Kubernetes(), rbacAuthorizer, s.KubernetesClient.Config()))
+	urlruntime.Must(terminalv1alpha2.AddToContainer(s.container, s.KubernetesClient.Kubernetes(), rbacAuthorizer, s.KubernetesClient.Config(), s.Config.TerminalOptions))
 	urlruntime.Must(clusterkapisv1alpha1.AddToContainer(s.container,
 		s.KubernetesClient.KubeSphere(),
 		s.InformerFactory.KubernetesSharedInformerFactory(),
diff --git a/pkg/apiserver/config/config.go b/pkg/apiserver/config/config.go
index 64e78cad7..6f1c46c5b 100644
--- a/pkg/apiserver/config/config.go
+++ b/pkg/apiserver/config/config.go
@@ -28,6 +28,7 @@ import (
 
 	networkv1alpha1 "kubesphere.io/api/network/v1alpha1"
 
+	"kubesphere.io/kubesphere/pkg/models/terminal"
 	"kubesphere.io/kubesphere/pkg/simple/client/alerting"
 	"kubesphere.io/kubesphere/pkg/simple/client/auditing"
 	"kubesphere.io/kubesphere/pkg/simple/client/cache"
@@ -109,6 +110,7 @@ type Config struct {
 	MeteringOptions       *metering.Options       `json:"metering,omitempty" yaml:"metering,omitempty" mapstructure:"metering"`
 	GatewayOptions        *gateway.Options        `json:"gateway,omitempty" yaml:"gateway,omitempty" mapstructure:"gateway"`
 	GPUOptions            *gpu.Options            `json:"gpu,omitempty" yaml:"gpu,omitempty" mapstructure:"gpu"`
+	TerminalOptions       *terminal.Options       `json:"terminal,omitempty" yaml:"terminal,omitempty" mapstructure:"terminal"`
 }
 
 // newConfig creates a default non-empty Config
@@ -136,6 +138,7 @@ func New() *Config {
 		MeteringOptions:       metering.NewMeteringOptions(),
 		GatewayOptions:        gateway.NewGatewayOptions(),
 		GPUOptions:            gpu.NewGPUOptions(),
+		TerminalOptions:       terminal.NewTerminalOptions(),
 	}
 }
 
diff --git a/pkg/apiserver/config/config_test.go b/pkg/apiserver/config/config_test.go
index 26250d1da..3b35a301c 100644
--- a/pkg/apiserver/config/config_test.go
+++ b/pkg/apiserver/config/config_test.go
@@ -19,6 +19,7 @@ package config
 import (
 	"fmt"
 	"io/ioutil"
+	"os"
 	"testing"
 	"time"
 
@@ -32,6 +33,7 @@ import (
 	networkv1alpha1 "kubesphere.io/api/network/v1alpha1"
 
 	"kubesphere.io/kubesphere/pkg/apiserver/authentication/oauth"
+	"kubesphere.io/kubesphere/pkg/models/terminal"
 	"kubesphere.io/kubesphere/pkg/simple/client/alerting"
 	"kubesphere.io/kubesphere/pkg/simple/client/auditing"
 	"kubesphere.io/kubesphere/pkg/simple/client/cache"
@@ -190,6 +192,10 @@ func newTestConfig() (*Config, error) {
 		GPUOptions: &gpu.Options{
 			Kinds: []gpu.GPUKind{},
 		},
+		TerminalOptions: &terminal.Options{
+			Image:   "alpine:3.15",
+			Timeout: 600,
+		},
 	}
 	return conf, nil
 }
diff --git a/pkg/kapis/terminal/v1alpha2/handler.go b/pkg/kapis/terminal/v1alpha2/handler.go
index aaa42c3aa..9e093a2f5 100644
--- a/pkg/kapis/terminal/v1alpha2/handler.go
+++ b/pkg/kapis/terminal/v1alpha2/handler.go
@@ -45,10 +45,10 @@ type terminalHandler struct {
 	authorizer authorizer.Authorizer
 }
 
-func newTerminalHandler(client kubernetes.Interface, authorizer authorizer.Authorizer, config *rest.Config) *terminalHandler {
+func newTerminalHandler(client kubernetes.Interface, authorizer authorizer.Authorizer, config *rest.Config, options *terminal.Options) *terminalHandler {
 	return &terminalHandler{
 		authorizer: authorizer,
-		terminaler: terminal.NewTerminaler(client, config),
+		terminaler: terminal.NewTerminaler(client, config, options),
 	}
 }
 
@@ -89,3 +89,38 @@ func (t *terminalHandler) handleTerminalSession(request *restful.Request, respon
 
 	t.terminaler.HandleSession(shell, namespace, podName, containerName, conn)
 }
+
+func (t *terminalHandler) handleShellAccessToNode(request *restful.Request, response *restful.Response) {
+	nodename := request.PathParameter("nodename")
+
+	user, _ := requestctx.UserFrom(request.Request.Context())
+
+	createPodsExec := authorizer.AttributesRecord{
+		User:            user,
+		Verb:            "create",
+		Resource:        "pods",
+		Subresource:     "exec",
+		Namespace:       "kubesphere-controls-system",
+		ResourceRequest: true,
+		ResourceScope:   requestctx.NamespaceScope,
+	}
+
+	decision, reason, err := t.authorizer.Authorize(createPodsExec)
+	if err != nil {
+		api.HandleInternalError(response, request, err)
+		return
+	}
+
+	if decision != authorizer.DecisionAllow {
+		api.HandleForbidden(response, request, errors.New(reason))
+		return
+	}
+
+	conn, err := upgrader.Upgrade(response.ResponseWriter, request.Request, nil)
+	if err != nil {
+		klog.Warning(err)
+		return
+	}
+
+	t.terminaler.HandleShellAccessToNode(nodename, conn)
+}
diff --git a/pkg/kapis/terminal/v1alpha2/register.go b/pkg/kapis/terminal/v1alpha2/register.go
index 402006340..ff23ae9d4 100644
--- a/pkg/kapis/terminal/v1alpha2/register.go
+++ b/pkg/kapis/terminal/v1alpha2/register.go
@@ -28,6 +28,7 @@ import (
 	"kubesphere.io/kubesphere/pkg/apiserver/runtime"
 	"kubesphere.io/kubesphere/pkg/constants"
 	"kubesphere.io/kubesphere/pkg/models"
+	"kubesphere.io/kubesphere/pkg/models/terminal"
 )
 
 const (
@@ -36,11 +37,11 @@ const (
 
 var GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"}
 
-func AddToContainer(c *restful.Container, client kubernetes.Interface, authorizer authorizer.Authorizer, config *rest.Config) error {
+func AddToContainer(c *restful.Container, client kubernetes.Interface, authorizer authorizer.Authorizer, config *rest.Config, options *terminal.Options) error {
 
 	webservice := runtime.NewWebService(GroupVersion)
 
-	handler := newTerminalHandler(client, authorizer, config)
+	handler := newTerminalHandler(client, authorizer, config, options)
 
 	webservice.Route(webservice.GET("/namespaces/{namespace}/pods/{pod}/exec").
 		To(handler.handleTerminalSession).
@@ -50,6 +51,14 @@ func AddToContainer(c *restful.Container, client kubernetes.Interface, authorize
 		Metadata(restfulspec.KeyOpenAPITags, []string{constants.TerminalTag}).
 		Writes(models.PodInfo{}))
 
+	// Add a new route to support shell access to the node
+	webservice.Route(webservice.GET("/nodes/{nodename}/exec").
+		To(handler.handleShellAccessToNode).
+		Param(webservice.PathParameter("nodename", "name of the cluster node")).
+		Doc("create a shell session on the given node").
+		Metadata(restfulspec.KeyOpenAPITags, []string{constants.TerminalTag}).
+		Writes(models.PodInfo{}))
+
 	c.Add(webservice)
 
 	return nil
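
For reference, the new route completes a websocket upgrade and then speaks the JSON TerminalMessage protocol defined in pkg/models/terminal. A minimal client sketch in Go — the URL prefix, port, token placeholder, and the "stdin" op are assumptions based on how the existing pod-exec terminal is consumed, not something this patch defines:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

func main() {
	// Assumed layout: <apiserver>/kapis/terminal.kubesphere.io/v1alpha2/nodes/<node>/exec
	u := "ws://localhost:9090/kapis/terminal.kubesphere.io/v1alpha2/nodes/node1/exec"
	h := http.Header{"Authorization": []string{"Bearer <token>"}}

	conn, _, err := websocket.DefaultDialer.Dial(u, h)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Keystrokes travel as TerminalMessages with op "stdin".
	if err := conn.WriteJSON(map[string]string{"op": "stdin", "data": "uname -a\n"}); err != nil {
		log.Fatal(err)
	}

	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", msg)
}
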
diff --git a/pkg/models/terminal/options.go b/pkg/models/terminal/options.go
new file mode 100644
index 000000000..c574c1159
--- /dev/null
+++ b/pkg/models/terminal/options.go
@@ -0,0 +1,28 @@
+package terminal
+
+import "github.com/spf13/pflag"
+
+type Options struct {
+	Image   string `json:"image,omitempty" yaml:"image"`
+	Timeout int    `json:"timeout,omitempty" yaml:"timeout"`
+}
+
+func NewTerminalOptions() *Options {
+	return &Options{
+		Image:   "alpine:3.15",
+		Timeout: 600,
+	}
+}
+
+func (s *Options) Validate() []error {
+	var errs []error
+	return errs
+}
+
+func (s *Options) ApplyTo(options *Options) {
+
+}
+
+func (s *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
+
+}
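
Since the option group is wired into Config with mapstructure:"terminal" (see config.go above), it would be read from the server configuration under a top-level terminal key. A sketch of the corresponding kubesphere.yaml fragment, assuming it follows the same layout as the other option groups — the values shown are the defaults, and timeout is in seconds because it is passed to sleep in the pod spec below:

terminal:
  image: alpine:3.15
  timeout: 600
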
node: %s", n.Nodename) + } + + idx := int64(0) + NodeSessionCounter.LoadOrStore(nodename, &idx) + + return n, nil +} + +func (n *NodeTerminaler) getNSEnterPod() (*v1.Pod, error) { + pod, err := n.client.CoreV1().Pods(n.Namespace).Get(context.Background(), n.PodName, metav1.GetOptions{}) + + if err != nil || (pod.Status.Phase != v1.PodRunning && pod.Status.Phase != v1.PodPending) { + //pod has timed out, but has not been cleaned up + if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed { + err := n.client.CoreV1().Pods(n.Namespace).Delete(context.Background(), n.PodName, metav1.DeleteOptions{}) + if err != nil { + return pod, err + } + } + + var p = &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: n.PodName, + }, + Spec: v1.PodSpec{ + NodeName: n.Nodename, + HostPID: true, + HostNetwork: true, + RestartPolicy: v1.RestartPolicyNever, + Containers: []v1.Container{ + { + Name: n.ContainerName, + Image: n.Config.Image, + Command: []string{ + "nsenter", "-m", "-u", "-i", "-n", "-p", "-t", "1", + }, + Stdin: true, + TTY: true, + SecurityContext: &v1.SecurityContext{ + Privileged: &n.Privileged, + }, + }, + }, + }, + } + + if n.Config.Timeout == 0 { + p.Spec.Containers[0].Args = []string{"tail", "-f", "/dev/null"} + } else { + p.Spec.Containers[0].Args = []string{"sleep", strconv.Itoa(n.Config.Timeout)} + } + + pod, err = n.client.CoreV1().Pods(n.Namespace).Create(context.Background(), p, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("create pod failed on %s node: %v", n.Nodename, err) + } + } + + return pod, nil +} + +func (n NodeTerminaler) CleanUpNSEnterPod() { + idx, _ := NodeSessionCounter.Load(n.Nodename) + atomic.AddInt64(idx.(*int64), -1) + + if *(idx.(*int64)) == 0 { + err := n.client.CoreV1().Pods(n.Namespace).Delete(context.Background(), n.PodName, metav1.DeleteOptions{}) + if err != nil { + klog.Warning(err) + } + } } // startProcess is called by handleAttach @@ -224,3 +351,48 @@ func (t *terminaler) HandleSession(shell, namespace, podName, containerName stri session.Close(1, "Process exited") } + +func (t *terminaler) HandleShellAccessToNode(nodename string, conn *websocket.Conn) { + succ, fail := make(chan bool), make(chan bool) + + nodeTerminaler, err := NewNodeTerminaler(nodename, t.options, t.client) + if err != nil { + klog.Warning("node terminaler init error: ", err) + return + } + + pod, err := nodeTerminaler.getNSEnterPod() + if err != nil { + klog.Warning("get nsenter pod error: ", err) + return + } + + go nodeTerminaler.WatchPodStatusBeRunning(pod, succ, fail) + select { + case <-succ: + t.HandleSession(nodeTerminaler.Shell, nodeTerminaler.Namespace, nodeTerminaler.PodName, nodeTerminaler.ContainerName, conn) + defer nodeTerminaler.CleanUpNSEnterPod() + case <-fail: + klog.Warning("watching pod status error") + } +} + +func (n *NodeTerminaler) WatchPodStatusBeRunning(pod *v1.Pod, succ chan bool, fail chan bool) { + var err error + for i := 0; i < 5; i++ { + if pod.Status.Phase == v1.PodRunning { + idx, _ := NodeSessionCounter.Load(n.Nodename) + atomic.AddInt64(idx.(*int64), 1) + close(succ) + return + } + time.Sleep(time.Second) + pod, err = n.client.CoreV1().Pods(pod.ObjectMeta.Namespace).Get(context.Background(), pod.ObjectMeta.Name, metav1.GetOptions{}) + if err != nil { + klog.Warning(err) + close(fail) + return + } + } + close(fail) +} diff --git a/tools/cmd/doc-gen/main.go b/tools/cmd/doc-gen/main.go index 3dc7db970..041ae516a 100644 --- 
From 78730a2b96d2dc9ffd397e8cd4e5dd62ce742617 Mon Sep 17 00:00:00 2001
From: lynxcat
Date: Thu, 6 Jan 2022 09:45:02 +0800
Subject: [PATCH 2/6] use wait.Poll to watch pod status

Signed-off-by: lynxcat
---
 pkg/models/terminal/terminal.go | 39 ++++++++++++++++++++++-----------
 1 file changed, 26 insertions(+), 13 deletions(-)

diff --git a/pkg/models/terminal/terminal.go b/pkg/models/terminal/terminal.go
index d02c623c4..c829f4396 100644
--- a/pkg/models/terminal/terminal.go
+++ b/pkg/models/terminal/terminal.go
@@ -25,6 +25,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"strconv"
 	"sync"
 	"sync/atomic"
@@ -189,7 +190,7 @@ func NewNodeTerminaler(nodename string, options *Options, client kubernetes.Inte
 	node, err := n.client.CoreV1().Nodes().Get(context.Background(), n.Nodename, metav1.GetOptions{})
 
 	if err != nil {
-		return n, fmt.Errorf("node cannot exist. nodename:%s, err: %v", n.Nodename, err)
+		return n, fmt.Errorf("getting node error. nodename:%s, err: %v", n.Nodename, err)
 	}
 
 	flag := false
@@ -378,21 +379,33 @@ func (t *terminaler) HandleShellAccessToNode(nodename string, conn *websocket.Co
 }
 
 func (n *NodeTerminaler) WatchPodStatusBeRunning(pod *v1.Pod, succ chan bool, fail chan bool) {
-	var err error
-	for i := 0; i < 5; i++ {
-		if pod.Status.Phase == v1.PodRunning {
-			idx, _ := NodeSessionCounter.Load(n.Nodename)
-			atomic.AddInt64(idx.(*int64), 1)
-			close(succ)
-			return
-		}
-		time.Sleep(time.Second)
+	if pod.Status.Phase == v1.PodRunning {
+		idx, _ := NodeSessionCounter.Load(n.Nodename)
+		atomic.AddInt64(idx.(*int64), 1)
+		close(succ)
+		return
+	}
+
+	err := wait.Poll(time.Millisecond*500, time.Second*5, func() (done bool, err error) {
 		pod, err = n.client.CoreV1().Pods(pod.ObjectMeta.Namespace).Get(context.Background(), pod.ObjectMeta.Name, metav1.GetOptions{})
 		if err != nil {
 			klog.Warning(err)
-			close(fail)
-			return
+			return false, err
 		}
+
+		if pod.Status.Phase == v1.PodRunning {
+			idx, _ := NodeSessionCounter.Load(n.Nodename)
+			atomic.AddInt64(idx.(*int64), 1)
+			return true, nil
+		}
+
+		return false, nil
+	})
+
+	if err != nil {
+		klog.Warning("watching pod status error: ", err)
+		close(fail)
+	} else {
+		close(succ)
 	}
-	close(fail)
 }
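
wait.Poll (from k8s.io/apimachinery/pkg/util/wait) runs the condition on the given interval until it returns true, returns a non-nil error, or the timeout elapses — so the return false, err above aborts the watch immediately on a failed Get instead of burning the remaining budget. A self-contained sketch of the same pattern under the patch's 500ms/5s settings:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	start := time.Now()

	// Poll every 500ms, give up after 5s — the same budget the patch uses.
	err := wait.Poll(500*time.Millisecond, 5*time.Second, func() (bool, error) {
		// done=true ends polling successfully; a non-nil error aborts at once.
		return time.Since(start) > 2*time.Second, nil
	})

	fmt.Println("poll finished, err =", err) // nil once the condition turns true
}
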
From 0c1ba9e32e390df45bfb49fc7ff188b8cb987ccd Mon Sep 17 00:00:00 2001
From: lynxcat
Date: Thu, 6 Jan 2022 09:52:37 +0800
Subject: [PATCH 3/6] formatting code

Signed-off-by: lynxcat
---
 pkg/models/terminal/terminal.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/models/terminal/terminal.go b/pkg/models/terminal/terminal.go
index c829f4396..803fdaf2a 100644
--- a/pkg/models/terminal/terminal.go
+++ b/pkg/models/terminal/terminal.go
@@ -25,7 +25,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"strconv"
 	"sync"
 	"sync/atomic"
@@ -34,6 +33,7 @@ import (
 	"github.com/gorilla/websocket"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"

From 6ef7010533c28fbec6656826687ca60d9062e450 Mon Sep 17 00:00:00 2001
From: lynxcat
Date: Thu, 6 Jan 2022 17:27:24 +0800
Subject: [PATCH 4/6] delete channel

Signed-off-by: lynxcat
---
 pkg/models/terminal/terminal.go | 47 +++++++++++++++------------------
 1 file changed, 22 insertions(+), 25 deletions(-)

diff --git a/pkg/models/terminal/terminal.go b/pkg/models/terminal/terminal.go
index 803fdaf2a..cd570a70f 100644
--- a/pkg/models/terminal/terminal.go
+++ b/pkg/models/terminal/terminal.go
@@ -204,9 +204,6 @@ func NewNodeTerminaler(nodename string, options *Options, client kubernetes.Inte
 		return n, fmt.Errorf("node status error. node: %s", n.Nodename)
 	}
 
-	idx := int64(0)
-	NodeSessionCounter.LoadOrStore(nodename, &idx)
-
 	return n, nil
 }
 
@@ -354,7 +351,6 @@ func (t *terminaler) HandleSession(shell, namespace, podName, containerName stri
 }
 
 func (t *terminaler) HandleShellAccessToNode(nodename string, conn *websocket.Conn) {
-	succ, fail := make(chan bool), make(chan bool)
 
 	nodeTerminaler, err := NewNodeTerminaler(nodename, t.options, t.client)
 	if err != nil {
@@ -368,25 +364,29 @@ func (t *terminaler) HandleShellAccessToNode(nodename string, conn *websocket.Co
 		return
 	}
 
-	go nodeTerminaler.WatchPodStatusBeRunning(pod, succ, fail)
-	select {
-	case <-succ:
+	if err := nodeTerminaler.WatchPodStatusBeRunning(pod); err != nil {
+		klog.Warning("watching pod status error: ", err)
+		return
+	} else {
 		t.HandleSession(nodeTerminaler.Shell, nodeTerminaler.Namespace, nodeTerminaler.PodName, nodeTerminaler.ContainerName, conn)
 		defer nodeTerminaler.CleanUpNSEnterPod()
-	case <-fail:
-		klog.Warning("watching pod status error")
 	}
 }
 
-func (n *NodeTerminaler) WatchPodStatusBeRunning(pod *v1.Pod, succ chan bool, fail chan bool) {
+func (n *NodeTerminaler) WatchPodStatusBeRunning(pod *v1.Pod) error {
 	if pod.Status.Phase == v1.PodRunning {
-		idx, _ := NodeSessionCounter.Load(n.Nodename)
-		atomic.AddInt64(idx.(*int64), 1)
-		close(succ)
-		return
+		idx, ok := NodeSessionCounter.Load(n.Nodename)
+		if ok {
+			atomic.AddInt64(idx.(*int64), 1)
+		} else {
+			i := int64(1)
+			NodeSessionCounter.LoadOrStore(n.Nodename, &i)
+		}
+
+		return nil
 	}
 
-	err := wait.Poll(time.Millisecond*500, time.Second*5, func() (done bool, err error) {
+	return wait.Poll(time.Millisecond*500, time.Second*5, func() (done bool, err error) {
 		pod, err = n.client.CoreV1().Pods(pod.ObjectMeta.Namespace).Get(context.Background(), pod.ObjectMeta.Name, metav1.GetOptions{})
 		if err != nil {
 			klog.Warning(err)
@@ -394,18 +394,15 @@ func (n *NodeTerminaler) WatchPodStatusBeRunning(pod *v1.Pod, succ chan bool, fa
 		}
 
 		if pod.Status.Phase == v1.PodRunning {
-			idx, _ := NodeSessionCounter.Load(n.Nodename)
-			atomic.AddInt64(idx.(*int64), 1)
+			idx, ok := NodeSessionCounter.Load(n.Nodename)
+			if ok {
+				atomic.AddInt64(idx.(*int64), 1)
+			} else {
+				i := int64(1)
+				NodeSessionCounter.LoadOrStore(n.Nodename, &i)
+			}
 			return true, nil
 		}
-
 		return false, nil
 	})
-
-	if err != nil {
-		klog.Warning("watching pod status error: ", err)
-		close(fail)
-	} else {
-		close(succ)
-	}
 }
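
The channel-free version above still initializes the counter with Load followed by LoadOrStore, which leaves a small check-then-act window when two sessions on the same node start concurrently: the loser's LoadOrStore result is discarded, so one increment can be lost. A race-free variant — purely illustrative, not part of this series — collapses both branches into a single unconditional LoadOrStore:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var nodeSessionCounter sync.Map

// incSessions atomically bumps the per-node counter, creating it on first use.
func incSessions(nodename string) int64 {
	idx, _ := nodeSessionCounter.LoadOrStore(nodename, new(int64))
	return atomic.AddInt64(idx.(*int64), 1)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			incSessions("node1")
		}()
	}
	wg.Wait()

	idx, _ := nodeSessionCounter.Load("node1")
	fmt.Println(atomic.LoadInt64(idx.(*int64))) // always 10
}
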
node: %s", n.Nodename) } - idx := int64(0) - NodeSessionCounter.LoadOrStore(nodename, &idx) - return n, nil } @@ -354,7 +351,6 @@ func (t *terminaler) HandleSession(shell, namespace, podName, containerName stri } func (t *terminaler) HandleShellAccessToNode(nodename string, conn *websocket.Conn) { - succ, fail := make(chan bool), make(chan bool) nodeTerminaler, err := NewNodeTerminaler(nodename, t.options, t.client) if err != nil { @@ -368,25 +364,29 @@ func (t *terminaler) HandleShellAccessToNode(nodename string, conn *websocket.Co return } - go nodeTerminaler.WatchPodStatusBeRunning(pod, succ, fail) - select { - case <-succ: + if err := nodeTerminaler.WatchPodStatusBeRunning(pod); err != nil { + klog.Warning("watching pod status error: ", err) + return + } else { t.HandleSession(nodeTerminaler.Shell, nodeTerminaler.Namespace, nodeTerminaler.PodName, nodeTerminaler.ContainerName, conn) defer nodeTerminaler.CleanUpNSEnterPod() - case <-fail: - klog.Warning("watching pod status error") } } -func (n *NodeTerminaler) WatchPodStatusBeRunning(pod *v1.Pod, succ chan bool, fail chan bool) { +func (n *NodeTerminaler) WatchPodStatusBeRunning(pod *v1.Pod) error { if pod.Status.Phase == v1.PodRunning { - idx, _ := NodeSessionCounter.Load(n.Nodename) - atomic.AddInt64(idx.(*int64), 1) - close(succ) - return + idx, ok := NodeSessionCounter.Load(n.Nodename) + if ok { + atomic.AddInt64(idx.(*int64), 1) + } else { + i := int64(1) + NodeSessionCounter.LoadOrStore(n.Nodename, &i) + } + + return nil } - err := wait.Poll(time.Millisecond*500, time.Second*5, func() (done bool, err error) { + return wait.Poll(time.Millisecond*500, time.Second*5, func() (done bool, err error) { pod, err = n.client.CoreV1().Pods(pod.ObjectMeta.Namespace).Get(context.Background(), pod.ObjectMeta.Name, metav1.GetOptions{}) if err != nil { klog.Warning(err) @@ -394,18 +394,15 @@ func (n *NodeTerminaler) WatchPodStatusBeRunning(pod *v1.Pod, succ chan bool, fa } if pod.Status.Phase == v1.PodRunning { - idx, _ := NodeSessionCounter.Load(n.Nodename) - atomic.AddInt64(idx.(*int64), 1) + idx, ok := NodeSessionCounter.Load(n.Nodename) + if ok { + atomic.AddInt64(idx.(*int64), 1) + } else { + i := int64(1) + NodeSessionCounter.LoadOrStore(n.Nodename, &i) + } return true, nil } - return false, nil }) - - if err != nil { - klog.Warning("watching pod status error: ", err) - close(fail) - } else { - close(succ) - } } From 5e5c9a8d04042f9e1d939b1863f62d3a80a2bf79 Mon Sep 17 00:00:00 2001 From: lynxcat Date: Mon, 10 Jan 2022 15:34:04 +0800 Subject: [PATCH 5/6] Update pkg/kapis/terminal/v1alpha2/handler.go Co-authored-by: hongming --- pkg/kapis/terminal/v1alpha2/handler.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pkg/kapis/terminal/v1alpha2/handler.go b/pkg/kapis/terminal/v1alpha2/handler.go index 9e093a2f5..5a3e114b6 100644 --- a/pkg/kapis/terminal/v1alpha2/handler.go +++ b/pkg/kapis/terminal/v1alpha2/handler.go @@ -95,14 +95,13 @@ func (t *terminalHandler) handleShellAccessToNode(request *restful.Request, resp user, _ := requestctx.UserFrom(request.Request.Context()) - createPodsExec := authorizer.AttributesRecord{ + createNodesExec := authorizer.AttributesRecord{ User: user, Verb: "create", - Resource: "pods", + Resource: "nodes", Subresource: "exec", - Namespace: "kubesphere-controls-system", ResourceRequest: true, - ResourceScope: requestctx.NamespaceScope, + ResourceScope: requestctx.ClusterScope, } decision, reason, err := t.authorizer.Authorize(createPodsExec) From 1b382e81cb6a34d3f4644a95bda3c011a777e635 
From 1b382e81cb6a34d3f4644a95bda3c011a777e635 Mon Sep 17 00:00:00 2001
From: lynxcat
Date: Mon, 10 Jan 2022 15:36:00 +0800
Subject: [PATCH 6/6] update check permission

Signed-off-by: lynxcat
---
 pkg/kapis/terminal/v1alpha2/handler.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/kapis/terminal/v1alpha2/handler.go b/pkg/kapis/terminal/v1alpha2/handler.go
index 5a3e114b6..193638592 100644
--- a/pkg/kapis/terminal/v1alpha2/handler.go
+++ b/pkg/kapis/terminal/v1alpha2/handler.go
@@ -104,7 +104,7 @@ func (t *terminalHandler) handleShellAccessToNode(request *restful.Request, resp
 		ResourceScope:   requestctx.ClusterScope,
 	}
 
-	decision, reason, err := t.authorizer.Authorize(createPodsExec)
+	decision, reason, err := t.authorizer.Authorize(createNodesExec)
 	if err != nil {
 		api.HandleInternalError(response, request, err)
 		return