Compare commits
50 Commits
v3.3.0-rc.
...
v3.3.2-rc.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c8e131fc13 | ||
|
|
839a31ac1d | ||
|
|
a0ba5f6085 | ||
|
|
658497aa0a | ||
|
|
a47bf848df | ||
|
|
dbb3f04b9e | ||
|
|
705ea4af40 | ||
|
|
366d1e16e4 | ||
|
|
690d5be824 | ||
|
|
c0419ddab5 | ||
|
|
80b0301f79 | ||
|
|
7162d41310 | ||
|
|
6b10d346ca | ||
|
|
6a0d5ba93c | ||
|
|
d87a782257 | ||
|
|
82e55578a8 | ||
|
|
5b9c357160 | ||
|
|
c385dd92e4 | ||
|
|
1e1b2bd594 | ||
|
|
951b86648c | ||
|
|
04433c139d | ||
|
|
3b8c28d21e | ||
|
|
9489718270 | ||
|
|
54df6b8c8c | ||
|
|
d917905529 | ||
|
|
cd6f940f1d | ||
|
|
921a8f068b | ||
|
|
641aa1dfcf | ||
|
|
4522c841af | ||
|
|
8e906ed3de | ||
|
|
ac36ff5752 | ||
|
|
098b77fb4c | ||
|
|
e97f27e580 | ||
|
|
bc00b67a6e | ||
|
|
8b0f2674bd | ||
|
|
108963f87b | ||
|
|
6525a3c3b3 | ||
|
|
f0cc7f6430 | ||
|
|
47563af08c | ||
|
|
26b871ecf4 | ||
|
|
5e02f1b86b | ||
|
|
c78ab9039a | ||
|
|
02e99365c7 | ||
|
|
0c2a419a5e | ||
|
|
77e0373777 | ||
|
|
04d70b1db4 | ||
|
|
86beabdb32 | ||
|
|
1e8cea4971 | ||
|
|
107e2ec64c | ||
|
|
17b97d7ada |
1
.github/ISSUE_TEMPLATE/bug_report.md
vendored
1
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -1,5 +1,6 @@
|
||||
---
|
||||
name: Bug report
|
||||
labels: ["kind/bug"]
|
||||
about: Create a report to help us improve
|
||||
---
|
||||
|
||||
|
||||
50
SECURITY.md
Normal file
50
SECURITY.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
Use this section to tell people about which versions of your project are
|
||||
currently being supported with security updates.
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| 3.2.x | :white_check_mark: |
|
||||
| 3.1.x | :white_check_mark: |
|
||||
| 3.0.x | :white_check_mark: |
|
||||
| 2.1.x | :white_check_mark: |
|
||||
| < 2.1.x | :x: |
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
# Security Vulnerability Disclosure and Response Process
|
||||
|
||||
To ensure KubeSphere security, a security vulnerability disclosure and response process is adopted. And the security team is set up in KubeSphere community, also any issue and PR is welcome for every contributors.
|
||||
|
||||
The primary goal of this process is to reduce the total exposure time of users to publicly known vulnerabilities. To quickly fix vulnerabilities of KubeSphere, the security team is responsible for the entire vulnerability management process, including internal communication and external disclosure.
|
||||
|
||||
If you find a vulnerability or encounter a security incident involving vulnerabilities of KubeSphere, please report it as soon as possible to the KubeSphere security team (security@kubesphere.io).
|
||||
|
||||
Please kindly help provide as much vulnerability information as possible in the following format:
|
||||
|
||||
- Issue title(Please add 'Security' lable)*:
|
||||
|
||||
- Overview*:
|
||||
|
||||
- Affected components and version number*:
|
||||
|
||||
- CVE number (if any):
|
||||
|
||||
- Vulnerability verification process*:
|
||||
|
||||
- Contact information*:
|
||||
|
||||
The asterisk (*) indicates the required field.
|
||||
|
||||
# Response Time
|
||||
|
||||
The KubeSphere security team will confirm the vulnerabilities and contact you within 2 working days after your submission.
|
||||
|
||||
We will publicly thank you after fixing the security vulnerability. To avoid negative impact, please keep the vulnerability confidential until we fix it. We would appreciate it if you could obey the following code of conduct:
|
||||
|
||||
The vulnerability will not be disclosed until KubeSphere releases a patch for it.
|
||||
|
||||
The details of the vulnerability, for example, exploits code, will not be disclosed.
|
||||
@@ -82,6 +82,9 @@ type KubeSphereControllerManagerOptions struct {
|
||||
// * has the lowest priority.
|
||||
// e.g. *,-foo, means "disable 'foo'"
|
||||
ControllerGates []string
|
||||
|
||||
// Enable gops or not.
|
||||
GOPSEnabled bool
|
||||
}
|
||||
|
||||
func NewKubeSphereControllerManagerOptions() *KubeSphereControllerManagerOptions {
|
||||
@@ -144,6 +147,9 @@ func (s *KubeSphereControllerManagerOptions) Flags(allControllerNameSelectors []
|
||||
"named 'foo', '-foo' disables the controller named 'foo'.\nAll controllers: %s",
|
||||
strings.Join(allControllerNameSelectors, ", ")))
|
||||
|
||||
gfs.BoolVar(&s.GOPSEnabled, "gops", s.GOPSEnabled, "Whether to enable gops or not. When enabled this option, "+
|
||||
"controller-manager will listen on a random port on 127.0.0.1, then you can use the gops tool to list and diagnose the controller-manager currently running.")
|
||||
|
||||
kfs := fss.FlagSet("klog")
|
||||
local := flag.NewFlagSet("klog", flag.ExitOnError)
|
||||
klog.InitFlags(local)
|
||||
@@ -236,4 +242,5 @@ func (s *KubeSphereControllerManagerOptions) MergeConfig(cfg *controllerconfig.C
|
||||
s.MultiClusterOptions = cfg.MultiClusterOptions
|
||||
s.ServiceMeshOptions = cfg.ServiceMeshOptions
|
||||
s.GatewayOptions = cfg.GatewayOptions
|
||||
s.MonitoringOptions = cfg.MonitoringOptions
|
||||
}
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/google/gops/agent"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
@@ -73,12 +74,21 @@ func NewControllerManagerCommand() *cobra.Command {
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "controller-manager",
|
||||
Long: `KubeSphere controller manager is a daemon that`,
|
||||
Long: `KubeSphere controller manager is a daemon that embeds the control loops shipped with KubeSphere.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if errs := s.Validate(allControllers); len(errs) != 0 {
|
||||
klog.Error(utilerrors.NewAggregate(errs))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if s.GOPSEnabled {
|
||||
// Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
|
||||
// Bind to a random port on address 127.0.0.1
|
||||
if err := agent.Listen(agent.Options{}); err != nil {
|
||||
klog.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if err = Run(s, controllerconfig.WatchConfigChange(), signals.SetupSignalHandler()); err != nil {
|
||||
klog.Error(err)
|
||||
os.Exit(1)
|
||||
|
||||
@@ -20,6 +20,9 @@ import (
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
openpitrixv1 "kubesphere.io/kubesphere/pkg/kapis/openpitrix/v1"
|
||||
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
|
||||
@@ -41,9 +44,6 @@ import (
|
||||
auditingclient "kubesphere.io/kubesphere/pkg/simple/client/auditing/elasticsearch"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/cache"
|
||||
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins"
|
||||
eventsclient "kubesphere.io/kubesphere/pkg/simple/client/events/elasticsearch"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/k8s"
|
||||
@@ -59,15 +59,18 @@ type ServerRunOptions struct {
|
||||
ConfigFile string
|
||||
GenericServerRunOptions *genericoptions.ServerRunOptions
|
||||
*apiserverconfig.Config
|
||||
schemeOnce sync.Once
|
||||
DebugMode bool
|
||||
|
||||
//
|
||||
DebugMode bool
|
||||
// Enable gops or not.
|
||||
GOPSEnabled bool
|
||||
}
|
||||
|
||||
func NewServerRunOptions() *ServerRunOptions {
|
||||
s := &ServerRunOptions{
|
||||
GenericServerRunOptions: genericoptions.NewServerRunOptions(),
|
||||
Config: apiserverconfig.New(),
|
||||
schemeOnce: sync.Once{},
|
||||
}
|
||||
|
||||
return s
|
||||
@@ -76,13 +79,14 @@ func NewServerRunOptions() *ServerRunOptions {
|
||||
func (s *ServerRunOptions) Flags() (fss cliflag.NamedFlagSets) {
|
||||
fs := fss.FlagSet("generic")
|
||||
fs.BoolVar(&s.DebugMode, "debug", false, "Don't enable this if you don't know what it means.")
|
||||
fs.BoolVar(&s.GOPSEnabled, "gops", false, "Whether to enable gops or not. When enabled this option, "+
|
||||
"ks-apiserver will listen on a random port on 127.0.0.1, then you can use the gops tool to list and diagnose the ks-apiserver currently running.")
|
||||
s.GenericServerRunOptions.AddFlags(fs, s.GenericServerRunOptions)
|
||||
s.KubernetesOptions.AddFlags(fss.FlagSet("kubernetes"), s.KubernetesOptions)
|
||||
s.AuthenticationOptions.AddFlags(fss.FlagSet("authentication"), s.AuthenticationOptions)
|
||||
s.AuthorizationOptions.AddFlags(fss.FlagSet("authorization"), s.AuthorizationOptions)
|
||||
s.DevopsOptions.AddFlags(fss.FlagSet("devops"), s.DevopsOptions)
|
||||
s.SonarQubeOptions.AddFlags(fss.FlagSet("sonarqube"), s.SonarQubeOptions)
|
||||
s.RedisOptions.AddFlags(fss.FlagSet("redis"), s.RedisOptions)
|
||||
s.S3Options.AddFlags(fss.FlagSet("s3"), s.S3Options)
|
||||
s.OpenPitrixOptions.AddFlags(fss.FlagSet("openpitrix"), s.OpenPitrixOptions)
|
||||
s.NetworkOptions.AddFlags(fss.FlagSet("network"), s.NetworkOptions)
|
||||
@@ -171,21 +175,23 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
|
||||
apiServer.SonarClient = sonarqube.NewSonar(sonarClient.SonarQube())
|
||||
}
|
||||
|
||||
var cacheClient cache.Interface
|
||||
if s.RedisOptions != nil && len(s.RedisOptions.Host) != 0 {
|
||||
if s.RedisOptions.Host == fakeInterface && s.DebugMode {
|
||||
apiServer.CacheClient = cache.NewSimpleCache()
|
||||
} else {
|
||||
cacheClient, err = cache.NewRedisClient(s.RedisOptions, stopCh)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to redis service, please check redis status, error: %v", err)
|
||||
}
|
||||
apiServer.CacheClient = cacheClient
|
||||
// If debug mode is on or CacheOptions is nil, will create a fake cache.
|
||||
if s.CacheOptions.Type != "" {
|
||||
if s.DebugMode {
|
||||
s.CacheOptions.Type = cache.DefaultCacheType
|
||||
}
|
||||
cacheClient, err := cache.New(s.CacheOptions, stopCh)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create cache, error: %v", err)
|
||||
}
|
||||
apiServer.CacheClient = cacheClient
|
||||
} else {
|
||||
klog.Warning("ks-apiserver starts without redis provided, it will use in memory cache. " +
|
||||
"This may cause inconsistencies when running ks-apiserver with multiple replicas.")
|
||||
apiServer.CacheClient = cache.NewSimpleCache()
|
||||
s.CacheOptions = &cache.Options{Type: cache.DefaultCacheType}
|
||||
// fake cache has no error to return
|
||||
cacheClient, _ := cache.New(s.CacheOptions, stopCh)
|
||||
apiServer.CacheClient = cacheClient
|
||||
klog.Warning("ks-apiserver starts without cache provided, it will use in memory cache. " +
|
||||
"This may cause inconsistencies when running ks-apiserver with multiple replicas, and memory leak risk")
|
||||
}
|
||||
|
||||
if s.EventsOptions.Host != "" {
|
||||
@@ -217,7 +223,7 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
|
||||
apiServer.ClusterClient = cc
|
||||
}
|
||||
|
||||
apiServer.OpenpitrixClient = openpitrixv1.NewOpenpitrixClient(informerFactory, apiServer.KubernetesClient.KubeSphere(), s.OpenPitrixOptions, apiServer.ClusterClient, stopCh)
|
||||
apiServer.OpenpitrixClient = openpitrixv1.NewOpenpitrixClient(informerFactory, apiServer.KubernetesClient.KubeSphere(), s.OpenPitrixOptions, apiServer.ClusterClient)
|
||||
|
||||
server := &http.Server{
|
||||
Addr: fmt.Sprintf(":%d", s.GenericServerRunOptions.InsecurePort),
|
||||
@@ -236,9 +242,11 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
|
||||
}
|
||||
|
||||
sch := scheme.Scheme
|
||||
if err := apis.AddToScheme(sch); err != nil {
|
||||
klog.Fatalf("unable add APIs to scheme: %v", err)
|
||||
}
|
||||
s.schemeOnce.Do(func() {
|
||||
if err := apis.AddToScheme(sch); err != nil {
|
||||
klog.Fatalf("unable add APIs to scheme: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
apiServer.RuntimeCache, err = runtimecache.New(apiServer.KubernetesClient.Config(), runtimecache.Options{Scheme: sch})
|
||||
if err != nil {
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/google/gops/agent"
|
||||
"github.com/spf13/cobra"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
cliflag "k8s.io/component-base/cli/flag"
|
||||
@@ -57,6 +58,15 @@ cluster's shared state through which all other components interact.`,
|
||||
if errs := s.Validate(); len(errs) != 0 {
|
||||
return utilerrors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
if s.GOPSEnabled {
|
||||
// Add agent to report additional information such as the current stack trace, Go version, memory stats, etc.
|
||||
// Bind to a random port on address 127.0.0.1.
|
||||
if err := agent.Listen(agent.Options{}); err != nil {
|
||||
klog.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return Run(s, apiserverconfig.WatchConfigChange(), signals.SetupSignalHandler())
|
||||
},
|
||||
SilenceUsage: true,
|
||||
|
||||
3
go.mod
3
go.mod
@@ -50,6 +50,7 @@ require (
|
||||
github.com/golang/example v0.0.0-20170904185048-46695d81d1fa
|
||||
github.com/google/go-cmp v0.5.6
|
||||
github.com/google/go-containerregistry v0.6.0
|
||||
github.com/google/gops v0.3.23
|
||||
github.com/google/uuid v1.1.2
|
||||
github.com/gorilla/handlers v1.4.0 // indirect
|
||||
github.com/gorilla/websocket v1.4.2
|
||||
@@ -81,6 +82,8 @@ require (
|
||||
github.com/prometheus/client_golang v1.11.0
|
||||
github.com/prometheus/common v0.26.0
|
||||
github.com/prometheus/prometheus v1.8.2-0.20200907175821-8219b442c864
|
||||
github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7 // indirect
|
||||
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 // indirect
|
||||
github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009
|
||||
github.com/speps/go-hashids v2.0.0+incompatible
|
||||
github.com/spf13/cobra v1.2.1
|
||||
|
||||
12
go.sum
12
go.sum
@@ -59,6 +59,7 @@ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/O
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
|
||||
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
|
||||
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
|
||||
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
|
||||
@@ -287,6 +288,8 @@ github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
|
||||
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM=
|
||||
github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
|
||||
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-ole/go-ole v1.2.6-0.20210915003542-8b1f7f90f6b1/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-openapi/analysis v0.19.10 h1:5BHISBAXOc/aJK25irLZnx2D3s6WyYaY9D4gmuz9fdE=
|
||||
github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ=
|
||||
github.com/go-openapi/errors v0.19.4 h1:fSGwO1tSYHFu70NKaWJt5Qh0qoBRtCm/mXS1yhf+0W0=
|
||||
@@ -388,6 +391,8 @@ github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASu
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gops v0.3.23 h1:OjsHRINl5FiIyTc8jivIg4UN0GY6Nh32SL8KRbl8GQo=
|
||||
github.com/google/gops v0.3.23/go.mod h1:7diIdLsqpCihPSX3fQagksT/Ku/y4RL9LHTlKyEUDl8=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20200417002340-c6e0a841f49a/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
@@ -501,6 +506,7 @@ github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dv
|
||||
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
|
||||
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8=
|
||||
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
|
||||
github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19/go.mod h1:hY+WOq6m2FpbvyrI93sMaypsttvaIL5nhVR92dTMUcQ=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kisielk/sqlstruct v0.0.0-20150923205031-648daed35d49/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
|
||||
@@ -741,6 +747,9 @@ github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfP
|
||||
github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ=
|
||||
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/shirou/gopsutil/v3 v3.21.9/go.mod h1:YWp/H8Qs5fVmf17v7JNZzA0mPJ+mS2e9JdiUF9LlKzQ=
|
||||
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
@@ -784,6 +793,8 @@ github.com/thanos-io/thanos v0.13.1-0.20200910143741-e0b7f7b32e9c/go.mod h1:1Ize
|
||||
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
|
||||
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
@@ -991,6 +1002,7 @@ k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
|
||||
kubesphere.io/monitoring-dashboard v0.2.2 h1:aniATtXLgRAAvKOjd2UxWWHMh4/T7a0HoQ9bd+/bGcA=
|
||||
kubesphere.io/monitoring-dashboard v0.2.2/go.mod h1:ksDjmOuoN0C0GuYp0s5X3186cPgk2asLUaO1WlEKISY=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/goversion v1.2.0/go.mod h1:Eih9y/uIBS3ulggl7KNJ09xGSLcuNaLgmvvqa07sgfo=
|
||||
rsc.io/letsencrypt v0.0.1 h1:DV0d09Ne9E7UUa9ZqWktZ9L2VmybgTgfq7xlfFR/bbU=
|
||||
rsc.io/letsencrypt v0.0.1/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
|
||||
@@ -39,6 +39,7 @@ find_files() {
|
||||
-o -wholename '*/third_party/*' \
|
||||
-o -wholename '*/vendor/*' \
|
||||
-o -wholename './staging/src/kubesphere.io/client-go/*vendor/*' \
|
||||
-o -wholename './staging/src/kubesphere.io/api/*/zz_generated.deepcopy.go' \
|
||||
\) -prune \
|
||||
\) -name '*.go'
|
||||
}
|
||||
|
||||
1
hack/verify-gofmt.sh
Normal file → Executable file
1
hack/verify-gofmt.sh
Normal file → Executable file
@@ -44,6 +44,7 @@ find_files() {
|
||||
-o -wholename '*/third_party/*' \
|
||||
-o -wholename '*/vendor/*' \
|
||||
-o -wholename './staging/src/kubesphere.io/client-go/*vendor/*' \
|
||||
-o -wholename './staging/src/kubesphere.io/api/*/zz_generated.deepcopy.go' \
|
||||
-o -wholename '*/bindata.go' \
|
||||
\) -prune \
|
||||
\) -name '*.go'
|
||||
|
||||
@@ -394,6 +394,10 @@ func waitForCacheSync(discoveryClient discovery.DiscoveryInterface, sharedInform
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
klog.Warningf("group version %s not exists in the cluster", groupVersion)
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("failed to fetch group version resources %s: %s", groupVersion, err)
|
||||
}
|
||||
for _, resourceName := range resourceNames {
|
||||
|
||||
@@ -141,6 +141,7 @@ func (b *Backend) sendEvents(events *v1alpha1.EventList) {
|
||||
defer cancel()
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
skipReturnSender := false
|
||||
|
||||
send := func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), b.getSenderTimeout)
|
||||
@@ -149,6 +150,7 @@ func (b *Backend) sendEvents(events *v1alpha1.EventList) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
klog.Error("Get auditing event sender timeout")
|
||||
skipReturnSender = true
|
||||
return
|
||||
case b.senderCh <- struct{}{}:
|
||||
}
|
||||
@@ -182,7 +184,9 @@ func (b *Backend) sendEvents(events *v1alpha1.EventList) {
|
||||
go send()
|
||||
|
||||
defer func() {
|
||||
<-b.senderCh
|
||||
if !skipReturnSender {
|
||||
<-b.senderCh
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
|
||||
@@ -33,8 +33,8 @@ import (
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/apis/audit"
|
||||
"k8s.io/klog"
|
||||
|
||||
devopsv1alpha3 "kubesphere.io/api/devops/v1alpha3"
|
||||
"kubesphere.io/api/iam/v1alpha2"
|
||||
|
||||
auditv1alpha1 "kubesphere.io/kubesphere/pkg/apiserver/auditing/v1alpha1"
|
||||
"kubesphere.io/kubesphere/pkg/apiserver/query"
|
||||
@@ -192,7 +192,7 @@ func (a *auditing) LogRequestObject(req *http.Request, info *request.RequestInfo
|
||||
}
|
||||
}
|
||||
|
||||
if (e.Level.GreaterOrEqual(audit.LevelRequest) || e.Verb == "create") && req.ContentLength > 0 {
|
||||
if a.needAnalyzeRequestBody(e, req) {
|
||||
body, err := ioutil.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
@@ -212,11 +212,45 @@ func (a *auditing) LogRequestObject(req *http.Request, info *request.RequestInfo
|
||||
e.ObjectRef.Name = obj.Name
|
||||
}
|
||||
}
|
||||
|
||||
// for recording disable and enable user
|
||||
if e.ObjectRef.Resource == "users" && e.Verb == "update" {
|
||||
u := &v1alpha2.User{}
|
||||
if err := json.Unmarshal(body, u); err == nil {
|
||||
if u.Status.State == v1alpha2.UserActive {
|
||||
e.Verb = "enable"
|
||||
} else if u.Status.State == v1alpha2.UserDisabled {
|
||||
e.Verb = "disable"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return e
|
||||
}
|
||||
|
||||
func (a *auditing) needAnalyzeRequestBody(e *auditv1alpha1.Event, req *http.Request) bool {
|
||||
|
||||
if req.ContentLength <= 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
if e.Level.GreaterOrEqual(audit.LevelRequest) {
|
||||
return true
|
||||
}
|
||||
|
||||
if e.Verb == "create" {
|
||||
return true
|
||||
}
|
||||
|
||||
// for recording disable and enable user
|
||||
if e.ObjectRef.Resource == "users" && e.Verb == "update" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (a *auditing) LogResponseObject(e *auditv1alpha1.Event, resp *ResponseCapture) {
|
||||
|
||||
e.StageTimestamp = metav1.NowMicro()
|
||||
|
||||
@@ -45,7 +45,7 @@ func init() {
|
||||
type ldapProvider struct {
|
||||
// Host and optional port of the LDAP server in the form "host:port".
|
||||
// If the port is not supplied, 389 for insecure or StartTLS connections, 636
|
||||
Host string `json:"host,omitempty" yaml:"managerDN"`
|
||||
Host string `json:"host,omitempty" yaml:"host"`
|
||||
// Timeout duration when reading data from remote server. Default to 15s.
|
||||
ReadTimeout int `json:"readTimeout" yaml:"readTimeout"`
|
||||
// If specified, connections will use the ldaps:// protocol
|
||||
|
||||
@@ -160,7 +160,7 @@ type Config struct {
|
||||
ServiceMeshOptions *servicemesh.Options `json:"servicemesh,omitempty" yaml:"servicemesh,omitempty" mapstructure:"servicemesh"`
|
||||
NetworkOptions *network.Options `json:"network,omitempty" yaml:"network,omitempty" mapstructure:"network"`
|
||||
LdapOptions *ldap.Options `json:"-,omitempty" yaml:"ldap,omitempty" mapstructure:"ldap"`
|
||||
RedisOptions *cache.Options `json:"redis,omitempty" yaml:"redis,omitempty" mapstructure:"redis"`
|
||||
CacheOptions *cache.Options `json:"cache,omitempty" yaml:"cache,omitempty" mapstructure:"cache"`
|
||||
S3Options *s3.Options `json:"s3,omitempty" yaml:"s3,omitempty" mapstructure:"s3"`
|
||||
OpenPitrixOptions *openpitrix.Options `json:"openpitrix,omitempty" yaml:"openpitrix,omitempty" mapstructure:"openpitrix"`
|
||||
MonitoringOptions *prometheus.Options `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring"`
|
||||
@@ -189,7 +189,7 @@ func New() *Config {
|
||||
ServiceMeshOptions: servicemesh.NewServiceMeshOptions(),
|
||||
NetworkOptions: network.NewNetworkOptions(),
|
||||
LdapOptions: ldap.NewOptions(),
|
||||
RedisOptions: cache.NewRedisOptions(),
|
||||
CacheOptions: cache.NewCacheOptions(),
|
||||
S3Options: s3.NewS3Options(),
|
||||
OpenPitrixOptions: openpitrix.NewOptions(),
|
||||
MonitoringOptions: prometheus.NewPrometheusOptions(),
|
||||
@@ -292,8 +292,8 @@ func (conf *Config) ToMap() map[string]bool {
|
||||
// Remove invalid options before serializing to json or yaml
|
||||
func (conf *Config) stripEmptyOptions() {
|
||||
|
||||
if conf.RedisOptions != nil && conf.RedisOptions.Host == "" {
|
||||
conf.RedisOptions = nil
|
||||
if conf.CacheOptions != nil && conf.CacheOptions.Type == "" {
|
||||
conf.CacheOptions = nil
|
||||
}
|
||||
|
||||
if conf.DevopsOptions != nil && conf.DevopsOptions.Host == "" {
|
||||
|
||||
@@ -88,11 +88,9 @@ func newTestConfig() (*Config, error) {
|
||||
MaxCap: 100,
|
||||
PoolName: "ldap",
|
||||
},
|
||||
RedisOptions: &cache.Options{
|
||||
Host: "localhost",
|
||||
Port: 6379,
|
||||
Password: "KUBESPHERE_REDIS_PASSWORD",
|
||||
DB: 0,
|
||||
CacheOptions: &cache.Options{
|
||||
Type: "redis",
|
||||
Options: map[string]interface{}{},
|
||||
},
|
||||
S3Options: &s3.Options{
|
||||
Endpoint: "http://minio.openpitrix-system.svc",
|
||||
@@ -236,9 +234,6 @@ func TestGet(t *testing.T) {
|
||||
saveTestConfig(t, conf)
|
||||
defer cleanTestConfig(t)
|
||||
|
||||
conf.RedisOptions.Password = "P@88w0rd"
|
||||
os.Setenv("KUBESPHERE_REDIS_PASSWORD", "P@88w0rd")
|
||||
|
||||
conf2, err := TryLoadFromDisk()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -251,7 +246,7 @@ func TestGet(t *testing.T) {
|
||||
func TestStripEmptyOptions(t *testing.T) {
|
||||
var config Config
|
||||
|
||||
config.RedisOptions = &cache.Options{Host: ""}
|
||||
config.CacheOptions = &cache.Options{Type: ""}
|
||||
config.DevopsOptions = &jenkins.Options{Host: ""}
|
||||
config.MonitoringOptions = &prometheus.Options{Endpoint: ""}
|
||||
config.SonarQubeOptions = &sonarqube.Options{Host: ""}
|
||||
@@ -284,7 +279,7 @@ func TestStripEmptyOptions(t *testing.T) {
|
||||
|
||||
config.stripEmptyOptions()
|
||||
|
||||
if config.RedisOptions != nil ||
|
||||
if config.CacheOptions != nil ||
|
||||
config.DevopsOptions != nil ||
|
||||
config.MonitoringOptions != nil ||
|
||||
config.SonarQubeOptions != nil ||
|
||||
|
||||
@@ -246,8 +246,6 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er
|
||||
// parsing successful, so we now know the proper value for .Parts
|
||||
requestInfo.Parts = currentParts
|
||||
|
||||
requestInfo.ResourceScope = r.resolveResourceScope(requestInfo)
|
||||
|
||||
// parts look like: resource/resourceName/subresource/other/stuff/we/don't/interpret
|
||||
switch {
|
||||
case len(requestInfo.Parts) >= 3 && !specialVerbsNoSubresources.Has(requestInfo.Verb):
|
||||
@@ -260,6 +258,8 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er
|
||||
requestInfo.Resource = requestInfo.Parts[0]
|
||||
}
|
||||
|
||||
requestInfo.ResourceScope = r.resolveResourceScope(requestInfo)
|
||||
|
||||
// if there's no name on the request and we thought it was a get before, then the actual verb is a list or a watch
|
||||
if len(requestInfo.Name) == 0 && requestInfo.Verb == "get" {
|
||||
opts := metainternalversion.ListOptions{}
|
||||
|
||||
@@ -196,13 +196,13 @@ func newDeployments(deploymentName, namespace string, labels map[string]string,
|
||||
return deployment
|
||||
}
|
||||
|
||||
func newService(serviceName, namesapce string, labels map[string]string) *corev1.Service {
|
||||
func newService(serviceName, namespace string, labels map[string]string) *corev1.Service {
|
||||
labels["app"] = serviceName
|
||||
|
||||
svc := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serviceName,
|
||||
Namespace: namesapce,
|
||||
Namespace: namespace,
|
||||
Labels: labels,
|
||||
Annotations: map[string]string{
|
||||
"servicemesh.kubesphere.io/enabled": "true",
|
||||
|
||||
@@ -184,7 +184,7 @@ func NewClusterController(
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
oldCluster := oldObj.(*clusterv1alpha1.Cluster)
|
||||
newCluster := newObj.(*clusterv1alpha1.Cluster)
|
||||
if !reflect.DeepEqual(oldCluster.Spec, newCluster.Spec) {
|
||||
if !reflect.DeepEqual(oldCluster.Spec, newCluster.Spec) || newCluster.DeletionTimestamp != nil {
|
||||
c.enqueueCluster(newObj)
|
||||
}
|
||||
},
|
||||
@@ -297,10 +297,10 @@ func (c *clusterController) resyncClusters() error {
|
||||
}
|
||||
|
||||
for _, cluster := range clusters {
|
||||
if err = c.syncCluster(cluster.Name); err != nil {
|
||||
klog.Warningf("failed to sync cluster %s: %s", cluster.Name, err)
|
||||
}
|
||||
key, _ := cache.MetaNamespaceKeyFunc(cluster)
|
||||
c.queue.Add(key)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -418,6 +418,15 @@ func (c *clusterController) syncCluster(key string) error {
|
||||
Message: "Cluster can not join federation control plane",
|
||||
}
|
||||
c.updateClusterCondition(cluster, federationNotReadyCondition)
|
||||
notReadyCondition := clusterv1alpha1.ClusterCondition{
|
||||
Type: clusterv1alpha1.ClusterReady,
|
||||
Status: v1.ConditionFalse,
|
||||
LastUpdateTime: metav1.Now(),
|
||||
LastTransitionTime: metav1.Now(),
|
||||
Reason: "Cluster join federation control plane failed",
|
||||
Message: "Cluster is Not Ready now",
|
||||
}
|
||||
c.updateClusterCondition(cluster, notReadyCondition)
|
||||
|
||||
_, err = c.ksClient.ClusterV1alpha1().Clusters().Update(context.TODO(), cluster, metav1.UpdateOptions{})
|
||||
if err != nil {
|
||||
|
||||
@@ -18,6 +18,7 @@ package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
@@ -268,7 +269,7 @@ func createAuthorizedServiceAccount(joiningClusterClientset kubeclient.Interface
|
||||
|
||||
klog.V(2).Infof("Creating service account in joining cluster: %s", joiningClusterName)
|
||||
|
||||
saName, err := createServiceAccount(joiningClusterClientset, namespace,
|
||||
saName, err := createServiceAccountWithSecret(joiningClusterClientset, namespace,
|
||||
joiningClusterName, hostClusterName, dryRun, errorOnExisting)
|
||||
if err != nil {
|
||||
klog.V(2).Infof("Error creating service account: %s in joining cluster: %s due to: %v",
|
||||
@@ -320,31 +321,75 @@ func createAuthorizedServiceAccount(joiningClusterClientset kubeclient.Interface
|
||||
return saName, nil
|
||||
}
|
||||
|
||||
// createServiceAccount creates a service account in the cluster associated
|
||||
// createServiceAccountWithSecret creates a service account and secret in the cluster associated
|
||||
// with clusterClientset with credentials that will be used by the host cluster
|
||||
// to access its API server.
|
||||
func createServiceAccount(clusterClientset kubeclient.Interface, namespace,
|
||||
func createServiceAccountWithSecret(clusterClientset kubeclient.Interface, namespace,
|
||||
joiningClusterName, hostClusterName string, dryRun, errorOnExisting bool) (string, error) {
|
||||
saName := util.ClusterServiceAccountName(joiningClusterName, hostClusterName)
|
||||
sa := &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: saName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
if dryRun {
|
||||
return saName, nil
|
||||
}
|
||||
|
||||
// Create a new service account.
|
||||
_, err := clusterClientset.CoreV1().ServiceAccounts(namespace).Create(context.Background(), sa, metav1.CreateOptions{})
|
||||
switch {
|
||||
case apierrors.IsAlreadyExists(err) && errorOnExisting:
|
||||
klog.V(2).Infof("Service account %s/%s already exists in target cluster %s", namespace, saName, joiningClusterName)
|
||||
ctx := context.Background()
|
||||
sa, err := clusterClientset.CoreV1().ServiceAccounts(namespace).Get(ctx, saName, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
sa = &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: saName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
// We must create the sa first, then create the associated secret, and update the sa at last.
|
||||
// Or the kube-controller-manager will delete the secret.
|
||||
sa, err = clusterClientset.CoreV1().ServiceAccounts(namespace).Create(ctx, sa, metav1.CreateOptions{})
|
||||
switch {
|
||||
case apierrors.IsAlreadyExists(err) && errorOnExisting:
|
||||
klog.V(2).Infof("Service account %s/%s already exists in target cluster %s", namespace, saName, joiningClusterName)
|
||||
return "", err
|
||||
case err != nil && !apierrors.IsAlreadyExists(err):
|
||||
klog.V(2).Infof("Could not create service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err)
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
if len(sa.Secrets) > 0 {
|
||||
return saName, nil
|
||||
}
|
||||
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: fmt.Sprintf("%s-token-", saName),
|
||||
Namespace: namespace,
|
||||
Annotations: map[string]string{
|
||||
corev1.ServiceAccountNameKey: saName,
|
||||
},
|
||||
},
|
||||
Type: corev1.SecretTypeServiceAccountToken,
|
||||
}
|
||||
|
||||
// After kubernetes v1.24, kube-controller-manger will not create the default secret for
|
||||
// service account. http://kep.k8s.io/2800
|
||||
// Create a default secret.
|
||||
secret, err = clusterClientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
|
||||
|
||||
if err != nil && !apierrors.IsAlreadyExists(err) {
|
||||
klog.V(2).Infof("Could not create secret for service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err)
|
||||
return "", err
|
||||
case err != nil && !apierrors.IsAlreadyExists(err):
|
||||
klog.V(2).Infof("Could not create service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err)
|
||||
}
|
||||
|
||||
// At last, update the service account.
|
||||
sa.Secrets = append(sa.Secrets, corev1.ObjectReference{Name: secret.Name})
|
||||
_, err = clusterClientset.CoreV1().ServiceAccounts(namespace).Update(ctx, sa, metav1.UpdateOptions{})
|
||||
switch {
|
||||
case err != nil:
|
||||
klog.Infof("Could not update service account %s/%s in target cluster %s due to: %v", namespace, saName, joiningClusterName, err)
|
||||
return "", err
|
||||
default:
|
||||
return saName, nil
|
||||
|
||||
@@ -17,7 +17,6 @@ limitations under the License.
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@@ -177,7 +176,7 @@ func (h *handler) PodLog(request *restful.Request, response *restful.Response) {
|
||||
}
|
||||
|
||||
fw := flushwriter.Wrap(response.ResponseWriter)
|
||||
err := h.gw.GetPodLogs(context.TODO(), podNamespace, podID, logOptions, fw)
|
||||
err := h.gw.GetPodLogs(request.Request.Context(), podNamespace, podID, logOptions, fw)
|
||||
if err != nil {
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
@@ -196,7 +195,7 @@ func (h *handler) PodLogSearch(request *restful.Request, response *restful.Respo
|
||||
api.HandleError(response, request, err)
|
||||
return
|
||||
}
|
||||
// ES log will be filted by pods and namespace by default.
|
||||
// ES log will be filtered by pods and namespace by default.
|
||||
pods, err := h.gw.GetPods(ns, &query.Query{})
|
||||
if err != nil {
|
||||
api.HandleError(response, request, err)
|
||||
|
||||
@@ -380,6 +380,7 @@ func (h *iamHandler) ListWorkspaceRoles(request *restful.Request, response *rest
|
||||
queryParam.Filters[iamv1alpha2.ScopeWorkspace] = query.Value(workspace)
|
||||
// shared workspace role template
|
||||
if string(queryParam.Filters[query.FieldLabel]) == fmt.Sprintf("%s=%s", iamv1alpha2.RoleTemplateLabel, "true") ||
|
||||
strings.Contains(queryParam.LabelSelector, iamv1alpha2.RoleTemplateLabel) ||
|
||||
queryParam.Filters[iamv1alpha2.AggregateTo] != "" {
|
||||
delete(queryParam.Filters, iamv1alpha2.ScopeWorkspace)
|
||||
}
|
||||
|
||||
@@ -52,7 +52,7 @@ type openpitrixHandler struct {
|
||||
openpitrix openpitrix.Interface
|
||||
}
|
||||
|
||||
func NewOpenpitrixClient(ksInformers informers.InformerFactory, ksClient versioned.Interface, option *openpitrixoptions.Options, cc clusterclient.ClusterClients, stopCh <-chan struct{}) openpitrix.Interface {
|
||||
func NewOpenpitrixClient(ksInformers informers.InformerFactory, ksClient versioned.Interface, option *openpitrixoptions.Options, cc clusterclient.ClusterClients) openpitrix.Interface {
|
||||
var s3Client s3.Interface
|
||||
if option != nil && option.S3Options != nil && len(option.S3Options.Endpoint) != 0 {
|
||||
var err error
|
||||
@@ -62,7 +62,7 @@ func NewOpenpitrixClient(ksInformers informers.InformerFactory, ksClient version
|
||||
}
|
||||
}
|
||||
|
||||
return openpitrix.NewOpenpitrixOperator(ksInformers, ksClient, s3Client, cc, stopCh)
|
||||
return openpitrix.NewOpenpitrixOperator(ksInformers, ksClient, s3Client, cc)
|
||||
}
|
||||
|
||||
func (h *openpitrixHandler) CreateRepo(req *restful.Request, resp *restful.Response) {
|
||||
@@ -753,7 +753,7 @@ func (h *openpitrixHandler) ListApplications(req *restful.Request, resp *restful
|
||||
return
|
||||
}
|
||||
|
||||
resp.WriteAsJson(result)
|
||||
resp.WriteEntity(result)
|
||||
}
|
||||
|
||||
func (h *openpitrixHandler) UpgradeApplication(req *restful.Request, resp *restful.Response) {
|
||||
|
||||
@@ -48,15 +48,17 @@ func NewHandler(o *servicemesh.Options, client kubernetes.Interface, cache cache
|
||||
if o != nil && o.KialiQueryHost != "" {
|
||||
sa, err := client.CoreV1().ServiceAccounts(KubesphereNamespace).Get(context.TODO(), KubeSphereServiceAccount, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
secret, err := client.CoreV1().Secrets(KubesphereNamespace).Get(context.TODO(), sa.Secrets[0].Name, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
return &Handler{
|
||||
opt: o,
|
||||
client: kiali.NewDefaultClient(
|
||||
cache,
|
||||
string(secret.Data["token"]),
|
||||
o.KialiQueryHost,
|
||||
),
|
||||
if len(sa.Secrets) > 0 {
|
||||
secret, err := client.CoreV1().Secrets(KubesphereNamespace).Get(context.TODO(), sa.Secrets[0].Name, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
return &Handler{
|
||||
opt: o,
|
||||
client: kiali.NewDefaultClient(
|
||||
cache,
|
||||
string(secret.Data["token"]),
|
||||
o.KialiQueryHost,
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
klog.Warningf("get ServiceAccount's Secret failed %v", err)
|
||||
|
||||
142
pkg/kapis/servicemesh/metrics/v1alpha2/handler_test.go
Normal file
142
pkg/kapis/servicemesh/metrics/v1alpha2/handler_test.go
Normal file
@@ -0,0 +1,142 @@
|
||||
package v1alpha2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/emicklei/go-restful"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
fakek8s "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/klog"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/kiali"
|
||||
"kubesphere.io/kubesphere/pkg/simple/client/servicemesh"
|
||||
"kubesphere.io/kubesphere/pkg/utils/reflectutils"
|
||||
)
|
||||
|
||||
func prepare() (*Handler, error) {
|
||||
var namespaceName = "kubesphere-system"
|
||||
var serviceAccountName = "kubesphere"
|
||||
var secretName = "kiali"
|
||||
clientset := fakek8s.NewSimpleClientset()
|
||||
|
||||
ctx := context.Background()
|
||||
namespacesClient := clientset.CoreV1().Namespaces()
|
||||
ns := &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespaceName,
|
||||
},
|
||||
}
|
||||
_, err := namespacesClient.Create(ctx, ns, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
klog.Errorf("create namespace failed ")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: secretName,
|
||||
Namespace: namespaceName,
|
||||
},
|
||||
}
|
||||
|
||||
object := &corev1.ObjectReference{
|
||||
Name: secretName,
|
||||
}
|
||||
|
||||
sa := &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serviceAccountName,
|
||||
Namespace: namespaceName,
|
||||
},
|
||||
Secrets: []corev1.ObjectReference{*object},
|
||||
}
|
||||
|
||||
serviceAccountClient := clientset.CoreV1().ServiceAccounts(namespaceName)
|
||||
|
||||
_, err = serviceAccountClient.Create(ctx, sa, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
klog.Errorf("create serviceAccount failed ")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
secretClient := clientset.CoreV1().Secrets(namespaceName)
|
||||
|
||||
_, err = secretClient.Create(ctx, secret, metav1.CreateOptions{})
|
||||
|
||||
if err != nil {
|
||||
klog.Errorf("create secret failed ")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// mock jaeger server
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
options := &servicemesh.Options{
|
||||
IstioPilotHost: "",
|
||||
KialiQueryHost: "",
|
||||
JaegerQueryHost: ts.URL,
|
||||
ServicemeshPrometheusHost: "",
|
||||
}
|
||||
handler := NewHandler(options, clientset, nil)
|
||||
|
||||
token, _ := json.Marshal(
|
||||
&kiali.TokenResponse{
|
||||
Username: "test",
|
||||
Token: "test",
|
||||
},
|
||||
)
|
||||
|
||||
mc := &kiali.MockClient{
|
||||
TokenResult: token,
|
||||
RequestResult: "fake",
|
||||
}
|
||||
|
||||
client := kiali.NewClient("token", nil, mc, "token", options.KialiQueryHost)
|
||||
|
||||
err = reflectutils.SetUnExportedField(handler, "client", client)
|
||||
if err != nil {
|
||||
klog.Errorf("apply mock client failed")
|
||||
return nil, err
|
||||
}
|
||||
return handler, nil
|
||||
}
|
||||
|
||||
func TestGetServiceTracing(t *testing.T) {
|
||||
handler, err := prepare()
|
||||
if err != nil {
|
||||
t.Fatalf("init handler failed")
|
||||
}
|
||||
|
||||
namespaceName := "namespace-test"
|
||||
serviceName := "service-test"
|
||||
url := fmt.Sprintf("/namespaces/%s/services/%s/traces", namespaceName, serviceName)
|
||||
request, _ := http.NewRequest("GET", url, nil)
|
||||
query := request.URL.Query()
|
||||
query.Add("start", "1650167872000000")
|
||||
query.Add("end", "1650211072000000")
|
||||
query.Add("limit", "10")
|
||||
request.URL.RawQuery = query.Encode()
|
||||
|
||||
restfulRequest := restful.NewRequest(request)
|
||||
pathMap := make(map[string]string)
|
||||
pathMap["namespace"] = namespaceName
|
||||
pathMap["service"] = serviceName
|
||||
if err := reflectutils.SetUnExportedField(restfulRequest, "pathParameters", pathMap); err != nil {
|
||||
t.Fatalf("set pathParameters failed")
|
||||
}
|
||||
|
||||
recorder := httptest.NewRecorder()
|
||||
restfulResponse := restful.NewResponse(recorder)
|
||||
restfulResponse.SetRequestAccepts("application/json")
|
||||
handler.GetServiceTracing(restfulRequest, restfulResponse)
|
||||
if status := restfulResponse.StatusCode(); status != http.StatusOK {
|
||||
t.Errorf("handler returned wrong status code: got %v want %v", status, http.StatusOK)
|
||||
}
|
||||
}
|
||||
@@ -202,30 +202,40 @@ func (h *tenantHandler) CreateNamespace(request *restful.Request, response *rest
|
||||
response.WriteEntity(created)
|
||||
}
|
||||
|
||||
func (h *tenantHandler) CreateWorkspaceTemplate(request *restful.Request, response *restful.Response) {
|
||||
func (h *tenantHandler) CreateWorkspaceTemplate(req *restful.Request, resp *restful.Response) {
|
||||
var workspace tenantv1alpha2.WorkspaceTemplate
|
||||
|
||||
err := request.ReadEntity(&workspace)
|
||||
err := req.ReadEntity(&workspace)
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleBadRequest(resp, req, err)
|
||||
return
|
||||
}
|
||||
requestUser, ok := request.UserFrom(req.Request.Context())
|
||||
if !ok {
|
||||
err := fmt.Errorf("cannot obtain user info")
|
||||
klog.Errorln(err)
|
||||
api.HandleForbidden(resp, req, err)
|
||||
}
|
||||
|
||||
created, err := h.tenant.CreateWorkspaceTemplate(&workspace)
|
||||
created, err := h.tenant.CreateWorkspaceTemplate(requestUser, &workspace)
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
if errors.IsNotFound(err) {
|
||||
api.HandleNotFound(response, request, err)
|
||||
api.HandleNotFound(resp, req, err)
|
||||
return
|
||||
}
|
||||
api.HandleBadRequest(response, request, err)
|
||||
if errors.IsForbidden(err) {
|
||||
api.HandleForbidden(resp, req, err)
|
||||
return
|
||||
}
|
||||
api.HandleBadRequest(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
response.WriteEntity(created)
|
||||
resp.WriteEntity(created)
|
||||
}
|
||||
|
||||
func (h *tenantHandler) DeleteWorkspaceTemplate(request *restful.Request, response *restful.Response) {
|
||||
@@ -253,42 +263,53 @@ func (h *tenantHandler) DeleteWorkspaceTemplate(request *restful.Request, respon
|
||||
response.WriteEntity(servererr.None)
|
||||
}
|
||||
|
||||
func (h *tenantHandler) UpdateWorkspaceTemplate(request *restful.Request, response *restful.Response) {
|
||||
workspaceName := request.PathParameter("workspace")
|
||||
func (h *tenantHandler) UpdateWorkspaceTemplate(req *restful.Request, resp *restful.Response) {
|
||||
workspaceName := req.PathParameter("workspace")
|
||||
var workspace tenantv1alpha2.WorkspaceTemplate
|
||||
|
||||
err := request.ReadEntity(&workspace)
|
||||
err := req.ReadEntity(&workspace)
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleBadRequest(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
if workspaceName != workspace.Name {
|
||||
err := fmt.Errorf("the name of the object (%s) does not match the name on the URL (%s)", workspace.Name, workspaceName)
|
||||
klog.Errorf("%+v", err)
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleBadRequest(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
updated, err := h.tenant.UpdateWorkspaceTemplate(&workspace)
|
||||
requestUser, ok := request.UserFrom(req.Request.Context())
|
||||
if !ok {
|
||||
err := fmt.Errorf("cannot obtain user info")
|
||||
klog.Errorln(err)
|
||||
api.HandleForbidden(resp, req, err)
|
||||
}
|
||||
|
||||
updated, err := h.tenant.UpdateWorkspaceTemplate(requestUser, &workspace)
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
if errors.IsNotFound(err) {
|
||||
api.HandleNotFound(response, request, err)
|
||||
api.HandleNotFound(resp, req, err)
|
||||
return
|
||||
}
|
||||
if errors.IsBadRequest(err) {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleBadRequest(resp, req, err)
|
||||
return
|
||||
}
|
||||
api.HandleInternalError(response, request, err)
|
||||
if errors.IsForbidden(err) {
|
||||
api.HandleForbidden(resp, req, err)
|
||||
return
|
||||
}
|
||||
api.HandleInternalError(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
response.WriteEntity(updated)
|
||||
resp.WriteEntity(updated)
|
||||
}
|
||||
|
||||
func (h *tenantHandler) DescribeWorkspaceTemplate(request *restful.Request, response *restful.Response) {
|
||||
@@ -520,33 +541,44 @@ func (h *tenantHandler) PatchNamespace(request *restful.Request, response *restf
|
||||
response.WriteEntity(patched)
|
||||
}
|
||||
|
||||
func (h *tenantHandler) PatchWorkspaceTemplate(request *restful.Request, response *restful.Response) {
|
||||
workspaceName := request.PathParameter("workspace")
|
||||
func (h *tenantHandler) PatchWorkspaceTemplate(req *restful.Request, resp *restful.Response) {
|
||||
workspaceName := req.PathParameter("workspace")
|
||||
var data json.RawMessage
|
||||
err := request.ReadEntity(&data)
|
||||
err := req.ReadEntity(&data)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleBadRequest(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
patched, err := h.tenant.PatchWorkspaceTemplate(workspaceName, data)
|
||||
requestUser, ok := request.UserFrom(req.Request.Context())
|
||||
if !ok {
|
||||
err := fmt.Errorf("cannot obtain user info")
|
||||
klog.Errorln(err)
|
||||
api.HandleForbidden(resp, req, err)
|
||||
}
|
||||
|
||||
patched, err := h.tenant.PatchWorkspaceTemplate(requestUser, workspaceName, data)
|
||||
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
if errors.IsNotFound(err) {
|
||||
api.HandleNotFound(response, request, err)
|
||||
api.HandleNotFound(resp, req, err)
|
||||
return
|
||||
}
|
||||
if errors.IsBadRequest(err) {
|
||||
api.HandleBadRequest(response, request, err)
|
||||
api.HandleBadRequest(resp, req, err)
|
||||
return
|
||||
}
|
||||
api.HandleInternalError(response, request, err)
|
||||
if errors.IsNotFound(err) {
|
||||
api.HandleForbidden(resp, req, err)
|
||||
return
|
||||
}
|
||||
api.HandleInternalError(resp, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
response.WriteEntity(patched)
|
||||
resp.WriteEntity(patched)
|
||||
}
|
||||
|
||||
func (h *tenantHandler) ListClusters(r *restful.Request, response *restful.Response) {
|
||||
|
||||
@@ -47,12 +47,13 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
MasterLabel = "node-role.kubernetes.io/master"
|
||||
SidecarInject = "sidecar.istio.io/inject"
|
||||
gatewayPrefix = "kubesphere-router-"
|
||||
workingNamespace = "kubesphere-controls-system"
|
||||
globalGatewayname = gatewayPrefix + "kubesphere-system"
|
||||
helmPatch = `{"metadata":{"annotations":{"meta.helm.sh/release-name":"%s-ingress","meta.helm.sh/release-namespace":"%s"},"labels":{"helm.sh/chart":"ingress-nginx-3.35.0","app.kubernetes.io/managed-by":"Helm","app":null,"component":null,"tier":null}},"spec":{"selector":null}}`
|
||||
MasterLabel = "node-role.kubernetes.io/master"
|
||||
SidecarInject = "sidecar.istio.io/inject"
|
||||
gatewayPrefix = "kubesphere-router-"
|
||||
workingNamespace = "kubesphere-controls-system"
|
||||
globalGatewayNameSuffix = "kubesphere-system"
|
||||
globalGatewayName = gatewayPrefix + globalGatewayNameSuffix
|
||||
helmPatch = `{"metadata":{"annotations":{"meta.helm.sh/release-name":"%s-ingress","meta.helm.sh/release-namespace":"%s"},"labels":{"helm.sh/chart":"ingress-nginx-3.35.0","app.kubernetes.io/managed-by":"Helm","app":null,"component":null,"tier":null}},"spec":{"selector":null}}`
|
||||
)
|
||||
|
||||
type GatewayOperator interface {
|
||||
@@ -62,7 +63,7 @@ type GatewayOperator interface {
|
||||
UpdateGateway(namespace string, obj *v1alpha1.Gateway) (*v1alpha1.Gateway, error)
|
||||
UpgradeGateway(namespace string) (*v1alpha1.Gateway, error)
|
||||
ListGateways(query *query.Query) (*api.ListResult, error)
|
||||
GetPods(namesapce string, query *query.Query) (*api.ListResult, error)
|
||||
GetPods(namespace string, query *query.Query) (*api.ListResult, error)
|
||||
GetPodLogs(ctx context.Context, namespace string, podName string, logOptions *corev1.PodLogOptions, responseWriter io.Writer) error
|
||||
}
|
||||
|
||||
@@ -86,10 +87,14 @@ func NewGatewayOperator(client client.Client, cache cache.Cache, options *gatewa
|
||||
|
||||
func (c *gatewayOperator) getWorkingNamespace(namespace string) string {
|
||||
ns := c.options.Namespace
|
||||
// Set the working namespace to watching namespace when the Gatway's Namsapce Option is empty
|
||||
// Set the working namespace to watching namespace when the Gateway's Namespace Option is empty
|
||||
if ns == "" {
|
||||
ns = namespace
|
||||
}
|
||||
// Convert the global gateway query parameter
|
||||
if namespace == globalGatewayNameSuffix {
|
||||
ns = workingNamespace
|
||||
}
|
||||
return ns
|
||||
}
|
||||
|
||||
@@ -97,7 +102,7 @@ func (c *gatewayOperator) getWorkingNamespace(namespace string) string {
|
||||
func (c *gatewayOperator) overrideDefaultValue(gateway *v1alpha1.Gateway, namespace string) *v1alpha1.Gateway {
|
||||
// override default name
|
||||
gateway.Name = fmt.Sprint(gatewayPrefix, namespace)
|
||||
if gateway.Name != globalGatewayname {
|
||||
if gateway.Name != globalGatewayName {
|
||||
gateway.Spec.Controller.Scope = v1alpha1.Scope{Enabled: true, Namespace: namespace}
|
||||
}
|
||||
gateway.Namespace = c.getWorkingNamespace(namespace)
|
||||
@@ -108,7 +113,7 @@ func (c *gatewayOperator) overrideDefaultValue(gateway *v1alpha1.Gateway, namesp
|
||||
func (c *gatewayOperator) getGlobalGateway() *v1alpha1.Gateway {
|
||||
globalkey := types.NamespacedName{
|
||||
Namespace: workingNamespace,
|
||||
Name: globalGatewayname,
|
||||
Name: globalGatewayName,
|
||||
}
|
||||
|
||||
global := &v1alpha1.Gateway{}
|
||||
@@ -317,7 +322,7 @@ func (c *gatewayOperator) DeleteGateway(namespace string) error {
|
||||
// Update Gateway
|
||||
func (c *gatewayOperator) UpdateGateway(namespace string, obj *v1alpha1.Gateway) (*v1alpha1.Gateway, error) {
|
||||
if c.options.Namespace == "" && obj.Namespace != namespace || c.options.Namespace != "" && c.options.Namespace != obj.Namespace {
|
||||
return nil, fmt.Errorf("namepsace doesn't match with origin namesapce")
|
||||
return nil, fmt.Errorf("namespace doesn't match with origin namespace")
|
||||
}
|
||||
c.overrideDefaultValue(obj, namespace)
|
||||
err := c.client.Update(context.TODO(), obj)
|
||||
@@ -331,7 +336,7 @@ func (c *gatewayOperator) UpgradeGateway(namespace string) (*v1alpha1.Gateway, e
|
||||
if l == nil {
|
||||
return nil, fmt.Errorf("invalid operation, no legacy gateway was found")
|
||||
}
|
||||
if l.Namespace != c.options.Namespace {
|
||||
if l.Namespace != c.getWorkingNamespace(namespace) {
|
||||
return nil, fmt.Errorf("invalid operation, can't upgrade legacy gateway when working namespace changed")
|
||||
}
|
||||
|
||||
@@ -345,7 +350,7 @@ func (c *gatewayOperator) UpgradeGateway(namespace string) (*v1alpha1.Gateway, e
|
||||
}()
|
||||
}
|
||||
|
||||
// Delete old deployment, because it's not compatile with the deployment in the helm chart.
|
||||
// Delete old deployment, because it's not compatible with the deployment in the helm chart.
|
||||
// We can't defer here, there's a potential race condition causing gateway operator fails.
|
||||
d := &appsv1.Deployment{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
@@ -451,7 +456,7 @@ func (c *gatewayOperator) compare(left runtime.Object, right runtime.Object, fie
|
||||
|
||||
func (c *gatewayOperator) filter(object runtime.Object, filter query.Filter) bool {
|
||||
var objMeta v1.ObjectMeta
|
||||
var namesapce string
|
||||
var namespace string
|
||||
|
||||
gateway, ok := object.(*v1alpha1.Gateway)
|
||||
if !ok {
|
||||
@@ -459,31 +464,31 @@ func (c *gatewayOperator) filter(object runtime.Object, filter query.Filter) boo
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
namesapce = svc.Labels["project"]
|
||||
namespace = svc.Labels["project"]
|
||||
objMeta = svc.ObjectMeta
|
||||
} else {
|
||||
namesapce = gateway.Spec.Controller.Scope.Namespace
|
||||
namespace = gateway.Spec.Controller.Scope.Namespace
|
||||
objMeta = gateway.ObjectMeta
|
||||
}
|
||||
|
||||
switch filter.Field {
|
||||
case query.FieldNamespace:
|
||||
return strings.Compare(namesapce, string(filter.Value)) == 0
|
||||
return strings.Compare(namespace, string(filter.Value)) == 0
|
||||
default:
|
||||
return v1alpha3.DefaultObjectMetaFilter(objMeta, filter)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *gatewayOperator) GetPods(namesapce string, query *query.Query) (*api.ListResult, error) {
|
||||
func (c *gatewayOperator) GetPods(namespace string, query *query.Query) (*api.ListResult, error) {
|
||||
podGetter := pod.New(c.factory.KubernetesSharedInformerFactory())
|
||||
|
||||
//TODO: move the selector string to options
|
||||
selector, err := labels.Parse(fmt.Sprintf("app.kubernetes.io/name=ingress-nginx,app.kubernetes.io/instance=kubesphere-router-%s-ingress", namesapce))
|
||||
selector, err := labels.Parse(fmt.Sprintf("app.kubernetes.io/name=ingress-nginx,app.kubernetes.io/instance=kubesphere-router-%s-ingress", namespace))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invild selector config")
|
||||
return nil, fmt.Errorf("invaild selector config")
|
||||
}
|
||||
query.LabelSelector = selector.String()
|
||||
return podGetter.List(c.getWorkingNamespace(namesapce), query)
|
||||
return podGetter.List(c.getWorkingNamespace(namespace), query)
|
||||
}
|
||||
|
||||
func (c *gatewayOperator) GetPodLogs(ctx context.Context, namespace string, podName string, logOptions *corev1.PodLogOptions, responseWriter io.Writer) error {
|
||||
|
||||
@@ -92,7 +92,7 @@ func Test_gatewayOperator_GetGateways(t *testing.T) {
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "projct1",
|
||||
namespace: "project1",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -105,7 +105,7 @@ func Test_gatewayOperator_GetGateways(t *testing.T) {
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "projct1",
|
||||
namespace: "project1",
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -336,7 +336,7 @@ func Test_gatewayOperator_CreateGateway(t *testing.T) {
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "projct1",
|
||||
namespace: "project1",
|
||||
obj: &v1alpha1.Gateway{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
Kind: "Gateway",
|
||||
@@ -346,7 +346,7 @@ func Test_gatewayOperator_CreateGateway(t *testing.T) {
|
||||
Controller: v1alpha1.ControllerSpec{
|
||||
Scope: v1alpha1.Scope{
|
||||
Enabled: true,
|
||||
Namespace: "projct1",
|
||||
Namespace: "project1",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -367,7 +367,7 @@ func Test_gatewayOperator_CreateGateway(t *testing.T) {
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "projct2",
|
||||
namespace: "project2",
|
||||
obj: &v1alpha1.Gateway{
|
||||
TypeMeta: v1.TypeMeta{
|
||||
Kind: "Gateway",
|
||||
@@ -377,7 +377,7 @@ func Test_gatewayOperator_CreateGateway(t *testing.T) {
|
||||
Controller: v1alpha1.ControllerSpec{
|
||||
Scope: v1alpha1.Scope{
|
||||
Enabled: true,
|
||||
Namespace: "projct2",
|
||||
Namespace: "project2",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -593,7 +593,7 @@ func Test_gatewayOperator_UpgradeGateway(t *testing.T) {
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
namespace: "projct1",
|
||||
namespace: "project1",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
|
||||
@@ -220,7 +220,7 @@ func (o *operator) createCSR(username string) error {
|
||||
}
|
||||
|
||||
var csrBuffer, keyBuffer bytes.Buffer
|
||||
if err = pem.Encode(&keyBuffer, &pem.Block{Type: "PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(x509key)}); err != nil {
|
||||
if err = pem.Encode(&keyBuffer, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(x509key)}); err != nil {
|
||||
klog.Errorln(err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ type openpitrixOperator struct {
|
||||
CategoryInterface
|
||||
}
|
||||
|
||||
func NewOpenpitrixOperator(ksInformers ks_informers.InformerFactory, ksClient versioned.Interface, s3Client s3.Interface, cc clusterclient.ClusterClients, stopCh <-chan struct{}) Interface {
|
||||
func NewOpenpitrixOperator(ksInformers ks_informers.InformerFactory, ksClient versioned.Interface, s3Client s3.Interface, cc clusterclient.ClusterClients) Interface {
|
||||
klog.Infof("start helm repo informer")
|
||||
cachedReposData := reposcache.NewReposCache()
|
||||
helmReposInformer := ksInformers.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmRepos().Informer()
|
||||
|
||||
@@ -302,7 +302,7 @@ func (c *repoOperator) ListRepos(conditions *params.Conditions, orderBy string,
|
||||
start, end := (&query.Pagination{Limit: limit, Offset: offset}).GetValidPagination(totalCount)
|
||||
repos = repos[start:end]
|
||||
items := make([]interface{}, 0, len(repos))
|
||||
for i, j := offset, 0; i < len(repos) && j < limit; i, j = i+1, j+1 {
|
||||
for i := range repos {
|
||||
items = append(items, convertRepo(repos[i]))
|
||||
}
|
||||
return &models.PageableResponse{Items: items, TotalCount: totalCount}, nil
|
||||
|
||||
@@ -713,7 +713,7 @@ type Repo struct {
|
||||
// selectors
|
||||
Selectors RepoSelectors `json:"selectors"`
|
||||
|
||||
// status eg.[active|deleted]
|
||||
// status eg.[successful|failed|syncing]
|
||||
Status string `json:"status,omitempty"`
|
||||
|
||||
// record status changed time
|
||||
|
||||
@@ -399,6 +399,7 @@ func convertAppVersion(in *v1alpha1.HelmApplicationVersion) *AppVersion {
|
||||
if in.Spec.Metadata != nil {
|
||||
out.Description = in.Spec.Description
|
||||
out.Icon = in.Spec.Icon
|
||||
out.Home = in.Spec.Home
|
||||
}
|
||||
|
||||
// The field Maintainers and Sources were a string field, so I encode the helm field's maintainers and sources,
|
||||
@@ -431,6 +432,10 @@ func convertRepo(in *v1alpha1.HelmRepo) *Repo {
|
||||
out.Name = in.GetTrueName()
|
||||
|
||||
out.Status = in.Status.State
|
||||
// set default status `syncing` when helmrepo not reconcile yet
|
||||
if out.Status == "" {
|
||||
out.Status = v1alpha1.RepoStateSyncing
|
||||
}
|
||||
date := strfmt.DateTime(time.Unix(in.CreationTimestamp.Unix(), 0))
|
||||
out.CreateTime = &date
|
||||
|
||||
|
||||
@@ -17,6 +17,9 @@ limitations under the License.
|
||||
package pod
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -31,7 +34,13 @@ const (
|
||||
fieldNodeName = "nodeName"
|
||||
fieldPVCName = "pvcName"
|
||||
fieldServiceName = "serviceName"
|
||||
fieldPhase = "phase"
|
||||
fieldStatus = "status"
|
||||
|
||||
statusTypeWaitting = "Waiting"
|
||||
statusTypeRunning = "Running"
|
||||
statusTypeError = "Error"
|
||||
statusTypeCompleted = "Completed"
|
||||
)
|
||||
|
||||
type podsGetter struct {
|
||||
@@ -90,6 +99,9 @@ func (p *podsGetter) filter(object runtime.Object, filter query.Filter) bool {
|
||||
case fieldServiceName:
|
||||
return p.podBelongToService(pod, string(filter.Value))
|
||||
case fieldStatus:
|
||||
_, statusType := p.getPodStatus(pod)
|
||||
return statusType == string(filter.Value)
|
||||
case fieldPhase:
|
||||
return string(pod.Status.Phase) == string(filter.Value)
|
||||
default:
|
||||
return v1alpha3.DefaultObjectMetaFilter(pod.ObjectMeta, filter)
|
||||
@@ -117,3 +129,133 @@ func (p *podsGetter) podBelongToService(item *corev1.Pod, serviceName string) bo
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// getPodStatus refer to `kubectl get po` result.
|
||||
// https://github.com/kubernetes/kubernetes/blob/45279654db87f4908911569c07afc42804f0e246/pkg/printers/internalversion/printers.go#L820-920
|
||||
// podStatusPhase = []string("Pending", "Running","Succeeded","Failed","Unknown")
|
||||
// podStatusReasons = []string{"Evicted", "NodeAffinity", "NodeLost", "Shutdown", "UnexpectedAdmissionError"}
|
||||
// containerWaitingReasons = []string{"ContainerCreating", "CrashLoopBackOff", "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff", "CreateContainerError", "InvalidImageName"}
|
||||
// containerTerminatedReasons = []string{"OOMKilled", "Completed", "Error", "ContainerCannotRun", "DeadlineExceeded", "Evicted"}
|
||||
func (p *podsGetter) getPodStatus(pod *corev1.Pod) (string, string) {
|
||||
reason := string(pod.Status.Phase)
|
||||
|
||||
if pod.Status.Reason != "" {
|
||||
reason = pod.Status.Reason
|
||||
}
|
||||
|
||||
/*
|
||||
todo: upgrade k8s.io/api version
|
||||
|
||||
// If the Pod carries {type:PodScheduled, reason:WaitingForGates}, set reason to 'SchedulingGated'.
|
||||
for _, condition := range pod.Status.Conditions {
|
||||
if condition.Type == corev1.PodScheduled && condition.Reason == corev1.PodReasonSchedulingGated {
|
||||
reason = corev1.PodReasonSchedulingGated
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
initializing := false
|
||||
for i := range pod.Status.InitContainerStatuses {
|
||||
container := pod.Status.InitContainerStatuses[i]
|
||||
|
||||
switch {
|
||||
case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
|
||||
continue
|
||||
case container.State.Terminated != nil:
|
||||
// initialization is failed
|
||||
if len(container.State.Terminated.Reason) == 0 {
|
||||
if container.State.Terminated.Signal != 0 {
|
||||
reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
|
||||
} else {
|
||||
reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
|
||||
}
|
||||
} else {
|
||||
reason = "Init:" + container.State.Terminated.Reason
|
||||
}
|
||||
initializing = true
|
||||
case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
|
||||
reason = "Init:" + container.State.Waiting.Reason
|
||||
initializing = true
|
||||
default:
|
||||
reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
|
||||
initializing = true
|
||||
}
|
||||
break
|
||||
}
|
||||
if !initializing {
|
||||
|
||||
hasRunning := false
|
||||
for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
|
||||
container := pod.Status.ContainerStatuses[i]
|
||||
|
||||
if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
|
||||
reason = container.State.Waiting.Reason
|
||||
} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
|
||||
reason = container.State.Terminated.Reason
|
||||
} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
|
||||
if container.State.Terminated.Signal != 0 {
|
||||
reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
|
||||
} else {
|
||||
reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
|
||||
}
|
||||
} else if container.Ready && container.State.Running != nil {
|
||||
hasRunning = true
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// change pod status back to "Running" if there is at least one container still reporting as "Running" status
|
||||
if reason == "Completed" && hasRunning {
|
||||
if hasPodReadyCondition(pod.Status.Conditions) {
|
||||
reason = "Running"
|
||||
} else {
|
||||
reason = "NotReady"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
|
||||
reason = "Unknown"
|
||||
} else if pod.DeletionTimestamp != nil {
|
||||
reason = "Terminating"
|
||||
}
|
||||
|
||||
statusType := statusTypeWaitting
|
||||
switch reason {
|
||||
case "Running":
|
||||
statusType = statusTypeRunning
|
||||
case "Failed":
|
||||
statusType = statusTypeError
|
||||
case "Error":
|
||||
statusType = statusTypeError
|
||||
case "Completed":
|
||||
statusType = statusTypeCompleted
|
||||
case "Succeeded":
|
||||
if isPodReadyConditionReason(pod.Status.Conditions, "PodCompleted") {
|
||||
statusType = statusTypeCompleted
|
||||
}
|
||||
default:
|
||||
if strings.HasPrefix(reason, "OutOf") {
|
||||
statusType = statusTypeError
|
||||
}
|
||||
}
|
||||
return reason, statusType
|
||||
}
|
||||
|
||||
func hasPodReadyCondition(conditions []corev1.PodCondition) bool {
|
||||
for _, condition := range conditions {
|
||||
if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isPodReadyConditionReason(conditions []corev1.PodCondition, reason string) bool {
|
||||
for _, condition := range conditions {
|
||||
if condition.Type == corev1.PodReady && condition.Reason != reason {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -78,7 +78,7 @@ func TestListPods(t *testing.T) {
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"test status filter",
|
||||
"test phase filter",
|
||||
"default",
|
||||
&query.Query{
|
||||
Pagination: &query.Pagination{
|
||||
@@ -89,7 +89,7 @@ func TestListPods(t *testing.T) {
|
||||
Ascending: false,
|
||||
Filters: map[query.Field]query.Value{
|
||||
query.FieldNamespace: query.Value("default"),
|
||||
fieldStatus: query.Value(corev1.PodRunning),
|
||||
fieldPhase: query.Value(corev1.PodRunning),
|
||||
},
|
||||
},
|
||||
&api.ListResult{
|
||||
@@ -163,6 +163,7 @@ var (
|
||||
Phase: corev1.PodRunning,
|
||||
},
|
||||
}
|
||||
|
||||
pods = []interface{}{foo1, foo2, foo3, foo4, foo5}
|
||||
)
|
||||
|
||||
|
||||
@@ -24,7 +24,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@@ -69,6 +71,8 @@ import (
|
||||
loggingclient "kubesphere.io/kubesphere/pkg/simple/client/logging"
|
||||
meteringclient "kubesphere.io/kubesphere/pkg/simple/client/metering"
|
||||
monitoringclient "kubesphere.io/kubesphere/pkg/simple/client/monitoring"
|
||||
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
|
||||
jsonpatchutil "kubesphere.io/kubesphere/pkg/utils/josnpatchutil"
|
||||
"kubesphere.io/kubesphere/pkg/utils/stringutils"
|
||||
)
|
||||
|
||||
@@ -78,10 +82,10 @@ type Interface interface {
|
||||
ListWorkspaces(user user.Info, queryParam *query.Query) (*api.ListResult, error)
|
||||
GetWorkspace(workspace string) (*tenantv1alpha1.Workspace, error)
|
||||
ListWorkspaceTemplates(user user.Info, query *query.Query) (*api.ListResult, error)
|
||||
CreateWorkspaceTemplate(workspace *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha2.WorkspaceTemplate, error)
|
||||
CreateWorkspaceTemplate(user user.Info, workspace *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha2.WorkspaceTemplate, error)
|
||||
DeleteWorkspaceTemplate(workspace string, opts metav1.DeleteOptions) error
|
||||
UpdateWorkspaceTemplate(workspace *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha2.WorkspaceTemplate, error)
|
||||
PatchWorkspaceTemplate(workspace string, data json.RawMessage) (*tenantv1alpha2.WorkspaceTemplate, error)
|
||||
UpdateWorkspaceTemplate(user user.Info, workspace *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha2.WorkspaceTemplate, error)
|
||||
PatchWorkspaceTemplate(user user.Info, workspace string, data json.RawMessage) (*tenantv1alpha2.WorkspaceTemplate, error)
|
||||
DescribeWorkspaceTemplate(workspace string) (*tenantv1alpha2.WorkspaceTemplate, error)
|
||||
ListNamespaces(user user.Info, workspace string, query *query.Query) (*api.ListResult, error)
|
||||
ListDevOpsProjects(user user.Info, workspace string, query *query.Query) (*api.ListResult, error)
|
||||
@@ -117,6 +121,7 @@ type tenantOperator struct {
|
||||
auditing auditing.Interface
|
||||
mo monitoring.MonitoringOperator
|
||||
opRelease openpitrix.ReleaseInterface
|
||||
clusterClient clusterclient.ClusterClients
|
||||
}
|
||||
|
||||
func New(informers informers.InformerFactory, k8sclient kubernetes.Interface, ksclient kubesphere.Interface, evtsClient eventsclient.Client, loggingClient loggingclient.Client, auditingclient auditingclient.Client, am am.AccessManagementInterface, im im.IdentityManagementInterface, authorizer authorizer.Authorizer, monitoringclient monitoringclient.Interface, resourceGetter *resourcev1alpha3.ResourceGetter, opClient openpitrix.Interface) Interface {
|
||||
@@ -132,6 +137,7 @@ func New(informers informers.InformerFactory, k8sclient kubernetes.Interface, ks
|
||||
auditing: auditing.NewEventsOperator(auditingclient),
|
||||
mo: monitoring.NewMonitoringOperator(monitoringclient, nil, k8sclient, informers, resourceGetter, nil),
|
||||
opRelease: opClient,
|
||||
clusterClient: clusterclient.NewClusterClient(informers.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Clusters()),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -470,15 +476,111 @@ func (t *tenantOperator) PatchNamespace(workspace string, namespace *corev1.Name
|
||||
return t.k8sclient.CoreV1().Namespaces().Patch(context.Background(), namespace.Name, types.MergePatchType, data, metav1.PatchOptions{})
|
||||
}
|
||||
|
||||
func (t *tenantOperator) PatchWorkspaceTemplate(workspace string, data json.RawMessage) (*tenantv1alpha2.WorkspaceTemplate, error) {
|
||||
return t.ksclient.TenantV1alpha2().WorkspaceTemplates().Patch(context.Background(), workspace, types.MergePatchType, data, metav1.PatchOptions{})
|
||||
func (t *tenantOperator) PatchWorkspaceTemplate(user user.Info, workspace string, data json.RawMessage) (*tenantv1alpha2.WorkspaceTemplate, error) {
|
||||
var manageWorkspaceTemplateRequest bool
|
||||
clusterNames := sets.NewString()
|
||||
|
||||
patchs, err := jsonpatchutil.Parse(data)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(patchs) > 0 {
|
||||
for _, patch := range patchs {
|
||||
path, err := patch.Path()
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the request path is cluster, just collecting cluster name to set and continue to check cluster permission later.
|
||||
// Or indicate that want to manage the workspace templates, so check if user has the permission to manage workspace templates.
|
||||
if strings.HasPrefix(path, "/spec/placement") {
|
||||
if patch.Kind() != "add" && patch.Kind() != "remove" {
|
||||
err := errors.NewBadRequest("not support operation type")
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
clusterValue := make(map[string]interface{})
|
||||
err := jsonpatchutil.GetValue(patch, &clusterValue)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if the placement is empty, the first patch need fill with "clusters" field.
|
||||
if cName := clusterValue["name"]; cName != nil {
|
||||
cn, ok := cName.(string)
|
||||
if ok {
|
||||
clusterNames.Insert(cn)
|
||||
}
|
||||
} else if cluster := clusterValue["clusters"]; cluster != nil {
|
||||
clusterRefrences := []typesv1beta1.GenericClusterReference{}
|
||||
err := mapstructure.Decode(cluster, &clusterRefrences)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
for _, v := range clusterRefrences {
|
||||
clusterNames.Insert(v.Name)
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
manageWorkspaceTemplateRequest = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if manageWorkspaceTemplateRequest {
|
||||
err := t.checkWorkspaceTemplatePermission(user, workspace)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if clusterNames.Len() > 0 {
|
||||
err := t.checkClusterPermission(user, clusterNames.List())
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return t.ksclient.TenantV1alpha2().WorkspaceTemplates().Patch(context.Background(), workspace, types.JSONPatchType, data, metav1.PatchOptions{})
|
||||
}
|
||||
|
||||
func (t *tenantOperator) CreateWorkspaceTemplate(workspace *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha2.WorkspaceTemplate, error) {
|
||||
func (t *tenantOperator) CreateWorkspaceTemplate(user user.Info, workspace *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha2.WorkspaceTemplate, error) {
|
||||
if len(workspace.Spec.Placement.Clusters) != 0 {
|
||||
clusters := make([]string, 0)
|
||||
for _, v := range workspace.Spec.Placement.Clusters {
|
||||
clusters = append(clusters, v.Name)
|
||||
}
|
||||
err := t.checkClusterPermission(user, clusters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
return t.ksclient.TenantV1alpha2().WorkspaceTemplates().Create(context.Background(), workspace, metav1.CreateOptions{})
|
||||
}
|
||||
|
||||
func (t *tenantOperator) UpdateWorkspaceTemplate(workspace *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha2.WorkspaceTemplate, error) {
|
||||
func (t *tenantOperator) UpdateWorkspaceTemplate(user user.Info, workspace *tenantv1alpha2.WorkspaceTemplate) (*tenantv1alpha2.WorkspaceTemplate, error) {
|
||||
if len(workspace.Spec.Placement.Clusters) != 0 {
|
||||
clusters := make([]string, 0)
|
||||
for _, v := range workspace.Spec.Placement.Clusters {
|
||||
clusters = append(clusters, v.Name)
|
||||
}
|
||||
err := t.checkClusterPermission(user, clusters)
|
||||
if err != nil {
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
return t.ksclient.TenantV1alpha2().WorkspaceTemplates().Update(context.Background(), workspace, metav1.UpdateOptions{})
|
||||
}
|
||||
|
||||
@@ -1081,6 +1183,16 @@ func (t *tenantOperator) MeteringHierarchy(user user.Info, queryParam *meteringv
|
||||
return resourceStats, nil
|
||||
}
|
||||
|
||||
func (t *tenantOperator) getClusterRoleBindingsByUser(clusterName, user string) (*rbacv1.ClusterRoleBindingList, error) {
|
||||
kubernetesClientSet, err := t.clusterClient.GetKubernetesClientSet(clusterName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return kubernetesClientSet.RbacV1().ClusterRoleBindings().
|
||||
List(context.Background(),
|
||||
metav1.ListOptions{LabelSelector: labels.FormatLabels(map[string]string{"iam.kubesphere.io/user-ref": user})})
|
||||
}
|
||||
|
||||
func contains(objects []runtime.Object, object runtime.Object) bool {
|
||||
for _, item := range objects {
|
||||
if item == object {
|
||||
@@ -1106,3 +1218,78 @@ func stringContains(str string, subStrs []string) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *tenantOperator) checkWorkspaceTemplatePermission(user user.Info, workspace string) error {
|
||||
deleteWST := authorizer.AttributesRecord{
|
||||
User: user,
|
||||
Verb: authorizer.VerbDelete,
|
||||
APIGroup: tenantv1alpha2.SchemeGroupVersion.Group,
|
||||
APIVersion: tenantv1alpha2.SchemeGroupVersion.Version,
|
||||
Resource: tenantv1alpha2.ResourcePluralWorkspaceTemplate,
|
||||
ResourceRequest: true,
|
||||
ResourceScope: request.GlobalScope,
|
||||
}
|
||||
authorize, reason, err := t.authorizer.Authorize(deleteWST)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if authorize != authorizer.DecisionAllow {
|
||||
return errors.NewForbidden(tenantv1alpha2.Resource(tenantv1alpha2.ResourcePluralWorkspaceTemplate), workspace, fmt.Errorf(reason))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tenantOperator) checkClusterPermission(user user.Info, clusters []string) error {
|
||||
// Checking whether the user can manage the cluster requires authentication from two aspects.
|
||||
// First check whether the user has relevant global permissions,
|
||||
// and then check whether the user has relevant cluster permissions in the target cluster
|
||||
|
||||
for _, clusterName := range clusters {
|
||||
|
||||
cluster, err := t.ksclient.ClusterV1alpha1().Clusters().Get(context.Background(), clusterName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if cluster.Labels["cluster.kubesphere.io/visibility"] == "public" {
|
||||
continue
|
||||
}
|
||||
|
||||
deleteCluster := authorizer.AttributesRecord{
|
||||
User: user,
|
||||
Verb: authorizer.VerbDelete,
|
||||
APIGroup: clusterv1alpha1.SchemeGroupVersion.Group,
|
||||
APIVersion: clusterv1alpha1.SchemeGroupVersion.Version,
|
||||
Resource: clusterv1alpha1.ResourcesPluralCluster,
|
||||
Cluster: clusterName,
|
||||
ResourceRequest: true,
|
||||
ResourceScope: request.GlobalScope,
|
||||
}
|
||||
authorize, _, err := t.authorizer.Authorize(deleteCluster)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if authorize == authorizer.DecisionAllow {
|
||||
continue
|
||||
}
|
||||
|
||||
list, err := t.getClusterRoleBindingsByUser(clusterName, user.GetName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allowed := false
|
||||
for _, clusterRolebinding := range list.Items {
|
||||
if clusterRolebinding.RoleRef.Name == iamv1alpha2.ClusterAdmin {
|
||||
allowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !allowed {
|
||||
return errors.NewForbidden(clusterv1alpha1.Resource(clusterv1alpha1.ResourcesPluralCluster), clusterName, fmt.Errorf("user is not allowed to use the cluster %s", clusterName))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -44,6 +44,8 @@ import (
|
||||
const (
|
||||
// Time allowed to write a message to the peer.
|
||||
writeWait = 10 * time.Second
|
||||
// ctrl+d to close terminal.
|
||||
endOfTransmission = "\u0004"
|
||||
)
|
||||
|
||||
// PtyHandler is what remotecommand expects from a pty
|
||||
@@ -76,7 +78,7 @@ type TerminalMessage struct {
|
||||
Rows, Cols uint16
|
||||
}
|
||||
|
||||
// TerminalSize handles pty->process resize events
|
||||
// Next handles pty->process resize events
|
||||
// Called in a loop from remotecommand as long as the process is running
|
||||
func (t TerminalSession) Next() *remotecommand.TerminalSize {
|
||||
select {
|
||||
@@ -95,7 +97,7 @@ func (t TerminalSession) Read(p []byte) (int, error) {
|
||||
var msg TerminalMessage
|
||||
err := t.conn.ReadJSON(&msg)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return copy(p, endOfTransmission), err
|
||||
}
|
||||
|
||||
switch msg.Op {
|
||||
@@ -105,7 +107,7 @@ func (t TerminalSession) Read(p []byte) (int, error) {
|
||||
t.sizeChan <- remotecommand.TerminalSize{Width: msg.Cols, Height: msg.Rows}
|
||||
return 0, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("unknown message type '%s'", msg.Op)
|
||||
return copy(p, endOfTransmission), fmt.Errorf("unknown message type '%s'", msg.Op)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -215,7 +217,7 @@ func (n *NodeTerminaler) getNSEnterPod() (*v1.Pod, error) {
|
||||
pod, err := n.client.CoreV1().Pods(n.Namespace).Get(context.Background(), n.PodName, metav1.GetOptions{})
|
||||
|
||||
if err != nil || (pod.Status.Phase != v1.PodRunning && pod.Status.Phase != v1.PodPending) {
|
||||
//pod has timed out, but has not been cleaned up
|
||||
// pod has timed out, but has not been cleaned up
|
||||
if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
|
||||
err := n.client.CoreV1().Pods(n.Namespace).Delete(context.Background(), n.PodName, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
@@ -328,7 +330,7 @@ func isValidShell(validShells []string, shell string) bool {
|
||||
|
||||
func (t *terminaler) HandleSession(shell, namespace, podName, containerName string, conn *websocket.Conn) {
|
||||
var err error
|
||||
validShells := []string{"sh", "bash"}
|
||||
validShells := []string{"bash", "sh"}
|
||||
|
||||
session := &TerminalSession{conn: conn, sizeChan: make(chan remotecommand.TerminalSize)}
|
||||
|
||||
|
||||
41
pkg/simple/client/cache/cache.go
vendored
41
pkg/simple/client/cache/cache.go
vendored
@@ -16,7 +16,17 @@ limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import "time"
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
var (
|
||||
cacheFactories = make(map[string]CacheFactory)
|
||||
)
|
||||
|
||||
var NeverExpire = time.Duration(0)
|
||||
|
||||
@@ -39,3 +49,32 @@ type Interface interface {
|
||||
// Expires updates object's expiration time, return err if key doesn't exist
|
||||
Expire(key string, duration time.Duration) error
|
||||
}
|
||||
|
||||
// DynamicOptions the options of the cache. For redis, options key can be "host", "port", "db", "password".
|
||||
// For InMemoryCache, options key can be "cleanupperiod"
|
||||
type DynamicOptions map[string]interface{}
|
||||
|
||||
func (o DynamicOptions) MarshalJSON() ([]byte, error) {
|
||||
|
||||
data, err := json.Marshal(o)
|
||||
return data, err
|
||||
}
|
||||
|
||||
func RegisterCacheFactory(factory CacheFactory) {
|
||||
cacheFactories[factory.Type()] = factory
|
||||
}
|
||||
|
||||
func New(option *Options, stopCh <-chan struct{}) (Interface, error) {
|
||||
if cacheFactories[option.Type] == nil {
|
||||
err := fmt.Errorf("cache with type %s is not supported", option.Type)
|
||||
klog.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cache, err := cacheFactories[option.Type].Create(option.Options, stopCh)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to create cache, error: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return cache, nil
|
||||
}
|
||||
|
||||
8
pkg/simple/client/cache/factory.go
vendored
Normal file
8
pkg/simple/client/cache/factory.go
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
package cache
|
||||
|
||||
type CacheFactory interface {
|
||||
// Type unique type of the cache
|
||||
Type() string
|
||||
// Create relevant caches by type
|
||||
Create(options DynamicOptions, stopCh <-chan struct{}) (Interface, error)
|
||||
}
|
||||
200
pkg/simple/client/cache/inmemory_cache.go
vendored
Normal file
200
pkg/simple/client/cache/inmemory_cache.go
vendored
Normal file
@@ -0,0 +1,200 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/server/errors"
|
||||
)
|
||||
|
||||
var ErrNoSuchKey = errors.New("no such key")
|
||||
|
||||
const (
|
||||
typeInMemoryCache = "InMemoryCache"
|
||||
DefaultCacheType = typeInMemoryCache
|
||||
|
||||
defaultCleanupPeriod = 2 * time.Hour
|
||||
)
|
||||
|
||||
type simpleObject struct {
|
||||
value string
|
||||
neverExpire bool
|
||||
expiredAt time.Time
|
||||
}
|
||||
|
||||
func (so *simpleObject) IsExpired() bool {
|
||||
if so.neverExpire {
|
||||
return false
|
||||
}
|
||||
if time.Now().After(so.expiredAt) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// InMemoryCacheOptions used to create inMemoryCache in memory.
|
||||
// CleanupPeriod specifies cleans up expired token every period.
|
||||
// Note the SimpleCache cannot be used in multi-replicas apiserver,
|
||||
// which will lead to data inconsistency.
|
||||
type InMemoryCacheOptions struct {
|
||||
CleanupPeriod time.Duration `json:"cleanupPeriod" yaml:"cleanupPeriod" mapstructure:"cleanupperiod"`
|
||||
}
|
||||
|
||||
// imMemoryCache implements cache.Interface use memory objects, it should be used only for testing
|
||||
type inMemoryCache struct {
|
||||
store map[string]simpleObject
|
||||
}
|
||||
|
||||
func NewInMemoryCache(options *InMemoryCacheOptions, stopCh <-chan struct{}) (Interface, error) {
|
||||
var cleanupPeriod time.Duration
|
||||
cache := &inMemoryCache{
|
||||
store: make(map[string]simpleObject),
|
||||
}
|
||||
|
||||
if options == nil || options.CleanupPeriod == 0 {
|
||||
cleanupPeriod = defaultCleanupPeriod
|
||||
} else {
|
||||
cleanupPeriod = options.CleanupPeriod
|
||||
}
|
||||
go wait.Until(cache.cleanInvalidToken, cleanupPeriod, stopCh)
|
||||
|
||||
return cache, nil
|
||||
}
|
||||
|
||||
func (s *inMemoryCache) cleanInvalidToken() {
|
||||
for k, v := range s.store {
|
||||
if v.IsExpired() {
|
||||
delete(s.store, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *inMemoryCache) Keys(pattern string) ([]string, error) {
|
||||
// There is a little difference between go regexp and redis key pattern
|
||||
// In redis, * means any character, while in go . means match everything.
|
||||
pattern = strings.Replace(pattern, "*", ".", -1)
|
||||
|
||||
re, err := regexp.Compile(pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var keys []string
|
||||
for k := range s.store {
|
||||
if re.MatchString(k) {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func (s *inMemoryCache) Set(key string, value string, duration time.Duration) error {
|
||||
sobject := simpleObject{
|
||||
value: value,
|
||||
neverExpire: false,
|
||||
expiredAt: time.Now().Add(duration),
|
||||
}
|
||||
|
||||
if duration == NeverExpire {
|
||||
sobject.neverExpire = true
|
||||
}
|
||||
|
||||
s.store[key] = sobject
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *inMemoryCache) Del(keys ...string) error {
|
||||
for _, key := range keys {
|
||||
delete(s.store, key)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *inMemoryCache) Get(key string) (string, error) {
|
||||
if sobject, ok := s.store[key]; ok {
|
||||
if sobject.neverExpire || time.Now().Before(sobject.expiredAt) {
|
||||
return sobject.value, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", ErrNoSuchKey
|
||||
}
|
||||
|
||||
func (s *inMemoryCache) Exists(keys ...string) (bool, error) {
|
||||
for _, key := range keys {
|
||||
if _, ok := s.store[key]; !ok {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (s *inMemoryCache) Expire(key string, duration time.Duration) error {
|
||||
value, err := s.Get(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sobject := simpleObject{
|
||||
value: value,
|
||||
neverExpire: false,
|
||||
expiredAt: time.Now().Add(duration),
|
||||
}
|
||||
|
||||
if duration == NeverExpire {
|
||||
sobject.neverExpire = true
|
||||
}
|
||||
|
||||
s.store[key] = sobject
|
||||
return nil
|
||||
}
|
||||
|
||||
type inMemoryCacheFactory struct {
|
||||
}
|
||||
|
||||
func (sf *inMemoryCacheFactory) Type() string {
|
||||
return typeInMemoryCache
|
||||
}
|
||||
|
||||
func (sf *inMemoryCacheFactory) Create(options DynamicOptions, stopCh <-chan struct{}) (Interface, error) {
|
||||
var sOptions InMemoryCacheOptions
|
||||
|
||||
decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
|
||||
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
|
||||
WeaklyTypedInput: true,
|
||||
Result: &sOptions,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := decoder.Decode(options); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewInMemoryCache(&sOptions, stopCh)
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterCacheFactory(&inMemoryCacheFactory{})
|
||||
}
|
||||
@@ -102,7 +102,7 @@ func TestDeleteAndExpireCache(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
cacheClient := NewSimpleCache()
|
||||
cacheClient, _ := NewInMemoryCache(nil, nil)
|
||||
|
||||
t.Run(testCase.description, func(t *testing.T) {
|
||||
err := load(cacheClient, dataSet)
|
||||
33
pkg/simple/client/cache/options.go
vendored
33
pkg/simple/client/cache/options.go
vendored
@@ -18,25 +18,19 @@ package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
Host string `json:"host" yaml:"host"`
|
||||
Port int `json:"port" yaml:"port"`
|
||||
Password string `json:"password" yaml:"password"`
|
||||
DB int `json:"db" yaml:"db"`
|
||||
Type string `json:"type"`
|
||||
Options DynamicOptions `json:"options"`
|
||||
}
|
||||
|
||||
// NewRedisOptions returns options points to nowhere,
|
||||
// NewCacheOptions returns options points to nowhere,
|
||||
// because redis is not required for some components
|
||||
func NewRedisOptions() *Options {
|
||||
func NewCacheOptions() *Options {
|
||||
return &Options{
|
||||
Host: "",
|
||||
Port: 0,
|
||||
Password: "",
|
||||
DB: 0,
|
||||
Type: "",
|
||||
Options: map[string]interface{}{},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,20 +38,9 @@ func NewRedisOptions() *Options {
|
||||
func (r *Options) Validate() []error {
|
||||
errors := make([]error, 0)
|
||||
|
||||
if r.Port == 0 {
|
||||
errors = append(errors, fmt.Errorf("invalid service port number"))
|
||||
if r.Type == "" {
|
||||
errors = append(errors, fmt.Errorf("invalid cache type"))
|
||||
}
|
||||
|
||||
return errors
|
||||
}
|
||||
|
||||
// AddFlags add option flags to command line flags,
|
||||
// if redis-host left empty, the following options will be ignored.
|
||||
func (r *Options) AddFlags(fs *pflag.FlagSet, s *Options) {
|
||||
fs.StringVar(&r.Host, "redis-host", s.Host, "Redis connection URL. If left blank, means redis is unnecessary, "+
|
||||
"redis will be disabled.")
|
||||
|
||||
fs.IntVar(&r.Port, "redis-port", s.Port, "")
|
||||
fs.StringVar(&r.Password, "redis-password", s.Password, "")
|
||||
fs.IntVar(&r.DB, "redis-db", s.DB, "")
|
||||
}
|
||||
|
||||
58
pkg/simple/client/cache/redis.go
vendored
58
pkg/simple/client/cache/redis.go
vendored
@@ -17,19 +17,31 @@ limitations under the License.
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/go-redis/redis"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
type Client struct {
|
||||
const typeRedis = "redis"
|
||||
|
||||
type redisClient struct {
|
||||
client *redis.Client
|
||||
}
|
||||
|
||||
func NewRedisClient(option *Options, stopCh <-chan struct{}) (Interface, error) {
|
||||
var r Client
|
||||
// redisOptions used to create a redis client.
|
||||
type redisOptions struct {
|
||||
Host string `json:"host" yaml:"host" mapstructure:"host"`
|
||||
Port int `json:"port" yaml:"port" mapstructure:"port"`
|
||||
Password string `json:"password" yaml:"password" mapstructure:"password"`
|
||||
DB int `json:"db" yaml:"db" mapstructure:"db"`
|
||||
}
|
||||
|
||||
func NewRedisClient(option *redisOptions, stopCh <-chan struct{}) (Interface, error) {
|
||||
var r redisClient
|
||||
|
||||
redisOptions := &redis.Options{
|
||||
Addr: fmt.Sprintf("%s:%d", option.Host, option.Port),
|
||||
@@ -61,23 +73,23 @@ func NewRedisClient(option *Options, stopCh <-chan struct{}) (Interface, error)
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
func (r *Client) Get(key string) (string, error) {
|
||||
func (r *redisClient) Get(key string) (string, error) {
|
||||
return r.client.Get(key).Result()
|
||||
}
|
||||
|
||||
func (r *Client) Keys(pattern string) ([]string, error) {
|
||||
func (r *redisClient) Keys(pattern string) ([]string, error) {
|
||||
return r.client.Keys(pattern).Result()
|
||||
}
|
||||
|
||||
func (r *Client) Set(key string, value string, duration time.Duration) error {
|
||||
func (r *redisClient) Set(key string, value string, duration time.Duration) error {
|
||||
return r.client.Set(key, value, duration).Err()
|
||||
}
|
||||
|
||||
func (r *Client) Del(keys ...string) error {
|
||||
func (r *redisClient) Del(keys ...string) error {
|
||||
return r.client.Del(keys...).Err()
|
||||
}
|
||||
|
||||
func (r *Client) Exists(keys ...string) (bool, error) {
|
||||
func (r *redisClient) Exists(keys ...string) (bool, error) {
|
||||
existedKeys, err := r.client.Exists(keys...).Result()
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -86,6 +98,34 @@ func (r *Client) Exists(keys ...string) (bool, error) {
|
||||
return len(keys) == int(existedKeys), nil
|
||||
}
|
||||
|
||||
func (r *Client) Expire(key string, duration time.Duration) error {
|
||||
func (r *redisClient) Expire(key string, duration time.Duration) error {
|
||||
return r.client.Expire(key, duration).Err()
|
||||
}
|
||||
|
||||
type redisFactory struct{}
|
||||
|
||||
func (rf *redisFactory) Type() string {
|
||||
return typeRedis
|
||||
}
|
||||
|
||||
func (rf *redisFactory) Create(options DynamicOptions, stopCh <-chan struct{}) (Interface, error) {
|
||||
var rOptions redisOptions
|
||||
if err := mapstructure.Decode(options, &rOptions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rOptions.Port == 0 {
|
||||
return nil, errors.New("invalid service port number")
|
||||
}
|
||||
if len(rOptions.Host) == 0 {
|
||||
return nil, errors.New("invalid service host")
|
||||
}
|
||||
client, err := NewRedisClient(&rOptions, stopCh)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterCacheFactory(&redisFactory{})
|
||||
}
|
||||
|
||||
123
pkg/simple/client/cache/simple_cache.go
vendored
123
pkg/simple/client/cache/simple_cache.go
vendored
@@ -1,123 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The KubeSphere Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"kubesphere.io/kubesphere/pkg/server/errors"
|
||||
)
|
||||
|
||||
var ErrNoSuchKey = errors.New("no such key")
|
||||
|
||||
type simpleObject struct {
|
||||
value string
|
||||
neverExpire bool
|
||||
expiredAt time.Time
|
||||
}
|
||||
|
||||
// SimpleCache implements cache.Interface use memory objects, it should be used only for testing
|
||||
type simpleCache struct {
|
||||
store map[string]simpleObject
|
||||
}
|
||||
|
||||
func NewSimpleCache() Interface {
|
||||
return &simpleCache{store: make(map[string]simpleObject)}
|
||||
}
|
||||
|
||||
func (s *simpleCache) Keys(pattern string) ([]string, error) {
|
||||
// There is a little difference between go regexp and redis key pattern
|
||||
// In redis, * means any character, while in go . means match everything.
|
||||
pattern = strings.Replace(pattern, "*", ".", -1)
|
||||
|
||||
re, err := regexp.Compile(pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var keys []string
|
||||
for k := range s.store {
|
||||
if re.MatchString(k) {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func (s *simpleCache) Set(key string, value string, duration time.Duration) error {
|
||||
sobject := simpleObject{
|
||||
value: value,
|
||||
neverExpire: false,
|
||||
expiredAt: time.Now().Add(duration),
|
||||
}
|
||||
|
||||
if duration == NeverExpire {
|
||||
sobject.neverExpire = true
|
||||
}
|
||||
|
||||
s.store[key] = sobject
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *simpleCache) Del(keys ...string) error {
|
||||
for _, key := range keys {
|
||||
delete(s.store, key)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *simpleCache) Get(key string) (string, error) {
|
||||
if sobject, ok := s.store[key]; ok {
|
||||
if sobject.neverExpire || time.Now().Before(sobject.expiredAt) {
|
||||
return sobject.value, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", ErrNoSuchKey
|
||||
}
|
||||
|
||||
func (s *simpleCache) Exists(keys ...string) (bool, error) {
|
||||
for _, key := range keys {
|
||||
if _, ok := s.store[key]; !ok {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (s *simpleCache) Expire(key string, duration time.Duration) error {
|
||||
value, err := s.Get(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sobject := simpleObject{
|
||||
value: value,
|
||||
neverExpire: false,
|
||||
expiredAt: time.Now().Add(duration),
|
||||
}
|
||||
|
||||
if duration == NeverExpire {
|
||||
sobject.neverExpire = true
|
||||
}
|
||||
|
||||
s.store[key] = sobject
|
||||
return nil
|
||||
}
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
@@ -39,6 +38,11 @@ func TestClient_Get(t *testing.T) {
|
||||
type args struct {
|
||||
url string
|
||||
}
|
||||
|
||||
inMemoryCache, err := cache.NewInMemoryCache(nil, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
token, _ := json.Marshal(
|
||||
&TokenResponse{
|
||||
Username: "test",
|
||||
@@ -58,7 +62,7 @@ func TestClient_Get(t *testing.T) {
|
||||
Strategy: AuthStrategyAnonymous,
|
||||
cache: nil,
|
||||
client: &MockClient{
|
||||
requestResult: "fake",
|
||||
RequestResult: "fake",
|
||||
},
|
||||
ServiceToken: "token",
|
||||
Host: "http://kiali.istio-system.svc",
|
||||
@@ -76,8 +80,8 @@ func TestClient_Get(t *testing.T) {
|
||||
Strategy: AuthStrategyToken,
|
||||
cache: nil,
|
||||
client: &MockClient{
|
||||
tokenResult: token,
|
||||
requestResult: "fake",
|
||||
TokenResult: token,
|
||||
RequestResult: "fake",
|
||||
},
|
||||
ServiceToken: "token",
|
||||
Host: "http://kiali.istio-system.svc",
|
||||
@@ -93,10 +97,10 @@ func TestClient_Get(t *testing.T) {
|
||||
name: "Token",
|
||||
fields: fields{
|
||||
Strategy: AuthStrategyToken,
|
||||
cache: cache.NewSimpleCache(),
|
||||
cache: inMemoryCache,
|
||||
client: &MockClient{
|
||||
tokenResult: token,
|
||||
requestResult: "fake",
|
||||
TokenResult: token,
|
||||
RequestResult: "fake",
|
||||
},
|
||||
ServiceToken: "token",
|
||||
Host: "http://kiali.istio-system.svc",
|
||||
@@ -129,22 +133,3 @@ func TestClient_Get(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type MockClient struct {
|
||||
tokenResult []byte
|
||||
requestResult string
|
||||
}
|
||||
|
||||
func (c *MockClient) Do(req *http.Request) (*http.Response, error) {
|
||||
return &http.Response{
|
||||
StatusCode: 200,
|
||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(c.requestResult))),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *MockClient) PostForm(url string, data url.Values) (resp *http.Response, err error) {
|
||||
return &http.Response{
|
||||
StatusCode: 200,
|
||||
Body: ioutil.NopCloser(bytes.NewReader(c.tokenResult)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
27
pkg/simple/client/kiali/mock_client.go
Normal file
27
pkg/simple/client/kiali/mock_client.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package kiali
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
type MockClient struct {
|
||||
TokenResult []byte
|
||||
RequestResult string
|
||||
}
|
||||
|
||||
func (c *MockClient) Do(req *http.Request) (*http.Response, error) {
|
||||
return &http.Response{
|
||||
StatusCode: 200,
|
||||
Body: ioutil.NopCloser(bytes.NewReader([]byte(c.RequestResult))),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *MockClient) PostForm(url string, data url.Values) (resp *http.Response, err error) {
|
||||
return &http.Response{
|
||||
StatusCode: 200,
|
||||
Body: ioutil.NopCloser(bytes.NewReader(c.TokenResult)),
|
||||
}, nil
|
||||
}
|
||||
@@ -20,7 +20,6 @@ import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-ldap/ldap"
|
||||
@@ -63,8 +62,6 @@ type ldapInterfaceImpl struct {
|
||||
groupSearchBase string
|
||||
managerDN string
|
||||
managerPassword string
|
||||
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
var _ Interface = &ldapInterfaceImpl{}
|
||||
@@ -95,7 +92,6 @@ func NewLdapClient(options *Options, stopCh <-chan struct{}) (Interface, error)
|
||||
groupSearchBase: options.GroupSearchBase,
|
||||
managerDN: options.ManagerDN,
|
||||
managerPassword: options.ManagerPassword,
|
||||
once: sync.Once{},
|
||||
}
|
||||
|
||||
go func() {
|
||||
@@ -103,9 +99,7 @@ func NewLdapClient(options *Options, stopCh <-chan struct{}) (Interface, error)
|
||||
client.close()
|
||||
}()
|
||||
|
||||
client.once.Do(func() {
|
||||
_ = client.createSearchBase()
|
||||
})
|
||||
_ = client.createSearchBase()
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
@@ -177,7 +177,7 @@ var promQLTemplates = map[string]string{
|
||||
"ingress_success_rate": `sum(rate(nginx_ingress_controller_requests{$1,$2,status!~"[4-5].*"}[$3])) / sum(rate(nginx_ingress_controller_requests{$1,$2}[$3]))`,
|
||||
"ingress_request_duration_average": `sum_over_time(nginx_ingress_controller_request_duration_seconds_sum{$1,$2}[$3])/sum_over_time(nginx_ingress_controller_request_duration_seconds_count{$1,$2}[$3])`,
|
||||
"ingress_request_duration_50percentage": `histogram_quantile(0.50, sum by (le) (rate(nginx_ingress_controller_request_duration_seconds_bucket{$1,$2}[$3])))`,
|
||||
"ingress_request_duration_95percentage": `histogram_quantile(0.90, sum by (le) (rate(nginx_ingress_controller_request_duration_seconds_bucket{$1,$2}[$3])))`,
|
||||
"ingress_request_duration_95percentage": `histogram_quantile(0.95, sum by (le) (rate(nginx_ingress_controller_request_duration_seconds_bucket{$1,$2}[$3])))`,
|
||||
"ingress_request_duration_99percentage": `histogram_quantile(0.99, sum by (le) (rate(nginx_ingress_controller_request_duration_seconds_bucket{$1,$2}[$3])))`,
|
||||
"ingress_request_volume": `round(sum(irate(nginx_ingress_controller_requests{$1,$2}[$3])), 0.001)`,
|
||||
"ingress_request_volume_by_ingress": `round(sum(irate(nginx_ingress_controller_requests{$1,$2}[$3])) by (ingress), 0.001)`,
|
||||
|
||||
@@ -74,6 +74,10 @@ func (h HelmVersionWrapper) GetKeywords() string {
|
||||
return strings.Join(h.ChartVersion.Keywords, ",")
|
||||
}
|
||||
|
||||
func (h HelmVersionWrapper) GetRawKeywords() []string {
|
||||
return h.ChartVersion.Keywords
|
||||
}
|
||||
|
||||
func (h HelmVersionWrapper) GetRawMaintainers() []*v1alpha1.Maintainer {
|
||||
mt := make([]*v1alpha1.Maintainer, 0, len(h.Maintainers))
|
||||
for _, value := range h.Maintainers {
|
||||
|
||||
@@ -99,6 +99,9 @@ func MergeRepoIndex(repo *v1alpha1.HelmRepo, index *helmrepo.IndexFile, existsSa
|
||||
|
||||
allAppNames := make(map[string]struct{}, len(index.Entries))
|
||||
for name, versions := range index.Entries {
|
||||
if len(versions) == 0 {
|
||||
continue
|
||||
}
|
||||
// add new applications
|
||||
if application, exists := saved.Applications[name]; !exists {
|
||||
application = &Application{
|
||||
|
||||
@@ -50,5 +50,102 @@ func TestLoadRepo(t *testing.T) {
|
||||
_ = chartData
|
||||
break
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
var indexData1 = `
|
||||
apiVersion: v1
|
||||
entries:
|
||||
apisix: []
|
||||
apisix-dashboard:
|
||||
- apiVersion: v2
|
||||
appVersion: 2.9.0
|
||||
created: "2021-11-15T08:23:00.343784368Z"
|
||||
description: A Helm chart for Apache APISIX Dashboard
|
||||
digest: 76f794b1300f7bfb756ede352fe71eb863b89f1995b495e8b683990709e310ad
|
||||
icon: https://apache.org/logos/res/apisix/apisix.png
|
||||
maintainers:
|
||||
- email: zhangjintao@apache.org
|
||||
name: tao12345666333
|
||||
name: apisix-dashboard
|
||||
type: application
|
||||
urls:
|
||||
- https://charts.kubesphere.io/main/apisix-dashboard-0.3.0.tgz
|
||||
version: 0.3.0
|
||||
`
|
||||
var indexData2 = `
|
||||
apiVersion: v1
|
||||
entries:
|
||||
apisix:
|
||||
- apiVersion: v2
|
||||
appVersion: 2.10.0
|
||||
created: "2021-11-15T08:23:00.343234584Z"
|
||||
dependencies:
|
||||
- condition: etcd.enabled
|
||||
name: etcd
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
version: 6.2.6
|
||||
- alias: dashboard
|
||||
condition: dashboard.enabled
|
||||
name: apisix-dashboard
|
||||
repository: https://charts.apiseven.com
|
||||
version: 0.3.0
|
||||
- alias: ingress-controller
|
||||
condition: ingress-controller.enabled
|
||||
name: apisix-ingress-controller
|
||||
repository: https://charts.apiseven.com
|
||||
version: 0.8.0
|
||||
description: A Helm chart for Apache APISIX
|
||||
digest: fed38a11c0fb54d385144767227e43cb2961d1b50d36ea207fdd122bddd3de28
|
||||
icon: https://apache.org/logos/res/apisix/apisix.png
|
||||
maintainers:
|
||||
- email: zhangjintao@apache.org
|
||||
name: tao12345666333
|
||||
name: apisix
|
||||
type: application
|
||||
urls:
|
||||
- https://charts.kubesphere.io/main/apisix-0.7.2.tgz
|
||||
version: 0.7.2
|
||||
apisix-dashboard:
|
||||
- apiVersion: v2
|
||||
appVersion: 2.9.0
|
||||
created: "2021-11-15T08:23:00.343784368Z"
|
||||
description: A Helm chart for Apache APISIX Dashboard
|
||||
digest: 76f794b1300f7bfb756ede352fe71eb863b89f1995b495e8b683990709e310ad
|
||||
icon: https://apache.org/logos/res/apisix/apisix.png
|
||||
maintainers:
|
||||
- email: zhangjintao@apache.org
|
||||
name: tao12345666333
|
||||
name: apisix-dashboard
|
||||
type: application
|
||||
urls:
|
||||
- https://charts.kubesphere.io/main/apisix-dashboard-0.3.0.tgz
|
||||
version: 0.3.0
|
||||
`
|
||||
|
||||
func TestMergeRepo(t *testing.T) {
|
||||
repoIndex1, err := loadIndex([]byte(indexData1))
|
||||
if err != nil {
|
||||
t.Errorf("failed to load repo index")
|
||||
t.Failed()
|
||||
}
|
||||
existsSavedIndex := &SavedIndex{}
|
||||
repoCR := &v1alpha1.HelmRepo{}
|
||||
|
||||
savedIndex1 := MergeRepoIndex(repoCR, repoIndex1, existsSavedIndex)
|
||||
if len(savedIndex1.Applications) != 1 {
|
||||
t.Errorf("faied to merge repo index with empty repo")
|
||||
t.Failed()
|
||||
}
|
||||
|
||||
repoIndex2, err := loadIndex([]byte(indexData2))
|
||||
if err != nil {
|
||||
t.Errorf("failed to load repo index")
|
||||
t.Failed()
|
||||
}
|
||||
|
||||
savedIndex2 := MergeRepoIndex(repoCR, repoIndex2, savedIndex1)
|
||||
if len(savedIndex2.Applications) != 2 {
|
||||
t.Errorf("faied to merge two repo index")
|
||||
t.Failed()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"sync"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
@@ -30,6 +31,7 @@ import (
|
||||
|
||||
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
|
||||
|
||||
kubesphere "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
|
||||
clusterinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/cluster/v1alpha1"
|
||||
clusterlister "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1"
|
||||
)
|
||||
@@ -54,6 +56,8 @@ type ClusterClients interface {
|
||||
GetClusterKubeconfig(string) (string, error)
|
||||
Get(string) (*clusterv1alpha1.Cluster, error)
|
||||
GetInnerCluster(string) *innerCluster
|
||||
GetKubernetesClientSet(string) (*kubernetes.Clientset, error)
|
||||
GetKubeSphereClientSet(string) (*kubesphere.Clientset, error)
|
||||
}
|
||||
|
||||
func NewClusterClient(clusterInformer clusterinformer.ClusterInformer) ClusterClients {
|
||||
@@ -182,3 +186,45 @@ func (c *clusterClients) IsHostCluster(cluster *clusterv1alpha1.Cluster) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *clusterClients) GetKubeSphereClientSet(name string) (*kubesphere.Clientset, error) {
|
||||
kubeconfig, err := c.GetClusterKubeconfig(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
restConfig, err := newRestConfigFromString(kubeconfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientSet, err := kubesphere.NewForConfig(restConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return clientSet, nil
|
||||
}
|
||||
|
||||
func (c *clusterClients) GetKubernetesClientSet(name string) (*kubernetes.Clientset, error) {
|
||||
kubeconfig, err := c.GetClusterKubeconfig(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
restConfig, err := newRestConfigFromString(kubeconfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientSet, err := kubernetes.NewForConfig(restConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return clientSet, nil
|
||||
}
|
||||
|
||||
func newRestConfigFromString(kubeconfig string) (*rest.Config, error) {
|
||||
bytes, err := clientcmd.NewClientConfigFromBytes([]byte(kubeconfig))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bytes.ClientConfig()
|
||||
}
|
||||
|
||||
@@ -28,7 +28,6 @@ import (
|
||||
)
|
||||
|
||||
var sf *sonyflake.Sonyflake
|
||||
var upperMachineID uint16
|
||||
|
||||
func init() {
|
||||
var st sonyflake.Settings
|
||||
@@ -37,11 +36,18 @@ func init() {
|
||||
sf = sonyflake.NewSonyflake(sonyflake.Settings{
|
||||
MachineID: lower16BitIP,
|
||||
})
|
||||
upperMachineID, _ = upper16BitIP()
|
||||
}
|
||||
if sf == nil {
|
||||
sf = sonyflake.NewSonyflake(sonyflake.Settings{
|
||||
MachineID: lower16BitIPv6,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func GetIntId() uint64 {
|
||||
if sf == nil {
|
||||
panic(errors.New("invalid snowflake instance"))
|
||||
}
|
||||
id, err := sf.NextID()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -93,15 +99,6 @@ func lower16BitIP() (uint16, error) {
|
||||
return uint16(ip[2])<<8 + uint16(ip[3]), nil
|
||||
}
|
||||
|
||||
func upper16BitIP() (uint16, error) {
|
||||
ip, err := IPv4()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return uint16(ip[0])<<8 + uint16(ip[1]), nil
|
||||
}
|
||||
|
||||
func IPv4() (net.IP, error) {
|
||||
as, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
@@ -123,3 +120,34 @@ func IPv4() (net.IP, error) {
|
||||
}
|
||||
return nil, errors.New("no ip address")
|
||||
}
|
||||
|
||||
func lower16BitIPv6() (uint16, error) {
|
||||
ip, err := IPv6()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint16(ip[14])<<8 + uint16(ip[15]), nil
|
||||
}
|
||||
func IPv6() (net.IP, error) {
|
||||
as, err := net.InterfaceAddrs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, a := range as {
|
||||
ipnet, ok := a.(*net.IPNet)
|
||||
if !ok || ipnet.IP.IsLoopback() {
|
||||
continue
|
||||
}
|
||||
if ipnet.IP.To4() != nil {
|
||||
continue
|
||||
}
|
||||
ip := ipnet.IP.To16()
|
||||
if ip == nil {
|
||||
continue
|
||||
}
|
||||
return ip, nil
|
||||
|
||||
}
|
||||
return nil, errors.New("no ip address")
|
||||
}
|
||||
|
||||
22
pkg/utils/josnpatchutil/jsonpatchutil.go
Normal file
22
pkg/utils/josnpatchutil/jsonpatchutil.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package josnpatchutil
|
||||
|
||||
import (
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
)
|
||||
|
||||
func Parse(raw []byte) (jsonpatch.Patch, error) {
|
||||
return jsonpatch.DecodePatch(raw)
|
||||
}
|
||||
|
||||
func GetValue(patch jsonpatch.Operation, value interface{}) error {
|
||||
valueInterface, err := patch.ValueInterface()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := mapstructure.Decode(valueInterface, value); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -17,7 +17,9 @@ limitations under the License.
|
||||
package reflectutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func In(value interface{}, container interface{}) bool {
|
||||
@@ -60,3 +62,15 @@ func Override(left interface{}, right interface{}) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func SetUnExportedField(ptr interface{}, filedName string, newFiledValue interface{}) (err error) {
|
||||
v := reflect.ValueOf(ptr).Elem().FieldByName(filedName)
|
||||
v = reflect.NewAt(v.Type(), unsafe.Pointer(v.UnsafeAddr())).Elem()
|
||||
nv := reflect.ValueOf(newFiledValue)
|
||||
|
||||
if v.Kind() != nv.Kind() {
|
||||
return fmt.Errorf("kind error")
|
||||
}
|
||||
v.Set(nv)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -287,9 +287,15 @@ func (c *cachedRepos) addRepo(repo *v1alpha1.HelmRepo, builtin bool) error {
|
||||
},
|
||||
Spec: v1alpha1.HelmApplicationVersionSpec{
|
||||
Metadata: &v1alpha1.Metadata{
|
||||
Name: hvw.GetName(),
|
||||
AppVersion: hvw.GetAppVersion(),
|
||||
Version: hvw.GetVersion(),
|
||||
Name: hvw.GetName(),
|
||||
AppVersion: hvw.GetAppVersion(),
|
||||
Version: hvw.GetVersion(),
|
||||
Description: hvw.GetDescription(),
|
||||
Home: hvw.GetHome(),
|
||||
Icon: hvw.GetIcon(),
|
||||
Maintainers: hvw.GetRawMaintainers(),
|
||||
Sources: hvw.GetRawSources(),
|
||||
Keywords: hvw.GetRawKeywords(),
|
||||
},
|
||||
URLs: chartVersion.URLs,
|
||||
Digest: chartVersion.Digest,
|
||||
|
||||
27
vendor/github.com/google/gops/LICENSE
generated
vendored
Normal file
27
vendor/github.com/google/gops/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2016 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
285
vendor/github.com/google/gops/agent/agent.go
generated
vendored
Normal file
285
vendor/github.com/google/gops/agent/agent.go
generated
vendored
Normal file
@@ -0,0 +1,285 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package agent provides hooks programs can register to retrieve
|
||||
// diagnostics data by using gops.
|
||||
package agent
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
gosignal "os/signal"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"runtime/pprof"
|
||||
"runtime/trace"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/google/gops/internal"
|
||||
"github.com/google/gops/signal"
|
||||
)
|
||||
|
||||
const defaultAddr = "127.0.0.1:0"
|
||||
|
||||
var (
|
||||
mu sync.Mutex
|
||||
portfile string
|
||||
listener net.Listener
|
||||
|
||||
units = []string{" bytes", "KB", "MB", "GB", "TB", "PB"}
|
||||
)
|
||||
|
||||
// Options allows configuring the started agent.
|
||||
type Options struct {
|
||||
// Addr is the host:port the agent will be listening at.
|
||||
// Optional.
|
||||
Addr string
|
||||
|
||||
// ConfigDir is the directory to store the configuration file,
|
||||
// PID of the gops process, filename, port as well as content.
|
||||
// Optional.
|
||||
ConfigDir string
|
||||
|
||||
// ShutdownCleanup automatically cleans up resources if the
|
||||
// running process receives an interrupt. Otherwise, users
|
||||
// can call Close before shutting down.
|
||||
// Optional.
|
||||
ShutdownCleanup bool
|
||||
|
||||
// ReuseSocketAddrAndPort determines whether the SO_REUSEADDR and
|
||||
// SO_REUSEPORT socket options should be set on the listening socket of
|
||||
// the agent. This option is only effective on unix-like OSes and if
|
||||
// Addr is set to a fixed host:port.
|
||||
// Optional.
|
||||
ReuseSocketAddrAndPort bool
|
||||
}
|
||||
|
||||
// Listen starts the gops agent on a host process. Once agent started, users
|
||||
// can use the advanced gops features. The agent will listen to Interrupt
|
||||
// signals and exit the process, if you need to perform further work on the
|
||||
// Interrupt signal use the options parameter to configure the agent
|
||||
// accordingly.
|
||||
//
|
||||
// Note: The agent exposes an endpoint via a TCP connection that can be used by
|
||||
// any program on the system. Review your security requirements before starting
|
||||
// the agent.
|
||||
func Listen(opts Options) error {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
if portfile != "" {
|
||||
return fmt.Errorf("gops: agent already listening at: %v", listener.Addr())
|
||||
}
|
||||
|
||||
// new
|
||||
gopsdir := opts.ConfigDir
|
||||
if gopsdir == "" {
|
||||
cfgDir, err := internal.ConfigDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gopsdir = cfgDir
|
||||
}
|
||||
|
||||
err := os.MkdirAll(gopsdir, os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if opts.ShutdownCleanup {
|
||||
gracefulShutdown()
|
||||
}
|
||||
|
||||
addr := opts.Addr
|
||||
if addr == "" {
|
||||
addr = defaultAddr
|
||||
}
|
||||
var lc net.ListenConfig
|
||||
if opts.ReuseSocketAddrAndPort {
|
||||
lc.Control = setReuseAddrAndPortSockopts
|
||||
}
|
||||
listener, err = lc.Listen(context.Background(), "tcp", addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
port := listener.Addr().(*net.TCPAddr).Port
|
||||
portfile = filepath.Join(gopsdir, strconv.Itoa(os.Getpid()))
|
||||
err = ioutil.WriteFile(portfile, []byte(strconv.Itoa(port)), os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go listen()
|
||||
return nil
|
||||
}
|
||||
|
||||
func listen() {
|
||||
buf := make([]byte, 1)
|
||||
for {
|
||||
fd, err := listener.Accept()
|
||||
if err != nil {
|
||||
// No great way to check for this, see https://golang.org/issues/4373.
|
||||
if !strings.Contains(err.Error(), "use of closed network connection") {
|
||||
fmt.Fprintf(os.Stderr, "gops: %v\n", err)
|
||||
}
|
||||
if netErr, ok := err.(net.Error); ok && !netErr.Temporary() {
|
||||
break
|
||||
}
|
||||
continue
|
||||
}
|
||||
if _, err := fd.Read(buf); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "gops: %v\n", err)
|
||||
continue
|
||||
}
|
||||
if err := handle(fd, buf); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "gops: %v\n", err)
|
||||
continue
|
||||
}
|
||||
fd.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func gracefulShutdown() {
|
||||
c := make(chan os.Signal, 1)
|
||||
gosignal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
|
||||
go func() {
|
||||
// cleanup the socket on shutdown.
|
||||
sig := <-c
|
||||
Close()
|
||||
ret := 1
|
||||
if sig == syscall.SIGTERM {
|
||||
ret = 0
|
||||
}
|
||||
os.Exit(ret)
|
||||
}()
|
||||
}
|
||||
|
||||
// Close closes the agent, removing temporary files and closing the TCP listener.
|
||||
// If no agent is listening, Close does nothing.
|
||||
func Close() {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
if portfile != "" {
|
||||
os.Remove(portfile)
|
||||
portfile = ""
|
||||
}
|
||||
if listener != nil {
|
||||
listener.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func formatBytes(val uint64) string {
|
||||
var i int
|
||||
var target uint64
|
||||
for i = range units {
|
||||
target = 1 << uint(10*(i+1))
|
||||
if val < target {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i > 0 {
|
||||
return fmt.Sprintf("%0.2f%s (%d bytes)", float64(val)/(float64(target)/1024), units[i], val)
|
||||
}
|
||||
return fmt.Sprintf("%d bytes", val)
|
||||
}
|
||||
|
||||
// handle serves a single gops command. msg[0] selects the command byte (see
// the signal package); conn is used both to read any further request payload
// (SetGCPercent) and to write the response. Unrecognized command bytes fall
// through the switch and return nil.
func handle(conn io.ReadWriter, msg []byte) error {
	switch msg[0] {
	case signal.StackTrace:
		// Debug level 2 prints all goroutine stacks in panic-style detail.
		return pprof.Lookup("goroutine").WriteTo(conn, 2)
	case signal.GC:
		runtime.GC()
		_, err := conn.Write([]byte("ok"))
		return err
	case signal.MemStats:
		// Dump runtime.MemStats as "key: value" lines, one per field.
		var s runtime.MemStats
		runtime.ReadMemStats(&s)
		fmt.Fprintf(conn, "alloc: %v\n", formatBytes(s.Alloc))
		fmt.Fprintf(conn, "total-alloc: %v\n", formatBytes(s.TotalAlloc))
		fmt.Fprintf(conn, "sys: %v\n", formatBytes(s.Sys))
		fmt.Fprintf(conn, "lookups: %v\n", s.Lookups)
		fmt.Fprintf(conn, "mallocs: %v\n", s.Mallocs)
		fmt.Fprintf(conn, "frees: %v\n", s.Frees)
		fmt.Fprintf(conn, "heap-alloc: %v\n", formatBytes(s.HeapAlloc))
		fmt.Fprintf(conn, "heap-sys: %v\n", formatBytes(s.HeapSys))
		fmt.Fprintf(conn, "heap-idle: %v\n", formatBytes(s.HeapIdle))
		fmt.Fprintf(conn, "heap-in-use: %v\n", formatBytes(s.HeapInuse))
		fmt.Fprintf(conn, "heap-released: %v\n", formatBytes(s.HeapReleased))
		fmt.Fprintf(conn, "heap-objects: %v\n", s.HeapObjects)
		fmt.Fprintf(conn, "stack-in-use: %v\n", formatBytes(s.StackInuse))
		fmt.Fprintf(conn, "stack-sys: %v\n", formatBytes(s.StackSys))
		fmt.Fprintf(conn, "stack-mspan-inuse: %v\n", formatBytes(s.MSpanInuse))
		fmt.Fprintf(conn, "stack-mspan-sys: %v\n", formatBytes(s.MSpanSys))
		fmt.Fprintf(conn, "stack-mcache-inuse: %v\n", formatBytes(s.MCacheInuse))
		fmt.Fprintf(conn, "stack-mcache-sys: %v\n", formatBytes(s.MCacheSys))
		fmt.Fprintf(conn, "other-sys: %v\n", formatBytes(s.OtherSys))
		fmt.Fprintf(conn, "gc-sys: %v\n", formatBytes(s.GCSys))
		fmt.Fprintf(conn, "next-gc: when heap-alloc >= %v\n", formatBytes(s.NextGC))
		// LastGC is nanoseconds since the epoch; zero means no GC has run yet.
		lastGC := "-"
		if s.LastGC != 0 {
			lastGC = fmt.Sprint(time.Unix(0, int64(s.LastGC)))
		}
		fmt.Fprintf(conn, "last-gc: %v\n", lastGC)
		fmt.Fprintf(conn, "gc-pause-total: %v\n", time.Duration(s.PauseTotalNs))
		// PauseNs/PauseEnd are 256-entry circular buffers; (NumGC+255)%256
		// indexes the most recent completed GC cycle.
		fmt.Fprintf(conn, "gc-pause: %v\n", s.PauseNs[(s.NumGC+255)%256])
		fmt.Fprintf(conn, "gc-pause-end: %v\n", s.PauseEnd[(s.NumGC+255)%256])
		fmt.Fprintf(conn, "num-gc: %v\n", s.NumGC)
		fmt.Fprintf(conn, "num-forced-gc: %v\n", s.NumForcedGC)
		fmt.Fprintf(conn, "gc-cpu-fraction: %v\n", s.GCCPUFraction)
		fmt.Fprintf(conn, "enable-gc: %v\n", s.EnableGC)
		fmt.Fprintf(conn, "debug-gc: %v\n", s.DebugGC)
	case signal.Version:
		fmt.Fprintf(conn, "%v\n", runtime.Version())
	case signal.HeapProfile:
		return pprof.WriteHeapProfile(conn)
	case signal.CPUProfile:
		// Streams a CPU profile to conn; blocks this handler for a fixed
		// 30-second collection window.
		if err := pprof.StartCPUProfile(conn); err != nil {
			return err
		}
		time.Sleep(30 * time.Second)
		pprof.StopCPUProfile()
	case signal.Stats:
		fmt.Fprintf(conn, "goroutines: %v\n", runtime.NumGoroutine())
		fmt.Fprintf(conn, "OS threads: %v\n", pprof.Lookup("threadcreate").Count())
		fmt.Fprintf(conn, "GOMAXPROCS: %v\n", runtime.GOMAXPROCS(0))
		fmt.Fprintf(conn, "num CPU: %v\n", runtime.NumCPU())
	case signal.BinaryDump:
		// Copies the currently running executable back to the client.
		path, err := os.Executable()
		if err != nil {
			return err
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()

		_, err = bufio.NewReader(f).WriteTo(conn)
		return err
	case signal.Trace:
		// Runs the execution tracer for a fixed 5-second window, writing
		// trace data directly to conn.
		if err := trace.Start(conn); err != nil {
			return err
		}
		time.Sleep(5 * time.Second)
		trace.Stop()
	case signal.SetGCPercent:
		// The new percentage arrives as a signed varint on the same connection.
		perc, err := binary.ReadVarint(bufio.NewReader(conn))
		if err != nil {
			return err
		}
		fmt.Fprintf(conn, "New GC percent set to %v. Previous value was %v.\n", perc, debug.SetGCPercent(int(perc)))
	}
	return nil
}
|
||||
37
vendor/github.com/google/gops/agent/sockopt_reuseport.go
generated
vendored
Normal file
37
vendor/github.com/google/gops/agent/sockopt_reuseport.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !js && !plan9 && !solaris && !windows
|
||||
// +build !js,!plan9,!solaris,!windows
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// setReuseAddrAndPortSockopts sets the SO_REUSEADDR and SO_REUSEPORT socket
|
||||
// options on c's underlying socket in order to increase the chance to re-bind()
|
||||
// to the same address and port upon agent restart.
|
||||
func setReuseAddrAndPortSockopts(network, address string, c syscall.RawConn) error {
|
||||
var soerr error
|
||||
if err := c.Control(func(su uintptr) {
|
||||
sock := int(su)
|
||||
// Allow reuse of recently-used addresses. This socket option is
|
||||
// set by default on listeners in Go's net package, see
|
||||
// net.setDefaultSockopts.
|
||||
soerr = unix.SetsockoptInt(sock, unix.SOL_SOCKET, unix.SO_REUSEADDR, 1)
|
||||
if soerr != nil {
|
||||
return
|
||||
}
|
||||
// Allow reuse of recently-used ports. This gives the agent a
|
||||
// better chance to re-bind upon restarts.
|
||||
soerr = unix.SetsockoptInt(sock, unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return soerr
|
||||
}
|
||||
14
vendor/github.com/google/gops/agent/sockopt_unsupported.go
generated
vendored
Normal file
14
vendor/github.com/google/gops/agent/sockopt_unsupported.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build (js && wasm) || plan9 || solaris || windows
|
||||
// +build js,wasm plan9 solaris windows
|
||||
|
||||
package agent
|
||||
|
||||
import "syscall"
|
||||
|
||||
// setReuseAddrAndPortSockopts is a no-op on the platforms excluded by this
// file's build tags (js/wasm, plan9, solaris, windows), where the
// SO_REUSEADDR/SO_REUSEPORT options are not applied.
func setReuseAddrAndPortSockopts(network, address string, c syscall.RawConn) error {
	return nil
}
|
||||
71
vendor/github.com/google/gops/internal/internal.go
generated
vendored
Normal file
71
vendor/github.com/google/gops/internal/internal.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const gopsConfigDirEnvKey = "GOPS_CONFIG_DIR"
|
||||
|
||||
func ConfigDir() (string, error) {
|
||||
if configDir := os.Getenv(gopsConfigDirEnvKey); configDir != "" {
|
||||
return configDir, nil
|
||||
}
|
||||
|
||||
if osUserConfigDir := getOSUserConfigDir(); osUserConfigDir != "" {
|
||||
return filepath.Join(osUserConfigDir, "gops"), nil
|
||||
}
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
return filepath.Join(os.Getenv("APPDATA"), "gops"), nil
|
||||
}
|
||||
|
||||
if xdgConfigDir := os.Getenv("XDG_CONFIG_HOME"); xdgConfigDir != "" {
|
||||
return filepath.Join(xdgConfigDir, "gops"), nil
|
||||
}
|
||||
|
||||
homeDir := guessUnixHomeDir()
|
||||
if homeDir == "" {
|
||||
return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
|
||||
}
|
||||
return filepath.Join(homeDir, ".config", "gops"), nil
|
||||
}
|
||||
|
||||
// guessUnixHomeDir returns the current user's home directory, preferring the
// os/user lookup and falling back to the HOME environment variable when the
// lookup fails.
func guessUnixHomeDir() string {
	if usr, err := user.Current(); err == nil {
		return usr.HomeDir
	}
	return os.Getenv("HOME")
}
|
||||
|
||||
func PIDFile(pid int) (string, error) {
|
||||
gopsdir, err := ConfigDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(gopsdir, strconv.Itoa(pid)), nil
|
||||
}
|
||||
|
||||
func GetPort(pid int) (string, error) {
|
||||
portfile, err := PIDFile(pid)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
b, err := ioutil.ReadFile(portfile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
port := strings.TrimSpace(string(b))
|
||||
return port, nil
|
||||
}
|
||||
20
vendor/github.com/google/gops/internal/internal_go1_13.go
generated
vendored
Normal file
20
vendor/github.com/google/gops/internal/internal_go1_13.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.13
|
||||
// +build go1.13
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// getOSUserConfigDir returns os.UserConfigDir's result, or the empty string
// when the lookup fails (e.g. the relevant environment variables are unset).
func getOSUserConfigDir() string {
	if dir, err := os.UserConfigDir(); err == nil {
		return dir
	}
	return ""
}
|
||||
12
vendor/github.com/google/gops/internal/internal_lt_go1_13.go
generated
vendored
Normal file
12
vendor/github.com/google/gops/internal/internal_lt_go1_13.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.13
|
||||
// +build !go1.13
|
||||
|
||||
package internal
|
||||
|
||||
// getOSUserConfigDir returns "" on Go versions before 1.13 (per this file's
// build tags), where os.UserConfigDir is unavailable; callers then fall back
// to other lookup strategies.
func getOSUserConfigDir() string {
	return ""
}
|
||||
38
vendor/github.com/google/gops/signal/signal.go
generated
vendored
Normal file
38
vendor/github.com/google/gops/signal/signal.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package signal contains signals used to communicate to the gops agents.
|
||||
package signal
|
||||
|
||||
const (
	// StackTrace represents a command to print stack trace.
	StackTrace = byte(0x1)

	// GC runs the garbage collector.
	GC = byte(0x2)

	// MemStats reports memory stats.
	MemStats = byte(0x3)

	// Version prints the Go version.
	Version = byte(0x4)

	// HeapProfile starts `go tool pprof` with the current memory profile.
	HeapProfile = byte(0x5)

	// CPUProfile starts `go tool pprof` with the current CPU profile
	CPUProfile = byte(0x6)

	// Stats returns Go runtime statistics such as number of goroutines, GOMAXPROCS, and NumCPU.
	Stats = byte(0x7)

	// Trace starts the Go execution tracer, waits 5 seconds and launches the trace tool.
	Trace = byte(0x8)

	// BinaryDump returns running binary file.
	BinaryDump = byte(0x9)

	// SetGCPercent sets the garbage collection target percentage.
	// NOTE(review): the value is 0x10 (decimal 16), not 0xA — it does not
	// continue the 0x1..0x9 sequence; presumably intentional upstream, so
	// do not "fix" it: agents and clients must agree on the byte value.
	SetGCPercent = byte(0x10)
)
|
||||
13
vendor/modules.txt
vendored
13
vendor/modules.txt
vendored
@@ -416,6 +416,11 @@ github.com/google/go-containerregistry/pkg/v1/types
|
||||
github.com/google/go-querystring/query
|
||||
# github.com/google/gofuzz v1.1.0 => github.com/google/gofuzz v1.1.0
|
||||
github.com/google/gofuzz
|
||||
# github.com/google/gops v0.3.23
|
||||
## explicit
|
||||
github.com/google/gops/agent
|
||||
github.com/google/gops/internal
|
||||
github.com/google/gops/signal
|
||||
# github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 => github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
|
||||
github.com/google/shlex
|
||||
# github.com/google/uuid v1.1.2 => github.com/google/uuid v1.1.1
|
||||
@@ -813,6 +818,10 @@ github.com/rubenv/sql-migrate/sqlparse
|
||||
github.com/russross/blackfriday
|
||||
# github.com/sergi/go-diff v1.1.0 => github.com/sergi/go-diff v1.0.0
|
||||
github.com/sergi/go-diff/diffmatchpatch
|
||||
# github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7
|
||||
## explicit
|
||||
# github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4
|
||||
## explicit
|
||||
# github.com/sirupsen/logrus v1.8.1 => github.com/sirupsen/logrus v1.4.2
|
||||
github.com/sirupsen/logrus
|
||||
# github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009 => github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009
|
||||
@@ -871,7 +880,7 @@ github.com/xeipuuv/gojsonreference
|
||||
github.com/xeipuuv/gojsonschema
|
||||
# github.com/xenolf/lego v0.3.2-0.20160613233155-a9d8cec0e656 => github.com/xenolf/lego v0.3.2-0.20160613233155-a9d8cec0e656
|
||||
## explicit
|
||||
# github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca => github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6
|
||||
# github.com/xlab/treeprint v1.1.0 => github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6
|
||||
github.com/xlab/treeprint
|
||||
# github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b => github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b
|
||||
github.com/yashtewari/glob-intersection
|
||||
@@ -988,7 +997,7 @@ golang.org/x/oauth2/internal
|
||||
golang.org/x/sync/errgroup
|
||||
golang.org/x/sync/semaphore
|
||||
golang.org/x/sync/singleflight
|
||||
# golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 => golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e
|
||||
# golang.org/x/sys v0.0.0-20210902050250-f475640dd07b => golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e
|
||||
golang.org/x/sys/cpu
|
||||
golang.org/x/sys/plan9
|
||||
golang.org/x/sys/unix
|
||||
|
||||
Reference in New Issue
Block a user