Compare commits

...

12 Commits

Author SHA1 Message Date
KubeSphere CI Bot
d938161ad3 [release-3.3] fix the issue that the upload app template did not display icons (#5493)
fix the issue that the upload app template did not display icons

Co-authored-by: xiaoliu <978911210@qq.com>
2023-01-29 14:33:10 +08:00
KubeSphere CI Bot
c8e131fc13 [release-3.3] adjust Pod status filter (#5488)
adjust Pod status filter

Signed-off-by: frezes <zhangjunhao@kubesphere.io>

Signed-off-by: frezes <zhangjunhao@kubesphere.io>
Co-authored-by: frezes <zhangjunhao@kubesphere.io>
2023-01-17 14:26:01 +08:00
KubeSphere CI Bot
839a31ac1d [release-3.3] Fix:Goroutine leaks when getting audit event sender times out (#5475)
* Fix:Goroutine leaks when getting audit event sender times out

* make it more readable

Co-authored-by: hzhhong <hung.z.h916@gmail.com>
2023-01-13 11:14:33 +08:00
KubeSphere CI Bot
a0ba5f6085 [release-3.3] fix Home field fault in appstore application (#5474)
fix appstore app home field

Co-authored-by: xiaoliu <978911210@qq.com>
2023-01-13 11:14:25 +08:00
KubeSphere CI Bot
658497aa0a [release-3.3] fix: ks-apiserver panic error: ServiceAccount's Secret index out of r… (#5472)
fix: ks-apiserver panic error: ServiceAccount's Secret index out of range

Co-authored-by: peng wu <2030047311@qq.com>
2023-01-13 11:14:17 +08:00
KubeSphere CI Bot
a47bf848df [release-3.3] Fix missing maintainers in helm apps (#5473)
fix missing maintainers in helm apps

Co-authored-by: qingwave <854222409@qq.com>
2023-01-13 11:07:17 +08:00
hongzhouzi
dbb3f04b9e Resolved Conflict [release-3.3] Fix failed to cache resources if group version not found #5408 (#5466)
Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>
2023-01-12 18:45:17 +08:00
hongzhouzi
705ea4af40 Resolved Conflict [release-3.3] Fix id generate error in IPv6-only environment. #5419 (#5465)
Resolved Conflict [release-3.3] Fix id generate error in IPv6-only environment. #5459

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>
Co-authored-by: isyes <isyes@foxmail.com>
2023-01-12 18:26:17 +08:00
KubeSphere CI Bot
366d1e16e4 [release-3.3] fix: concurrent map read and map write caused by reloading in ks-apiserver (#5464)
fix: concurrent map read and map write caused by reloading in ks-apiserver.

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>
Co-authored-by: hongzhouzi <hongzhouzi@kubesphere.io>
2023-01-12 17:55:17 +08:00
hongzhouzi
690d5be824 Resolved Conflict [release-3.3] fix: Resolved some data out of sync after live-reload. #5458 (#5462)
Resolved Conflict [fix: Resolved some data out of sync after live-reload.]

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>

Signed-off-by: hongzhouzi <hongzhouzi@kubesphere.io>
2023-01-12 17:44:17 +08:00
KubeSphere CI Bot
c0419ddab5 [release-3.3] add dynamic options for cache (#5325)
* add dynamic options for cache

* fixed bugs based on unit-test

* add doc for cache

* make cache implements be private

* Change simpleCache name to InMemoryCache

Signed-off-by: Wenhao Zhou <wenhaozhou@yunify.com>

* Remove fake cache and replacing to in memory cache with default parameter

Signed-off-by: Wenhao Zhou <wenhaozhou@yunify.com>

Signed-off-by: Wenhao Zhou <wenhaozhou@yunify.com>
Co-authored-by: Wenhao Zhou <wenhaozhou@yunify.com>
2022-11-03 15:55:00 +08:00
KubeSphere CI Bot
80b0301f79 [release-3.3] Fix: globalrole has cluster management right can not manage cluster (#5334)
Fix: globalrole has permission of cluster management can not manage cluster

Co-authored-by: Wenhao Zhou <wenhaozhou@yunify.com>
2022-10-27 14:47:50 +08:00
27 changed files with 588 additions and 240 deletions

View File

@@ -242,4 +242,5 @@ func (s *KubeSphereControllerManagerOptions) MergeConfig(cfg *controllerconfig.C
s.MultiClusterOptions = cfg.MultiClusterOptions s.MultiClusterOptions = cfg.MultiClusterOptions
s.ServiceMeshOptions = cfg.ServiceMeshOptions s.ServiceMeshOptions = cfg.ServiceMeshOptions
s.GatewayOptions = cfg.GatewayOptions s.GatewayOptions = cfg.GatewayOptions
s.MonitoringOptions = cfg.MonitoringOptions
} }

View File

@@ -20,6 +20,9 @@ import (
"crypto/tls" "crypto/tls"
"flag" "flag"
"fmt" "fmt"
"net/http"
"strings"
"sync"
openpitrixv1 "kubesphere.io/kubesphere/pkg/kapis/openpitrix/v1" openpitrixv1 "kubesphere.io/kubesphere/pkg/kapis/openpitrix/v1"
"kubesphere.io/kubesphere/pkg/utils/clusterclient" "kubesphere.io/kubesphere/pkg/utils/clusterclient"
@@ -41,9 +44,6 @@ import (
auditingclient "kubesphere.io/kubesphere/pkg/simple/client/auditing/elasticsearch" auditingclient "kubesphere.io/kubesphere/pkg/simple/client/auditing/elasticsearch"
"kubesphere.io/kubesphere/pkg/simple/client/cache" "kubesphere.io/kubesphere/pkg/simple/client/cache"
"net/http"
"strings"
"kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins" "kubesphere.io/kubesphere/pkg/simple/client/devops/jenkins"
eventsclient "kubesphere.io/kubesphere/pkg/simple/client/events/elasticsearch" eventsclient "kubesphere.io/kubesphere/pkg/simple/client/events/elasticsearch"
"kubesphere.io/kubesphere/pkg/simple/client/k8s" "kubesphere.io/kubesphere/pkg/simple/client/k8s"
@@ -59,9 +59,8 @@ type ServerRunOptions struct {
ConfigFile string ConfigFile string
GenericServerRunOptions *genericoptions.ServerRunOptions GenericServerRunOptions *genericoptions.ServerRunOptions
*apiserverconfig.Config *apiserverconfig.Config
schemeOnce sync.Once
// DebugMode bool
DebugMode bool
// Enable gops or not. // Enable gops or not.
GOPSEnabled bool GOPSEnabled bool
@@ -71,6 +70,7 @@ func NewServerRunOptions() *ServerRunOptions {
s := &ServerRunOptions{ s := &ServerRunOptions{
GenericServerRunOptions: genericoptions.NewServerRunOptions(), GenericServerRunOptions: genericoptions.NewServerRunOptions(),
Config: apiserverconfig.New(), Config: apiserverconfig.New(),
schemeOnce: sync.Once{},
} }
return s return s
@@ -87,7 +87,6 @@ func (s *ServerRunOptions) Flags() (fss cliflag.NamedFlagSets) {
s.AuthorizationOptions.AddFlags(fss.FlagSet("authorization"), s.AuthorizationOptions) s.AuthorizationOptions.AddFlags(fss.FlagSet("authorization"), s.AuthorizationOptions)
s.DevopsOptions.AddFlags(fss.FlagSet("devops"), s.DevopsOptions) s.DevopsOptions.AddFlags(fss.FlagSet("devops"), s.DevopsOptions)
s.SonarQubeOptions.AddFlags(fss.FlagSet("sonarqube"), s.SonarQubeOptions) s.SonarQubeOptions.AddFlags(fss.FlagSet("sonarqube"), s.SonarQubeOptions)
s.RedisOptions.AddFlags(fss.FlagSet("redis"), s.RedisOptions)
s.S3Options.AddFlags(fss.FlagSet("s3"), s.S3Options) s.S3Options.AddFlags(fss.FlagSet("s3"), s.S3Options)
s.OpenPitrixOptions.AddFlags(fss.FlagSet("openpitrix"), s.OpenPitrixOptions) s.OpenPitrixOptions.AddFlags(fss.FlagSet("openpitrix"), s.OpenPitrixOptions)
s.NetworkOptions.AddFlags(fss.FlagSet("network"), s.NetworkOptions) s.NetworkOptions.AddFlags(fss.FlagSet("network"), s.NetworkOptions)
@@ -176,21 +175,23 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
apiServer.SonarClient = sonarqube.NewSonar(sonarClient.SonarQube()) apiServer.SonarClient = sonarqube.NewSonar(sonarClient.SonarQube())
} }
var cacheClient cache.Interface // If debug mode is on or CacheOptions is nil, will create a fake cache.
if s.RedisOptions != nil && len(s.RedisOptions.Host) != 0 { if s.CacheOptions.Type != "" {
if s.RedisOptions.Host == fakeInterface && s.DebugMode { if s.DebugMode {
apiServer.CacheClient = cache.NewSimpleCache() s.CacheOptions.Type = cache.DefaultCacheType
} else {
cacheClient, err = cache.NewRedisClient(s.RedisOptions, stopCh)
if err != nil {
return nil, fmt.Errorf("failed to connect to redis service, please check redis status, error: %v", err)
}
apiServer.CacheClient = cacheClient
} }
cacheClient, err := cache.New(s.CacheOptions, stopCh)
if err != nil {
return nil, fmt.Errorf("failed to create cache, error: %v", err)
}
apiServer.CacheClient = cacheClient
} else { } else {
klog.Warning("ks-apiserver starts without redis provided, it will use in memory cache. " + s.CacheOptions = &cache.Options{Type: cache.DefaultCacheType}
"This may cause inconsistencies when running ks-apiserver with multiple replicas.") // fake cache has no error to return
apiServer.CacheClient = cache.NewSimpleCache() cacheClient, _ := cache.New(s.CacheOptions, stopCh)
apiServer.CacheClient = cacheClient
klog.Warning("ks-apiserver starts without cache provided, it will use in memory cache. " +
"This may cause inconsistencies when running ks-apiserver with multiple replicas, and memory leak risk")
} }
if s.EventsOptions.Host != "" { if s.EventsOptions.Host != "" {
@@ -222,7 +223,7 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
apiServer.ClusterClient = cc apiServer.ClusterClient = cc
} }
apiServer.OpenpitrixClient = openpitrixv1.NewOpenpitrixClient(informerFactory, apiServer.KubernetesClient.KubeSphere(), s.OpenPitrixOptions, apiServer.ClusterClient, stopCh) apiServer.OpenpitrixClient = openpitrixv1.NewOpenpitrixClient(informerFactory, apiServer.KubernetesClient.KubeSphere(), s.OpenPitrixOptions, apiServer.ClusterClient)
server := &http.Server{ server := &http.Server{
Addr: fmt.Sprintf(":%d", s.GenericServerRunOptions.InsecurePort), Addr: fmt.Sprintf(":%d", s.GenericServerRunOptions.InsecurePort),
@@ -241,9 +242,11 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS
} }
sch := scheme.Scheme sch := scheme.Scheme
if err := apis.AddToScheme(sch); err != nil { s.schemeOnce.Do(func() {
klog.Fatalf("unable add APIs to scheme: %v", err) if err := apis.AddToScheme(sch); err != nil {
} klog.Fatalf("unable add APIs to scheme: %v", err)
}
})
apiServer.RuntimeCache, err = runtimecache.New(apiServer.KubernetesClient.Config(), runtimecache.Options{Scheme: sch}) apiServer.RuntimeCache, err = runtimecache.New(apiServer.KubernetesClient.Config(), runtimecache.Options{Scheme: sch})
if err != nil { if err != nil {

View File

@@ -394,6 +394,10 @@ func waitForCacheSync(discoveryClient discovery.DiscoveryInterface, sharedInform
return err return err
}) })
if err != nil { if err != nil {
if errors.IsNotFound(err) {
klog.Warningf("group version %s not exists in the cluster", groupVersion)
continue
}
return fmt.Errorf("failed to fetch group version resources %s: %s", groupVersion, err) return fmt.Errorf("failed to fetch group version resources %s: %s", groupVersion, err)
} }
for _, resourceName := range resourceNames { for _, resourceName := range resourceNames {

View File

@@ -141,6 +141,7 @@ func (b *Backend) sendEvents(events *v1alpha1.EventList) {
defer cancel() defer cancel()
stopCh := make(chan struct{}) stopCh := make(chan struct{})
skipReturnSender := false
send := func() { send := func() {
ctx, cancel := context.WithTimeout(context.Background(), b.getSenderTimeout) ctx, cancel := context.WithTimeout(context.Background(), b.getSenderTimeout)
@@ -149,6 +150,7 @@ func (b *Backend) sendEvents(events *v1alpha1.EventList) {
select { select {
case <-ctx.Done(): case <-ctx.Done():
klog.Error("Get auditing event sender timeout") klog.Error("Get auditing event sender timeout")
skipReturnSender = true
return return
case b.senderCh <- struct{}{}: case b.senderCh <- struct{}{}:
} }
@@ -182,7 +184,9 @@ func (b *Backend) sendEvents(events *v1alpha1.EventList) {
go send() go send()
defer func() { defer func() {
<-b.senderCh if !skipReturnSender {
<-b.senderCh
}
}() }()
select { select {

View File

@@ -160,7 +160,7 @@ type Config struct {
ServiceMeshOptions *servicemesh.Options `json:"servicemesh,omitempty" yaml:"servicemesh,omitempty" mapstructure:"servicemesh"` ServiceMeshOptions *servicemesh.Options `json:"servicemesh,omitempty" yaml:"servicemesh,omitempty" mapstructure:"servicemesh"`
NetworkOptions *network.Options `json:"network,omitempty" yaml:"network,omitempty" mapstructure:"network"` NetworkOptions *network.Options `json:"network,omitempty" yaml:"network,omitempty" mapstructure:"network"`
LdapOptions *ldap.Options `json:"-,omitempty" yaml:"ldap,omitempty" mapstructure:"ldap"` LdapOptions *ldap.Options `json:"-,omitempty" yaml:"ldap,omitempty" mapstructure:"ldap"`
RedisOptions *cache.Options `json:"redis,omitempty" yaml:"redis,omitempty" mapstructure:"redis"` CacheOptions *cache.Options `json:"cache,omitempty" yaml:"cache,omitempty" mapstructure:"cache"`
S3Options *s3.Options `json:"s3,omitempty" yaml:"s3,omitempty" mapstructure:"s3"` S3Options *s3.Options `json:"s3,omitempty" yaml:"s3,omitempty" mapstructure:"s3"`
OpenPitrixOptions *openpitrix.Options `json:"openpitrix,omitempty" yaml:"openpitrix,omitempty" mapstructure:"openpitrix"` OpenPitrixOptions *openpitrix.Options `json:"openpitrix,omitempty" yaml:"openpitrix,omitempty" mapstructure:"openpitrix"`
MonitoringOptions *prometheus.Options `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring"` MonitoringOptions *prometheus.Options `json:"monitoring,omitempty" yaml:"monitoring,omitempty" mapstructure:"monitoring"`
@@ -189,7 +189,7 @@ func New() *Config {
ServiceMeshOptions: servicemesh.NewServiceMeshOptions(), ServiceMeshOptions: servicemesh.NewServiceMeshOptions(),
NetworkOptions: network.NewNetworkOptions(), NetworkOptions: network.NewNetworkOptions(),
LdapOptions: ldap.NewOptions(), LdapOptions: ldap.NewOptions(),
RedisOptions: cache.NewRedisOptions(), CacheOptions: cache.NewCacheOptions(),
S3Options: s3.NewS3Options(), S3Options: s3.NewS3Options(),
OpenPitrixOptions: openpitrix.NewOptions(), OpenPitrixOptions: openpitrix.NewOptions(),
MonitoringOptions: prometheus.NewPrometheusOptions(), MonitoringOptions: prometheus.NewPrometheusOptions(),
@@ -292,8 +292,8 @@ func (conf *Config) ToMap() map[string]bool {
// Remove invalid options before serializing to json or yaml // Remove invalid options before serializing to json or yaml
func (conf *Config) stripEmptyOptions() { func (conf *Config) stripEmptyOptions() {
if conf.RedisOptions != nil && conf.RedisOptions.Host == "" { if conf.CacheOptions != nil && conf.CacheOptions.Type == "" {
conf.RedisOptions = nil conf.CacheOptions = nil
} }
if conf.DevopsOptions != nil && conf.DevopsOptions.Host == "" { if conf.DevopsOptions != nil && conf.DevopsOptions.Host == "" {

View File

@@ -88,11 +88,9 @@ func newTestConfig() (*Config, error) {
MaxCap: 100, MaxCap: 100,
PoolName: "ldap", PoolName: "ldap",
}, },
RedisOptions: &cache.Options{ CacheOptions: &cache.Options{
Host: "localhost", Type: "redis",
Port: 6379, Options: map[string]interface{}{},
Password: "KUBESPHERE_REDIS_PASSWORD",
DB: 0,
}, },
S3Options: &s3.Options{ S3Options: &s3.Options{
Endpoint: "http://minio.openpitrix-system.svc", Endpoint: "http://minio.openpitrix-system.svc",
@@ -236,9 +234,6 @@ func TestGet(t *testing.T) {
saveTestConfig(t, conf) saveTestConfig(t, conf)
defer cleanTestConfig(t) defer cleanTestConfig(t)
conf.RedisOptions.Password = "P@88w0rd"
os.Setenv("KUBESPHERE_REDIS_PASSWORD", "P@88w0rd")
conf2, err := TryLoadFromDisk() conf2, err := TryLoadFromDisk()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@@ -251,7 +246,7 @@ func TestGet(t *testing.T) {
func TestStripEmptyOptions(t *testing.T) { func TestStripEmptyOptions(t *testing.T) {
var config Config var config Config
config.RedisOptions = &cache.Options{Host: ""} config.CacheOptions = &cache.Options{Type: ""}
config.DevopsOptions = &jenkins.Options{Host: ""} config.DevopsOptions = &jenkins.Options{Host: ""}
config.MonitoringOptions = &prometheus.Options{Endpoint: ""} config.MonitoringOptions = &prometheus.Options{Endpoint: ""}
config.SonarQubeOptions = &sonarqube.Options{Host: ""} config.SonarQubeOptions = &sonarqube.Options{Host: ""}
@@ -284,7 +279,7 @@ func TestStripEmptyOptions(t *testing.T) {
config.stripEmptyOptions() config.stripEmptyOptions()
if config.RedisOptions != nil || if config.CacheOptions != nil ||
config.DevopsOptions != nil || config.DevopsOptions != nil ||
config.MonitoringOptions != nil || config.MonitoringOptions != nil ||
config.SonarQubeOptions != nil || config.SonarQubeOptions != nil ||

View File

@@ -52,7 +52,7 @@ type openpitrixHandler struct {
openpitrix openpitrix.Interface openpitrix openpitrix.Interface
} }
func NewOpenpitrixClient(ksInformers informers.InformerFactory, ksClient versioned.Interface, option *openpitrixoptions.Options, cc clusterclient.ClusterClients, stopCh <-chan struct{}) openpitrix.Interface { func NewOpenpitrixClient(ksInformers informers.InformerFactory, ksClient versioned.Interface, option *openpitrixoptions.Options, cc clusterclient.ClusterClients) openpitrix.Interface {
var s3Client s3.Interface var s3Client s3.Interface
if option != nil && option.S3Options != nil && len(option.S3Options.Endpoint) != 0 { if option != nil && option.S3Options != nil && len(option.S3Options.Endpoint) != 0 {
var err error var err error
@@ -62,7 +62,7 @@ func NewOpenpitrixClient(ksInformers informers.InformerFactory, ksClient version
} }
} }
return openpitrix.NewOpenpitrixOperator(ksInformers, ksClient, s3Client, cc, stopCh) return openpitrix.NewOpenpitrixOperator(ksInformers, ksClient, s3Client, cc)
} }
func (h *openpitrixHandler) CreateRepo(req *restful.Request, resp *restful.Response) { func (h *openpitrixHandler) CreateRepo(req *restful.Request, resp *restful.Response) {

View File

@@ -48,15 +48,17 @@ func NewHandler(o *servicemesh.Options, client kubernetes.Interface, cache cache
if o != nil && o.KialiQueryHost != "" { if o != nil && o.KialiQueryHost != "" {
sa, err := client.CoreV1().ServiceAccounts(KubesphereNamespace).Get(context.TODO(), KubeSphereServiceAccount, metav1.GetOptions{}) sa, err := client.CoreV1().ServiceAccounts(KubesphereNamespace).Get(context.TODO(), KubeSphereServiceAccount, metav1.GetOptions{})
if err == nil { if err == nil {
secret, err := client.CoreV1().Secrets(KubesphereNamespace).Get(context.TODO(), sa.Secrets[0].Name, metav1.GetOptions{}) if len(sa.Secrets) > 0 {
if err == nil { secret, err := client.CoreV1().Secrets(KubesphereNamespace).Get(context.TODO(), sa.Secrets[0].Name, metav1.GetOptions{})
return &Handler{ if err == nil {
opt: o, return &Handler{
client: kiali.NewDefaultClient( opt: o,
cache, client: kiali.NewDefaultClient(
string(secret.Data["token"]), cache,
o.KialiQueryHost, string(secret.Data["token"]),
), o.KialiQueryHost,
),
}
} }
} }
klog.Warningf("get ServiceAccount's Secret failed %v", err) klog.Warningf("get ServiceAccount's Secret failed %v", err)

View File

@@ -16,6 +16,7 @@ package openpitrix
import ( import (
"bytes" "bytes"
"context" "context"
"encoding/base64"
"errors" "errors"
"fmt" "fmt"
"sort" "sort"
@@ -104,7 +105,7 @@ func newApplicationOperator(cached reposcache.ReposCache, informers externalvers
} }
// save icon data and helm application // save icon data and helm application
func (c *applicationOperator) createApp(app *v1alpha1.HelmApplication, iconData []byte) (*v1alpha1.HelmApplication, error) { func (c *applicationOperator) createApp(app *v1alpha1.HelmApplication, iconData string) (*v1alpha1.HelmApplication, error) {
exists, err := c.getHelmAppByName(app.GetWorkspace(), app.GetTrueName()) exists, err := c.getHelmAppByName(app.GetWorkspace(), app.GetTrueName())
if err != nil { if err != nil {
return nil, err return nil, err
@@ -112,11 +113,18 @@ func (c *applicationOperator) createApp(app *v1alpha1.HelmApplication, iconData
if exists != nil { if exists != nil {
return nil, appItemExists return nil, appItemExists
} }
if strings.HasPrefix(iconData, "http://") || strings.HasPrefix(iconData, "https://") {
if len(iconData) != 0 { app.Spec.Icon = iconData
} else if len(iconData) != 0 {
// save icon attachment // save icon attachment
iconId := idutils.GetUuid(v1alpha1.HelmAttachmentPrefix) iconId := idutils.GetUuid(v1alpha1.HelmAttachmentPrefix)
err = c.backingStoreClient.Upload(iconId, iconId, bytes.NewBuffer(iconData), len(iconData)) decodeString, err := base64.StdEncoding.DecodeString(iconData)
if err != nil {
klog.Errorf("decodeString icon failed, error: %s", err)
return nil, err
}
err = c.backingStoreClient.Upload(iconId, iconId, bytes.NewBuffer(decodeString), len(iconData))
if err != nil { if err != nil {
klog.Errorf("save icon attachment failed, error: %s", err) klog.Errorf("save icon attachment failed, error: %s", err)
return nil, err return nil, err
@@ -168,6 +176,7 @@ func (c *applicationOperator) ValidatePackage(request *ValidatePackageRequest) (
result.VersionName = chrt.GetVersionName() result.VersionName = chrt.GetVersionName()
result.Description = chrt.GetDescription() result.Description = chrt.GetDescription()
result.URL = chrt.GetUrls() result.URL = chrt.GetUrls()
result.Icon = chrt.GetIcon()
} }
return result, nil return result, nil

View File

@@ -46,7 +46,7 @@ type openpitrixOperator struct {
CategoryInterface CategoryInterface
} }
func NewOpenpitrixOperator(ksInformers ks_informers.InformerFactory, ksClient versioned.Interface, s3Client s3.Interface, cc clusterclient.ClusterClients, stopCh <-chan struct{}) Interface { func NewOpenpitrixOperator(ksInformers ks_informers.InformerFactory, ksClient versioned.Interface, s3Client s3.Interface, cc clusterclient.ClusterClients) Interface {
klog.Infof("start helm repo informer") klog.Infof("start helm repo informer")
cachedReposData := reposcache.NewReposCache() cachedReposData := reposcache.NewReposCache()
helmReposInformer := ksInformers.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmRepos().Informer() helmReposInformer := ksInformers.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmRepos().Informer()

View File

@@ -288,7 +288,7 @@ type AppVersionReview struct {
type CreateAppRequest struct { type CreateAppRequest struct {
// app icon // app icon
Icon strfmt.Base64 `json:"icon,omitempty"` Icon string `json:"icon,omitempty"`
// isv // isv
Isv string `json:"isv,omitempty"` Isv string `json:"isv,omitempty"`
@@ -413,6 +413,8 @@ type ValidatePackageResponse struct {
// app version name.eg.[0.1.0] // app version name.eg.[0.1.0]
VersionName string `json:"version_name,omitempty"` VersionName string `json:"version_name,omitempty"`
Icon string `json:"icon,omitempty"`
} }
type CreateAppVersionRequest struct { type CreateAppVersionRequest struct {

View File

@@ -399,6 +399,7 @@ func convertAppVersion(in *v1alpha1.HelmApplicationVersion) *AppVersion {
if in.Spec.Metadata != nil { if in.Spec.Metadata != nil {
out.Description = in.Spec.Description out.Description = in.Spec.Description
out.Icon = in.Spec.Icon out.Icon = in.Spec.Icon
out.Home = in.Spec.Home
} }
// The field Maintainers and Sources were a string field, so I encode the helm field's maintainers and sources, // The field Maintainers and Sources were a string field, so I encode the helm field's maintainers and sources,

View File

@@ -17,6 +17,9 @@ limitations under the License.
package pod package pod
import ( import (
"fmt"
"strings"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
@@ -31,7 +34,13 @@ const (
fieldNodeName = "nodeName" fieldNodeName = "nodeName"
fieldPVCName = "pvcName" fieldPVCName = "pvcName"
fieldServiceName = "serviceName" fieldServiceName = "serviceName"
fieldPhase = "phase"
fieldStatus = "status" fieldStatus = "status"
statusTypeWaitting = "Waiting"
statusTypeRunning = "Running"
statusTypeError = "Error"
statusTypeCompleted = "Completed"
) )
type podsGetter struct { type podsGetter struct {
@@ -90,6 +99,9 @@ func (p *podsGetter) filter(object runtime.Object, filter query.Filter) bool {
case fieldServiceName: case fieldServiceName:
return p.podBelongToService(pod, string(filter.Value)) return p.podBelongToService(pod, string(filter.Value))
case fieldStatus: case fieldStatus:
_, statusType := p.getPodStatus(pod)
return statusType == string(filter.Value)
case fieldPhase:
return string(pod.Status.Phase) == string(filter.Value) return string(pod.Status.Phase) == string(filter.Value)
default: default:
return v1alpha3.DefaultObjectMetaFilter(pod.ObjectMeta, filter) return v1alpha3.DefaultObjectMetaFilter(pod.ObjectMeta, filter)
@@ -117,3 +129,133 @@ func (p *podsGetter) podBelongToService(item *corev1.Pod, serviceName string) bo
} }
return true return true
} }
// getPodStatus derives a human-readable reason and a coarse status type for a
// pod, mirroring the STATUS column of `kubectl get po`.
// Adapted from:
// https://github.com/kubernetes/kubernetes/blob/45279654db87f4908911569c07afc42804f0e246/pkg/printers/internalversion/printers.go#L820-920
// podStatusPhase = []string("Pending", "Running","Succeeded","Failed","Unknown")
// podStatusReasons = []string{"Evicted", "NodeAffinity", "NodeLost", "Shutdown", "UnexpectedAdmissionError"}
// containerWaitingReasons = []string{"ContainerCreating", "CrashLoopBackOff", "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff", "CreateContainerError", "InvalidImageName"}
// containerTerminatedReasons = []string{"OOMKilled", "Completed", "Error", "ContainerCannotRun", "DeadlineExceeded", "Evicted"}
//
// It returns (reason, statusType): reason is the detailed string shown to
// users (e.g. "CrashLoopBackOff", "Init:1/3"); statusType is one of the
// statusType* constants consumed by the "status" list filter.
func (p *podsGetter) getPodStatus(pod *corev1.Pod) (string, string) {
	// Start from the pod phase; an explicit status reason (e.g. "Evicted")
	// takes precedence over the phase.
	reason := string(pod.Status.Phase)
	if pod.Status.Reason != "" {
		reason = pod.Status.Reason
	}

	/*
		todo: upgrade k8s.io/api version
		// If the Pod carries {type:PodScheduled, reason:WaitingForGates}, set reason to 'SchedulingGated'.
		for _, condition := range pod.Status.Conditions {
			if condition.Type == corev1.PodScheduled && condition.Reason == corev1.PodReasonSchedulingGated {
				reason = corev1.PodReasonSchedulingGated
			}
		}
	*/

	// Init containers run sequentially, so report on the first one that has
	// not terminated successfully and stop (hence the unconditional break).
	initializing := false
	for i := range pod.Status.InitContainerStatuses {
		container := pod.Status.InitContainerStatuses[i]
		switch {
		case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0:
			// This init container succeeded; examine the next one.
			continue
		case container.State.Terminated != nil:
			// initialization is failed
			if len(container.State.Terminated.Reason) == 0 {
				if container.State.Terminated.Signal != 0 {
					reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal)
				} else {
					reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode)
				}
			} else {
				reason = "Init:" + container.State.Terminated.Reason
			}
			initializing = true
		case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing":
			reason = "Init:" + container.State.Waiting.Reason
			initializing = true
		default:
			// Still progressing through init containers: report as i/total.
			reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers))
			initializing = true
		}
		break
	}

	if !initializing {
		// Walk app containers last-to-first so the earliest container in the
		// list has the final say on the reported reason.
		hasRunning := false
		for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- {
			container := pod.Status.ContainerStatuses[i]

			if container.State.Waiting != nil && container.State.Waiting.Reason != "" {
				reason = container.State.Waiting.Reason
			} else if container.State.Terminated != nil && container.State.Terminated.Reason != "" {
				reason = container.State.Terminated.Reason
			} else if container.State.Terminated != nil && container.State.Terminated.Reason == "" {
				// Terminated without a reason: fall back to signal/exit code.
				if container.State.Terminated.Signal != 0 {
					reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal)
				} else {
					reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode)
				}
			} else if container.Ready && container.State.Running != nil {
				hasRunning = true
			}
		}

		// change pod status back to "Running" if there is at least one container still reporting as "Running" status
		if reason == "Completed" && hasRunning {
			if hasPodReadyCondition(pod.Status.Conditions) {
				reason = "Running"
			} else {
				reason = "NotReady"
			}
		}
	}

	// Deletion overrides everything: "Unknown" when the node was lost,
	// otherwise "Terminating".
	if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" {
		reason = "Unknown"
	} else if pod.DeletionTimestamp != nil {
		reason = "Terminating"
	}

	// Collapse the detailed reason into one of the coarse filter buckets;
	// anything unrecognized counts as waiting.
	statusType := statusTypeWaitting
	switch reason {
	case "Running":
		statusType = statusTypeRunning
	case "Failed":
		statusType = statusTypeError
	case "Error":
		statusType = statusTypeError
	case "Completed":
		statusType = statusTypeCompleted
	case "Succeeded":
		// Succeeded only maps to Completed when every Ready condition
		// reports "PodCompleted".
		if isPodReadyConditionReason(pod.Status.Conditions, "PodCompleted") {
			statusType = statusTypeCompleted
		}
	default:
		// Resource-exhaustion reasons such as "OutOfcpu" count as errors.
		if strings.HasPrefix(reason, "OutOf") {
			statusType = statusTypeError
		}
	}

	return reason, statusType
}
// hasPodReadyCondition reports whether the pod carries a Ready condition
// whose status is True.
func hasPodReadyCondition(conditions []corev1.PodCondition) bool {
	for i := range conditions {
		c := &conditions[i]
		if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}
// isPodReadyConditionReason reports whether every Ready condition on the pod
// has the given reason. It is vacuously true when no Ready condition exists.
func isPodReadyConditionReason(conditions []corev1.PodCondition, reason string) bool {
	for i := range conditions {
		if conditions[i].Type != corev1.PodReady {
			continue
		}
		if conditions[i].Reason != reason {
			return false
		}
	}
	return true
}

View File

@@ -78,7 +78,7 @@ func TestListPods(t *testing.T) {
nil, nil,
}, },
{ {
"test status filter", "test phase filter",
"default", "default",
&query.Query{ &query.Query{
Pagination: &query.Pagination{ Pagination: &query.Pagination{
@@ -89,7 +89,7 @@ func TestListPods(t *testing.T) {
Ascending: false, Ascending: false,
Filters: map[query.Field]query.Value{ Filters: map[query.Field]query.Value{
query.FieldNamespace: query.Value("default"), query.FieldNamespace: query.Value("default"),
fieldStatus: query.Value(corev1.PodRunning), fieldPhase: query.Value(corev1.PodRunning),
}, },
}, },
&api.ListResult{ &api.ListResult{
@@ -163,6 +163,7 @@ var (
Phase: corev1.PodRunning, Phase: corev1.PodRunning,
}, },
} }
pods = []interface{}{foo1, foo2, foo3, foo4, foo5} pods = []interface{}{foo1, foo2, foo3, foo4, foo5}
) )

View File

@@ -1257,7 +1257,7 @@ func (t *tenantOperator) checkClusterPermission(user user.Info, clusters []strin
deleteCluster := authorizer.AttributesRecord{ deleteCluster := authorizer.AttributesRecord{
User: user, User: user,
Verb: authorizer.VerbDelete, Verb: authorizer.VerbDelete,
APIGroup: clusterv1alpha1.SchemeGroupVersion.Version, APIGroup: clusterv1alpha1.SchemeGroupVersion.Group,
APIVersion: clusterv1alpha1.SchemeGroupVersion.Version, APIVersion: clusterv1alpha1.SchemeGroupVersion.Version,
Resource: clusterv1alpha1.ResourcesPluralCluster, Resource: clusterv1alpha1.ResourcesPluralCluster,
Cluster: clusterName, Cluster: clusterName,

View File

@@ -16,7 +16,17 @@ limitations under the License.
package cache package cache
import "time" import (
"encoding/json"
"fmt"
"time"
"k8s.io/klog"
)
var (
cacheFactories = make(map[string]CacheFactory)
)
var NeverExpire = time.Duration(0) var NeverExpire = time.Duration(0)
@@ -39,3 +49,32 @@ type Interface interface {
// Expires updates object's expiration time, return err if key doesn't exist // Expires updates object's expiration time, return err if key doesn't exist
Expire(key string, duration time.Duration) error Expire(key string, duration time.Duration) error
} }
// DynamicOptions holds backend-specific cache settings. For redis, option
// keys can be "host", "port", "db", "password". For InMemoryCache, the option
// key can be "cleanupperiod".
type DynamicOptions map[string]interface{}

// MarshalJSON encodes the options as a plain JSON object.
// The receiver is converted to its underlying map type first: calling
// json.Marshal on a DynamicOptions value directly would re-invoke this
// method and recurse until the stack overflows.
func (o DynamicOptions) MarshalJSON() ([]byte, error) {
	return json.Marshal(map[string]interface{}(o))
}
// RegisterCacheFactory makes a cache backend available to New by storing the
// factory in the package-level registry, keyed by factory.Type().
// Registering a second factory with the same Type() silently replaces the
// first.
// NOTE(review): the registry map is written without locking — assumes all
// registration happens at startup before any concurrent use; confirm.
func RegisterCacheFactory(factory CacheFactory) {
	cacheFactories[factory.Type()] = factory
}
// New builds a cache.Interface from the given options by delegating to the
// factory registered for option.Type (see RegisterCacheFactory). stopCh is
// forwarded to the factory so backends can stop background goroutines.
// It returns an error when option is nil, when no factory is registered for
// the type, or when the factory fails to create the cache.
func New(option *Options, stopCh <-chan struct{}) (Interface, error) {
	// Guard against a nil option so the caller gets an error instead of a
	// nil-pointer panic on option.Type.
	if option == nil {
		return nil, fmt.Errorf("cache options must not be nil")
	}
	// Single comma-ok lookup instead of indexing the map twice.
	factory, ok := cacheFactories[option.Type]
	if !ok {
		err := fmt.Errorf("cache with type %s is not supported", option.Type)
		klog.Error(err)
		return nil, err
	}
	cache, err := factory.Create(option.Options, stopCh)
	if err != nil {
		klog.Errorf("failed to create cache, error: %v", err)
		return nil, err
	}
	return cache, nil
}

8
pkg/simple/client/cache/factory.go vendored Normal file
View File

@@ -0,0 +1,8 @@
package cache

// CacheFactory creates cache.Interface implementations for one backend type.
// Implementations register themselves via RegisterCacheFactory so that New
// can construct a cache from its configured type name.
type CacheFactory interface {
	// Type returns the unique type name of the cache backend.
	Type() string
	// Create builds a cache from backend-specific options; stopCh signals
	// any background goroutines the backend starts to exit.
	Create(options DynamicOptions, stopCh <-chan struct{}) (Interface, error)
}

View File

@@ -0,0 +1,200 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"regexp"
"strings"
"time"
"github.com/mitchellh/mapstructure"
"k8s.io/apimachinery/pkg/util/wait"
"kubesphere.io/kubesphere/pkg/server/errors"
)
// ErrNoSuchKey is returned by Get/Expire when a key is absent or its
// entry has already expired.
var ErrNoSuchKey = errors.New("no such key")

const (
	// typeInMemoryCache is the registry key for this backend.
	typeInMemoryCache = "InMemoryCache"
	// DefaultCacheType is used when no cache type is configured.
	DefaultCacheType = typeInMemoryCache
	// defaultCleanupPeriod is how often expired entries are evicted when
	// no "cleanupperiod" option is supplied.
	defaultCleanupPeriod = 2 * time.Hour
)
type simpleObject struct {
value string
neverExpire bool
expiredAt time.Time
}
func (so *simpleObject) IsExpired() bool {
if so.neverExpire {
return false
}
if time.Now().After(so.expiredAt) {
return true
}
return false
}
// InMemoryCacheOptions used to create inMemoryCache in memory.
// CleanupPeriod specifies how often expired entries are cleaned up.
// Note the in-memory cache cannot be used in a multi-replica apiserver,
// which would lead to data inconsistency.
type InMemoryCacheOptions struct {
	// CleanupPeriod is the janitor interval; zero falls back to
	// defaultCleanupPeriod (see NewInMemoryCache).
	CleanupPeriod time.Duration `json:"cleanupPeriod" yaml:"cleanupPeriod" mapstructure:"cleanupperiod"`
}

// inMemoryCache implements cache.Interface with memory objects, it should be used only for testing
type inMemoryCache struct {
	// store maps key -> entry; not guarded by any lock.
	store map[string]simpleObject
}
// NewInMemoryCache builds the in-process cache backend and starts a
// background janitor goroutine that evicts expired entries every cleanup
// period until stopCh is closed.
//
// NOTE(review): the janitor mutates the store map while Set/Get/Del may
// run on other goroutines and there is no lock; this looks safe only for
// single-goroutine/testing use — confirm against callers.
func NewInMemoryCache(options *InMemoryCacheOptions, stopCh <-chan struct{}) (Interface, error) {
	c := &inMemoryCache{store: map[string]simpleObject{}}

	period := defaultCleanupPeriod
	if options != nil && options.CleanupPeriod != 0 {
		period = options.CleanupPeriod
	}

	go wait.Until(c.cleanInvalidToken, period, stopCh)
	return c, nil
}
// cleanInvalidToken drops every expired entry from the store. It is run
// periodically by the janitor goroutine started in NewInMemoryCache.
func (s *inMemoryCache) cleanInvalidToken() {
	var expired []string
	for key, entry := range s.store {
		if entry.IsExpired() {
			expired = append(expired, key)
		}
	}
	for _, key := range expired {
		delete(s.store, key)
	}
}
// Keys returns every stored key matching pattern, which uses redis KEYS
// glob syntax: '*' matches any (possibly empty) run of characters and
// '?' matches exactly one character.
//
// Fix: the previous translation replaced '*' with the regexp '.', which
// matches exactly ONE character, and matched unanchored, so results
// disagreed with the redis implementation of this interface (for
// example "prefix*" failed to match "prefix" itself). The glob is now
// escaped, translated, and anchored to mirror redis semantics.
func (s *inMemoryCache) Keys(pattern string) ([]string, error) {
	expr := regexp.QuoteMeta(pattern)
	expr = strings.Replace(expr, `\*`, ".*", -1)
	expr = strings.Replace(expr, `\?`, ".", -1)
	re, err := regexp.Compile("^" + expr + "$")
	if err != nil {
		return nil, err
	}
	var keys []string
	for k := range s.store {
		if re.MatchString(k) {
			keys = append(keys, k)
		}
	}
	return keys, nil
}
// Set stores value under key with the given lifetime. Passing
// NeverExpire (zero duration) makes the entry permanent. The error
// result only satisfies cache.Interface; it is always nil.
func (s *inMemoryCache) Set(key string, value string, duration time.Duration) error {
	s.store[key] = simpleObject{
		value:       value,
		neverExpire: duration == NeverExpire,
		expiredAt:   time.Now().Add(duration),
	}
	return nil
}
// Del removes the given keys; keys that are absent are ignored.
// Always returns nil.
func (s *inMemoryCache) Del(keys ...string) error {
	for i := range keys {
		delete(s.store, keys[i])
	}
	return nil
}
// Get returns the live value stored under key. A missing key and an
// expired entry are indistinguishable: both yield ErrNoSuchKey.
func (s *inMemoryCache) Get(key string) (string, error) {
	entry, ok := s.store[key]
	if !ok {
		return "", ErrNoSuchKey
	}
	if entry.neverExpire || time.Now().Before(entry.expiredAt) {
		return entry.value, nil
	}
	return "", ErrNoSuchKey
}
// Exists reports whether every given key is present in the store.
// Note it checks raw presence only: an entry that has expired but has
// not yet been cleaned up still counts as existing.
func (s *inMemoryCache) Exists(keys ...string) (bool, error) {
	for i := range keys {
		if _, found := s.store[keys[i]]; !found {
			return false, nil
		}
	}
	return true, nil
}
// Expire resets the lifetime of an existing entry, returning
// ErrNoSuchKey (via Get) when the key is absent or already expired.
// It delegates to Set, which rebuilds the entry with the new deadline.
func (s *inMemoryCache) Expire(key string, duration time.Duration) error {
	value, err := s.Get(key)
	if err != nil {
		return err
	}
	return s.Set(key, value, duration)
}
// inMemoryCacheFactory builds the in-memory backend; registered in init.
type inMemoryCacheFactory struct {
}

// Type returns the registry key for this backend ("InMemoryCache").
func (sf *inMemoryCacheFactory) Type() string {
	return typeInMemoryCache
}
// Create decodes the dynamic options into InMemoryCacheOptions and
// builds the in-memory backend. WeaklyTypedInput together with the
// duration decode hook lets "cleanupperiod" be supplied as a string
// such as "2h".
func (sf *inMemoryCacheFactory) Create(options DynamicOptions, stopCh <-chan struct{}) (Interface, error) {
	var opts InMemoryCacheOptions
	cfg := &mapstructure.DecoderConfig{
		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
		WeaklyTypedInput: true,
		Result:           &opts,
	}
	decoder, err := mapstructure.NewDecoder(cfg)
	if err != nil {
		return nil, err
	}
	if err = decoder.Decode(options); err != nil {
		return nil, err
	}
	return NewInMemoryCache(&opts, stopCh)
}
// init registers the in-memory backend so cache.New can build it by type.
func init() {
	RegisterCacheFactory(&inMemoryCacheFactory{})
}

View File

@@ -102,7 +102,7 @@ func TestDeleteAndExpireCache(t *testing.T) {
} }
for _, testCase := range testCases { for _, testCase := range testCases {
cacheClient := NewSimpleCache() cacheClient, _ := NewInMemoryCache(nil, nil)
t.Run(testCase.description, func(t *testing.T) { t.Run(testCase.description, func(t *testing.T) {
err := load(cacheClient, dataSet) err := load(cacheClient, dataSet)

View File

@@ -18,25 +18,19 @@ package cache
import ( import (
"fmt" "fmt"
"github.com/spf13/pflag"
) )
type Options struct { type Options struct {
Host string `json:"host" yaml:"host"` Type string `json:"type"`
Port int `json:"port" yaml:"port"` Options DynamicOptions `json:"options"`
Password string `json:"password" yaml:"password"`
DB int `json:"db" yaml:"db"`
} }
// NewRedisOptions returns options points to nowhere, // NewCacheOptions returns options points to nowhere,
// because redis is not required for some components // because redis is not required for some components
func NewRedisOptions() *Options { func NewCacheOptions() *Options {
return &Options{ return &Options{
Host: "", Type: "",
Port: 0, Options: map[string]interface{}{},
Password: "",
DB: 0,
} }
} }
@@ -44,20 +38,9 @@ func NewRedisOptions() *Options {
func (r *Options) Validate() []error { func (r *Options) Validate() []error {
errors := make([]error, 0) errors := make([]error, 0)
if r.Port == 0 { if r.Type == "" {
errors = append(errors, fmt.Errorf("invalid service port number")) errors = append(errors, fmt.Errorf("invalid cache type"))
} }
return errors return errors
} }
// AddFlags add option flags to command line flags,
// if redis-host left empty, the following options will be ignored.
func (r *Options) AddFlags(fs *pflag.FlagSet, s *Options) {
fs.StringVar(&r.Host, "redis-host", s.Host, "Redis connection URL. If left blank, means redis is unnecessary, "+
"redis will be disabled.")
fs.IntVar(&r.Port, "redis-port", s.Port, "")
fs.StringVar(&r.Password, "redis-password", s.Password, "")
fs.IntVar(&r.DB, "redis-db", s.DB, "")
}

View File

@@ -17,19 +17,31 @@ limitations under the License.
package cache package cache
import ( import (
"errors"
"fmt" "fmt"
"time" "time"
"github.com/go-redis/redis" "github.com/go-redis/redis"
"github.com/mitchellh/mapstructure"
"k8s.io/klog" "k8s.io/klog"
) )
type Client struct { const typeRedis = "redis"
type redisClient struct {
client *redis.Client client *redis.Client
} }
func NewRedisClient(option *Options, stopCh <-chan struct{}) (Interface, error) { // redisOptions used to create a redis client.
var r Client type redisOptions struct {
Host string `json:"host" yaml:"host" mapstructure:"host"`
Port int `json:"port" yaml:"port" mapstructure:"port"`
Password string `json:"password" yaml:"password" mapstructure:"password"`
DB int `json:"db" yaml:"db" mapstructure:"db"`
}
func NewRedisClient(option *redisOptions, stopCh <-chan struct{}) (Interface, error) {
var r redisClient
redisOptions := &redis.Options{ redisOptions := &redis.Options{
Addr: fmt.Sprintf("%s:%d", option.Host, option.Port), Addr: fmt.Sprintf("%s:%d", option.Host, option.Port),
@@ -61,23 +73,23 @@ func NewRedisClient(option *Options, stopCh <-chan struct{}) (Interface, error)
return &r, nil return &r, nil
} }
func (r *Client) Get(key string) (string, error) { func (r *redisClient) Get(key string) (string, error) {
return r.client.Get(key).Result() return r.client.Get(key).Result()
} }
func (r *Client) Keys(pattern string) ([]string, error) { func (r *redisClient) Keys(pattern string) ([]string, error) {
return r.client.Keys(pattern).Result() return r.client.Keys(pattern).Result()
} }
func (r *Client) Set(key string, value string, duration time.Duration) error { func (r *redisClient) Set(key string, value string, duration time.Duration) error {
return r.client.Set(key, value, duration).Err() return r.client.Set(key, value, duration).Err()
} }
func (r *Client) Del(keys ...string) error { func (r *redisClient) Del(keys ...string) error {
return r.client.Del(keys...).Err() return r.client.Del(keys...).Err()
} }
func (r *Client) Exists(keys ...string) (bool, error) { func (r *redisClient) Exists(keys ...string) (bool, error) {
existedKeys, err := r.client.Exists(keys...).Result() existedKeys, err := r.client.Exists(keys...).Result()
if err != nil { if err != nil {
return false, err return false, err
@@ -86,6 +98,34 @@ func (r *Client) Exists(keys ...string) (bool, error) {
return len(keys) == int(existedKeys), nil return len(keys) == int(existedKeys), nil
} }
func (r *Client) Expire(key string, duration time.Duration) error { func (r *redisClient) Expire(key string, duration time.Duration) error {
return r.client.Expire(key, duration).Err() return r.client.Expire(key, duration).Err()
} }
// redisFactory builds the redis-backed cache; registered in init.
type redisFactory struct{}

// Type returns the registry key for this backend ("redis").
func (rf *redisFactory) Type() string {
	return typeRedis
}
// Create decodes the dynamic options into redisOptions, validates the
// minimum connection settings (a non-zero port and a host), and dials
// redis via NewRedisClient.
func (rf *redisFactory) Create(options DynamicOptions, stopCh <-chan struct{}) (Interface, error) {
	var opts redisOptions
	if err := mapstructure.Decode(options, &opts); err != nil {
		return nil, err
	}
	if opts.Port == 0 {
		return nil, errors.New("invalid service port number")
	}
	if len(opts.Host) == 0 {
		return nil, errors.New("invalid service host")
	}
	return NewRedisClient(&opts, stopCh)
}
// init registers the redis backend so cache.New can build it by type.
func init() {
	RegisterCacheFactory(&redisFactory{})
}

View File

@@ -1,123 +0,0 @@
/*
Copyright 2019 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"regexp"
"strings"
"time"
"kubesphere.io/kubesphere/pkg/server/errors"
)
// ErrNoSuchKey is returned by Get/Expire when a key is absent or its
// entry has already expired.
var ErrNoSuchKey = errors.New("no such key")

// simpleObject is a single cache entry: the stored string together with
// its expiration metadata.
type simpleObject struct {
	value       string
	neverExpire bool
	expiredAt   time.Time
}

// simpleCache implements cache.Interface with a plain in-process map.
// It is not safe for concurrent use and should be used only for testing.
type simpleCache struct {
	store map[string]simpleObject
}

// NewSimpleCache returns an empty in-memory cache.
func NewSimpleCache() Interface {
	return &simpleCache{store: make(map[string]simpleObject)}
}

// Keys returns every stored key matching pattern, which uses redis KEYS
// glob syntax: '*' matches any (possibly empty) run of characters and
// '?' matches exactly one character.
//
// Fix: the previous translation replaced '*' with the regexp '.', which
// matches exactly ONE character, and matched unanchored, so results
// disagreed with the redis implementation of this interface (for
// example "prefix*" failed to match "prefix" itself). The glob is now
// escaped, translated, and anchored to mirror redis semantics.
func (s *simpleCache) Keys(pattern string) ([]string, error) {
	expr := regexp.QuoteMeta(pattern)
	expr = strings.Replace(expr, `\*`, ".*", -1)
	expr = strings.Replace(expr, `\?`, ".", -1)
	re, err := regexp.Compile("^" + expr + "$")
	if err != nil {
		return nil, err
	}
	var keys []string
	for k := range s.store {
		if re.MatchString(k) {
			keys = append(keys, k)
		}
	}
	return keys, nil
}

// Set stores value under key with the given lifetime; NeverExpire (zero
// duration) makes the entry permanent. Always returns nil.
func (s *simpleCache) Set(key string, value string, duration time.Duration) error {
	s.store[key] = simpleObject{
		value:       value,
		neverExpire: duration == NeverExpire,
		expiredAt:   time.Now().Add(duration),
	}
	return nil
}

// Del removes the given keys; missing keys are ignored. Always returns nil.
func (s *simpleCache) Del(keys ...string) error {
	for _, key := range keys {
		delete(s.store, key)
	}
	return nil
}

// Get returns the live value stored under key; a missing key and an
// expired entry both yield ErrNoSuchKey.
func (s *simpleCache) Get(key string) (string, error) {
	sobject, ok := s.store[key]
	if !ok {
		return "", ErrNoSuchKey
	}
	if sobject.neverExpire || time.Now().Before(sobject.expiredAt) {
		return sobject.value, nil
	}
	return "", ErrNoSuchKey
}

// Exists reports whether every given key is present in the store
// (expired-but-uncleaned entries still count as present).
func (s *simpleCache) Exists(keys ...string) (bool, error) {
	for _, key := range keys {
		if _, ok := s.store[key]; !ok {
			return false, nil
		}
	}
	return true, nil
}

// Expire resets the lifetime of an existing entry by delegating to Set;
// returns ErrNoSuchKey when the key is absent or already expired.
func (s *simpleCache) Expire(key string, duration time.Duration) error {
	value, err := s.Get(key)
	if err != nil {
		return err
	}
	return s.Set(key, value, duration)
}

View File

@@ -38,6 +38,11 @@ func TestClient_Get(t *testing.T) {
type args struct { type args struct {
url string url string
} }
inMemoryCache, err := cache.NewInMemoryCache(nil, nil)
if err != nil {
t.Fatal(err)
}
token, _ := json.Marshal( token, _ := json.Marshal(
&TokenResponse{ &TokenResponse{
Username: "test", Username: "test",
@@ -92,7 +97,7 @@ func TestClient_Get(t *testing.T) {
name: "Token", name: "Token",
fields: fields{ fields: fields{
Strategy: AuthStrategyToken, Strategy: AuthStrategyToken,
cache: cache.NewSimpleCache(), cache: inMemoryCache,
client: &MockClient{ client: &MockClient{
TokenResult: token, TokenResult: token,
RequestResult: "fake", RequestResult: "fake",

View File

@@ -20,7 +20,6 @@ import (
"fmt" "fmt"
"sort" "sort"
"strings" "strings"
"sync"
"time" "time"
"github.com/go-ldap/ldap" "github.com/go-ldap/ldap"
@@ -63,8 +62,6 @@ type ldapInterfaceImpl struct {
groupSearchBase string groupSearchBase string
managerDN string managerDN string
managerPassword string managerPassword string
once sync.Once
} }
var _ Interface = &ldapInterfaceImpl{} var _ Interface = &ldapInterfaceImpl{}
@@ -95,7 +92,6 @@ func NewLdapClient(options *Options, stopCh <-chan struct{}) (Interface, error)
groupSearchBase: options.GroupSearchBase, groupSearchBase: options.GroupSearchBase,
managerDN: options.ManagerDN, managerDN: options.ManagerDN,
managerPassword: options.ManagerPassword, managerPassword: options.ManagerPassword,
once: sync.Once{},
} }
go func() { go func() {
@@ -103,9 +99,7 @@ func NewLdapClient(options *Options, stopCh <-chan struct{}) (Interface, error)
client.close() client.close()
}() }()
client.once.Do(func() { _ = client.createSearchBase()
_ = client.createSearchBase()
})
return client, nil return client, nil
} }

View File

@@ -74,6 +74,10 @@ func (h HelmVersionWrapper) GetKeywords() string {
return strings.Join(h.ChartVersion.Keywords, ",") return strings.Join(h.ChartVersion.Keywords, ",")
} }
// GetRawKeywords returns the chart's keyword list as-is; GetKeywords is
// the comma-joined string variant of the same data.
func (h HelmVersionWrapper) GetRawKeywords() []string {
	return h.ChartVersion.Keywords
}
func (h HelmVersionWrapper) GetRawMaintainers() []*v1alpha1.Maintainer { func (h HelmVersionWrapper) GetRawMaintainers() []*v1alpha1.Maintainer {
mt := make([]*v1alpha1.Maintainer, 0, len(h.Maintainers)) mt := make([]*v1alpha1.Maintainer, 0, len(h.Maintainers))
for _, value := range h.Maintainers { for _, value := range h.Maintainers {

View File

@@ -28,7 +28,6 @@ import (
) )
var sf *sonyflake.Sonyflake var sf *sonyflake.Sonyflake
var upperMachineID uint16
func init() { func init() {
var st sonyflake.Settings var st sonyflake.Settings
@@ -37,11 +36,18 @@ func init() {
sf = sonyflake.NewSonyflake(sonyflake.Settings{ sf = sonyflake.NewSonyflake(sonyflake.Settings{
MachineID: lower16BitIP, MachineID: lower16BitIP,
}) })
upperMachineID, _ = upper16BitIP() }
if sf == nil {
sf = sonyflake.NewSonyflake(sonyflake.Settings{
MachineID: lower16BitIPv6,
})
} }
} }
func GetIntId() uint64 { func GetIntId() uint64 {
if sf == nil {
panic(errors.New("invalid snowflake instance"))
}
id, err := sf.NextID() id, err := sf.NextID()
if err != nil { if err != nil {
panic(err) panic(err)
@@ -93,15 +99,6 @@ func lower16BitIP() (uint16, error) {
return uint16(ip[2])<<8 + uint16(ip[3]), nil return uint16(ip[2])<<8 + uint16(ip[3]), nil
} }
// upper16BitIP packs the first two bytes of this host's IPv4 address
// into a uint16, or returns an error when no usable IPv4 address exists.
func upper16BitIP() (uint16, error) {
	ip, err := IPv4()
	if err != nil {
		return 0, err
	}
	return uint16(ip[0])<<8 + uint16(ip[1]), nil
}
func IPv4() (net.IP, error) { func IPv4() (net.IP, error) {
as, err := net.InterfaceAddrs() as, err := net.InterfaceAddrs()
if err != nil { if err != nil {
@@ -123,3 +120,34 @@ func IPv4() (net.IP, error) {
} }
return nil, errors.New("no ip address") return nil, errors.New("no ip address")
} }
// lower16BitIPv6 derives a 16-bit machine ID from the last two bytes of
// this host's IPv6 address; used when no IPv4 address is available.
func lower16BitIPv6() (uint16, error) {
	addr, err := IPv6()
	if err != nil {
		return 0, err
	}
	return uint16(addr[14])<<8 | uint16(addr[15]), nil
}

// IPv6 returns the first non-loopback, non-IPv4 interface address in
// 16-byte form, or an error when none exists.
// NOTE(review): this may select a link-local (fe80::/10) address —
// confirm that is acceptable for machine-ID derivation.
func IPv6() (net.IP, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return nil, err
	}
	for _, addr := range addrs {
		ipNet, ok := addr.(*net.IPNet)
		if !ok || ipNet.IP.IsLoopback() || ipNet.IP.To4() != nil {
			continue
		}
		if ip := ipNet.IP.To16(); ip != nil {
			return ip, nil
		}
	}
	return nil, errors.New("no ip address")
}

View File

@@ -287,9 +287,15 @@ func (c *cachedRepos) addRepo(repo *v1alpha1.HelmRepo, builtin bool) error {
}, },
Spec: v1alpha1.HelmApplicationVersionSpec{ Spec: v1alpha1.HelmApplicationVersionSpec{
Metadata: &v1alpha1.Metadata{ Metadata: &v1alpha1.Metadata{
Name: hvw.GetName(), Name: hvw.GetName(),
AppVersion: hvw.GetAppVersion(), AppVersion: hvw.GetAppVersion(),
Version: hvw.GetVersion(), Version: hvw.GetVersion(),
Description: hvw.GetDescription(),
Home: hvw.GetHome(),
Icon: hvw.GetIcon(),
Maintainers: hvw.GetRawMaintainers(),
Sources: hvw.GetRawSources(),
Keywords: hvw.GetRawKeywords(),
}, },
URLs: chartVersion.URLs, URLs: chartVersion.URLs,
Digest: chartVersion.Digest, Digest: chartVersion.Digest,