From 93ad572e193ae6f88735710eb591f32b0b88ca4b Mon Sep 17 00:00:00 2001 From: hongming Date: Mon, 1 Apr 2019 02:59:19 +0800 Subject: [PATCH] refine tenant api Signed-off-by: hongming --- Gopkg.lock | 187 ++- Gopkg.toml | 4 + Makefile | 4 +- cmd/controller-manager/app/controllers.go | 24 +- cmd/ks-apiserver/apiserver.go | 2 + cmd/ks-apiserver/app/options/options.go | 2 - cmd/ks-apiserver/app/server.go | 34 +- cmd/ks-iam/app/options/options.go | 21 +- cmd/ks-iam/app/server.go | 18 +- config/crds/tenant_v1alpha1_workspace.yaml | 50 + config/rbac/rbac_role.yaml | 178 +-- config/samples/tenant_v1alpha1_workspace.yaml | 8 + hack/docker_build.sh | 2 +- hack/docker_push.sh | 2 +- .../caddy-plugin/authenticate/authenticate.go | 5 +- .../authentication/authentication.go | 14 +- .../caddy-plugin/authentication/auto_load.go | 13 +- pkg/apis/addtoscheme_tenant_v1alpha1.go | 10 + pkg/apis/iam/v1alpha2/register.go | 186 ++- pkg/apis/logging/v1alpha2/register.go | 2 +- pkg/apis/resources/v1alpha2/register.go | 17 +- pkg/apis/tenant/group.go | 20 + pkg/apis/tenant/install/install.go | 33 + pkg/apis/tenant/v1alpha1/doc.go | 25 + pkg/apis/tenant/v1alpha1/register.go | 48 + .../tenant/v1alpha1/v1alpha1_suite_test.go | 57 + pkg/apis/tenant/v1alpha1/workspace_types.go | 67 + .../tenant/v1alpha1/workspace_types_test.go | 58 + .../tenant/v1alpha1/zz_generated.deepcopy.go | 120 ++ pkg/apis/tenant/v1alpha2/register.go | 107 ++ pkg/apis/terminal/install/install.go | 33 + pkg/apis/terminal/v1alpha2/register.go | 56 + pkg/apiserver/iam/am.go | 182 +-- pkg/apiserver/iam/auth.go | 32 +- pkg/apiserver/iam/groups.go | 101 +- pkg/apiserver/iam/{users.go => im.go} | 200 +-- .../iam/counter.go => apiserver/iam/types.go} | 36 - pkg/apiserver/iam/workspaces.go | 747 ++--------- pkg/apiserver/resources/application.go | 6 +- pkg/apiserver/resources/cluster_resources.go | 44 - .../{namespace_resources.go => resources.go} | 27 +- pkg/apiserver/routers/routers.go | 15 - pkg/apiserver/tenant/tenant.go | 264 
++++ pkg/apiserver/terminal/terminal.go | 56 + pkg/client/clientset/versioned/clientset.go | 28 +- .../versioned/fake/clientset_generated.go | 12 + .../clientset/versioned/fake/register.go | 10 +- .../clientset/versioned/scheme/register.go | 10 +- .../versioned/typed/tenant/v1alpha1/doc.go | 20 + .../typed/tenant/v1alpha1/fake/doc.go | 20 + .../v1alpha1/fake/fake_tenant_client.go | 40 + .../tenant/v1alpha1/fake/fake_workspace.go | 131 ++ .../tenant/v1alpha1/generated_expansion.go | 21 + .../typed/tenant/v1alpha1/tenant_client.go | 90 ++ .../typed/tenant/v1alpha1/workspace.go | 180 +++ .../informers/externalversions/factory.go | 26 +- .../informers/externalversions/generic.go | 11 +- .../externalversions/tenant/interface.go | 46 + .../tenant/v1alpha1/interface.go | 45 + .../tenant/v1alpha1/workspace.go | 88 ++ .../tenant/v1alpha1/expansion_generated.go | 23 + .../listers/tenant/v1alpha1/workspace.go | 65 + pkg/constants/constants.go | 17 +- pkg/controller/add_clusterrolebinding.go | 28 + pkg/controller/add_namespace.go | 28 + pkg/controller/add_workspace.go | 26 + .../clusterrolebinding_controller.go | 220 ++++ ...lusterrolebinding_controller_suite_test.go | 77 ++ .../clusterrolebinding_controller_test.go | 19 + .../namespace/namespace_controller.go | 436 +++++++ .../namespace_controller_suite_test.go | 77 ++ .../namespace/namespace_controller_test.go | 19 + .../workspace/workspace_controller.go | 534 ++++++++ .../workspace_controller_suite_test.go | 77 ++ .../workspace/workspace_controller_test.go | 19 + pkg/errors/errors.go | 4 + pkg/informers/informers.go | 11 + pkg/models/applications/applications.go | 417 ++----- pkg/models/iam/am.go | 780 ++++++------ pkg/models/iam/iam.go | 188 --- pkg/models/iam/im.go | 573 +++++---- pkg/models/iam/policy/policy.go | 440 ++----- pkg/models/log/constants.go | 2 +- pkg/models/log/logcrd.go | 2 +- pkg/models/metrics/metrics.go | 24 +- pkg/models/metrics/util.go | 23 +- pkg/models/quotas/quotas.go | 4 +- 
pkg/models/resources/clusterroles.go | 29 +- pkg/models/resources/configmaps.go | 18 +- pkg/models/resources/cronjobs.go | 22 +- pkg/models/resources/daemonsets.go | 22 +- pkg/models/resources/deployments.go | 22 +- pkg/models/resources/ingresses.go | 18 +- pkg/models/resources/jobs.go | 22 +- pkg/models/resources/namespaces.go | 20 +- pkg/models/resources/nodes.go | 20 +- .../resources/persistentvolumeclaims.go | 18 +- pkg/models/resources/pods.go | 70 +- pkg/models/resources/resources.go | 126 +- pkg/models/resources/roles.go | 18 +- pkg/models/resources/s2ibuilder.go | 18 +- pkg/models/resources/s2ibuildertemplate.go | 20 +- pkg/models/resources/s2irun.go | 20 +- pkg/models/resources/secrets.go | 18 +- pkg/models/resources/services.go | 18 +- pkg/models/resources/statefulsets.go | 10 +- pkg/models/resources/storageclasses.go | 20 +- pkg/models/resources/workspaces.go | 132 ++ pkg/models/routers/routers.go | 36 - pkg/models/status/status.go | 2 +- pkg/models/tenant/devops.go | 119 ++ pkg/models/tenant/namespaces.go | 134 ++ pkg/models/tenant/tenant.go | 103 ++ pkg/models/tenant/workspaces.go | 130 ++ pkg/models/terminal/terminal.go | 301 +++++ pkg/models/types.go | 49 +- pkg/models/workspaces/workspaces.go | 713 +---------- pkg/params/params.go | 21 +- pkg/simple/client/k8s/ksclient.go | 46 + pkg/simple/client/k8s/s2iclient.go | 19 +- .../client/kubesphere/kubesphereclient.go | 274 +++++ pkg/simple/client/ldap/ldapclient.go | 4 +- pkg/simple/client/mysql/dbclient.go | 1 - pkg/simple/client/openpitrix/applications.go | 291 +++++ .../client/openpitrix/openpitrixclient.go | 142 +++ pkg/simple/client/redis/redis.go | 9 + pkg/simple/controller/namespace/namespaces.go | 196 --- pkg/utils/{ => iputil}/iputils.go | 2 +- pkg/utils/{ => jsonutil}/jsonutils.go | 2 +- pkg/utils/{jwt => jwtutil}/jwt.go | 26 +- pkg/utils/k8sutil/k8sutil.go | 73 ++ pkg/utils/{ => sliceutil}/sliceutils.go | 2 +- pkg/utils/utils.go | 7 - tools/cmd/doc-gen/main.go | 2 + .../spdystream/LICENSE} | 
49 +- .../github.com/docker/spdystream/LICENSE.docs | 425 +++++++ .../docker/spdystream/connection.go | 959 +++++++++++++++ .../github.com/docker/spdystream/handlers.go | 36 + .../github.com/docker/spdystream/priority.go | 98 ++ .../docker/spdystream/spdy/dictionary.go | 187 +++ .../github.com/docker/spdystream/spdy/read.go | 348 ++++++ .../docker/spdystream/spdy/types.go | 275 +++++ .../docker/spdystream/spdy/write.go | 318 +++++ vendor/github.com/docker/spdystream/stream.go | 327 +++++ vendor/github.com/docker/spdystream/utils.go | 16 + .../apis/devops/v1alpha1/s2ibuilder_types.go | 20 +- .../pkg/apis/devops/v1alpha1/s2irun_types.go | 4 +- .../github.com/mitchellh/go-homedir/LICENSE | 21 - .../mitchellh/go-homedir/homedir.go | 167 --- .../spf13/cobra/cobra/cmd/license_agpl.go | 683 ----------- .../cobra/cobra/cmd/license_bsd_clause_2.go | 71 -- .../cobra/cobra/cmd/license_bsd_clause_3.go | 78 -- .../spf13/cobra/cobra/cmd/license_gpl_2.go | 376 ------ .../spf13/cobra/cobra/cmd/license_gpl_3.go | 711 ----------- .../spf13/cobra/cobra/cmd/license_lgpl.go | 186 --- .../spf13/cobra/cobra/cmd/license_mit.go | 63 - .../spf13/cobra/cobra/cmd/licenses.go | 118 -- .../x/tools/container/intsets/popcnt_amd64.go | 20 - .../x/tools/container/intsets/popcnt_amd64.s | 30 - .../x/tools/container/intsets/popcnt_gccgo.go | 9 - .../tools/container/intsets/popcnt_gccgo_c.c | 19 - .../tools/container/intsets/popcnt_generic.go | 33 - .../x/tools/container/intsets/sparse.go | 1091 ----------------- .../x/tools/container/intsets/util.go | 84 -- vendor/gopkg.in/igm/sockjs-go.v2/LICENSE | 26 + .../gopkg.in/igm/sockjs-go.v2/sockjs/doc.go | 5 + .../igm/sockjs-go.v2/sockjs/eventsource.go | 32 + .../gopkg.in/igm/sockjs-go.v2/sockjs/frame.go | 11 + .../igm/sockjs-go.v2/sockjs/handler.go | 133 ++ .../igm/sockjs-go.v2/sockjs/htmlfile.go | 58 + .../igm/sockjs-go.v2/sockjs/httpreceiver.go | 105 ++ .../igm/sockjs-go.v2/sockjs/iframe.go | 42 + .../gopkg.in/igm/sockjs-go.v2/sockjs/jsonp.go | 77 ++ 
.../igm/sockjs-go.v2/sockjs/mapping.go | 36 + .../igm/sockjs-go.v2/sockjs/options.go | 114 ++ .../igm/sockjs-go.v2/sockjs/session.go | 219 ++++ .../igm/sockjs-go.v2/sockjs/sockjs.go | 13 + .../gopkg.in/igm/sockjs-go.v2/sockjs/utils.go | 16 + .../gopkg.in/igm/sockjs-go.v2/sockjs/web.go | 47 + .../igm/sockjs-go.v2/sockjs/websocket.go | 97 ++ .../gopkg.in/igm/sockjs-go.v2/sockjs/xhr.go | 88 ++ .../apimachinery/pkg/util/httpstream/doc.go | 19 + .../pkg/util/httpstream/httpstream.go | 149 +++ .../pkg/util/httpstream/spdy/connection.go | 145 +++ .../pkg/util/httpstream/spdy/roundtripper.go | 335 +++++ .../pkg/util/httpstream/spdy/upgrade.go | 107 ++ .../pkg/util/remotecommand/constants.go | 53 + .../third_party/forked/golang/netutil/addr.go | 27 + .../client-go/tools/remotecommand/doc.go | 20 + .../tools/remotecommand/errorstream.go | 55 + .../client-go/tools/remotecommand/reader.go | 41 + .../tools/remotecommand/remotecommand.go | 142 +++ .../client-go/tools/remotecommand/resize.go | 33 + .../client-go/tools/remotecommand/v1.go | 160 +++ .../client-go/tools/remotecommand/v2.go | 195 +++ .../client-go/tools/remotecommand/v3.go | 111 ++ .../client-go/tools/remotecommand/v4.go | 119 ++ .../k8s.io/client-go/transport/spdy/spdy.go | 94 ++ vendor/k8s.io/client-go/util/exec/exec.go | 52 + .../third_party/forked/godep/license.go | 59 - .../controllerutil/controllerutil.go | 178 +++ .../pkg/controller/controllerutil/doc.go | 20 + 202 files changed, 13517 insertions(+), 7951 deletions(-) create mode 100644 config/crds/tenant_v1alpha1_workspace.yaml create mode 100644 config/samples/tenant_v1alpha1_workspace.yaml create mode 100644 pkg/apis/addtoscheme_tenant_v1alpha1.go create mode 100644 pkg/apis/tenant/group.go create mode 100644 pkg/apis/tenant/install/install.go create mode 100644 pkg/apis/tenant/v1alpha1/doc.go create mode 100644 pkg/apis/tenant/v1alpha1/register.go create mode 100644 pkg/apis/tenant/v1alpha1/v1alpha1_suite_test.go create mode 100644 
pkg/apis/tenant/v1alpha1/workspace_types.go create mode 100644 pkg/apis/tenant/v1alpha1/workspace_types_test.go create mode 100644 pkg/apis/tenant/v1alpha1/zz_generated.deepcopy.go create mode 100644 pkg/apis/tenant/v1alpha2/register.go create mode 100644 pkg/apis/terminal/install/install.go create mode 100644 pkg/apis/terminal/v1alpha2/register.go rename pkg/apiserver/iam/{users.go => im.go} (52%) rename pkg/{models/iam/counter.go => apiserver/iam/types.go} (56%) delete mode 100644 pkg/apiserver/resources/cluster_resources.go rename pkg/apiserver/resources/{namespace_resources.go => resources.go} (59%) create mode 100644 pkg/apiserver/tenant/tenant.go create mode 100644 pkg/apiserver/terminal/terminal.go create mode 100644 pkg/client/clientset/versioned/typed/tenant/v1alpha1/doc.go create mode 100644 pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake/doc.go create mode 100644 pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake/fake_tenant_client.go create mode 100644 pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake/fake_workspace.go create mode 100644 pkg/client/clientset/versioned/typed/tenant/v1alpha1/generated_expansion.go create mode 100644 pkg/client/clientset/versioned/typed/tenant/v1alpha1/tenant_client.go create mode 100644 pkg/client/clientset/versioned/typed/tenant/v1alpha1/workspace.go create mode 100644 pkg/client/informers/externalversions/tenant/interface.go create mode 100644 pkg/client/informers/externalversions/tenant/v1alpha1/interface.go create mode 100644 pkg/client/informers/externalversions/tenant/v1alpha1/workspace.go create mode 100644 pkg/client/listers/tenant/v1alpha1/expansion_generated.go create mode 100644 pkg/client/listers/tenant/v1alpha1/workspace.go create mode 100644 pkg/controller/add_clusterrolebinding.go create mode 100644 pkg/controller/add_namespace.go create mode 100644 pkg/controller/add_workspace.go create mode 100644 pkg/controller/clusterrolebinding/clusterrolebinding_controller.go create mode 
100644 pkg/controller/clusterrolebinding/clusterrolebinding_controller_suite_test.go create mode 100644 pkg/controller/clusterrolebinding/clusterrolebinding_controller_test.go create mode 100644 pkg/controller/namespace/namespace_controller.go create mode 100644 pkg/controller/namespace/namespace_controller_suite_test.go create mode 100644 pkg/controller/namespace/namespace_controller_test.go create mode 100644 pkg/controller/workspace/workspace_controller.go create mode 100644 pkg/controller/workspace/workspace_controller_suite_test.go create mode 100644 pkg/controller/workspace/workspace_controller_test.go delete mode 100644 pkg/models/iam/iam.go create mode 100644 pkg/models/resources/workspaces.go create mode 100644 pkg/models/tenant/devops.go create mode 100644 pkg/models/tenant/namespaces.go create mode 100644 pkg/models/tenant/tenant.go create mode 100644 pkg/models/tenant/workspaces.go create mode 100644 pkg/models/terminal/terminal.go create mode 100644 pkg/simple/client/k8s/ksclient.go create mode 100644 pkg/simple/client/kubesphere/kubesphereclient.go create mode 100644 pkg/simple/client/openpitrix/applications.go create mode 100644 pkg/simple/client/openpitrix/openpitrixclient.go delete mode 100644 pkg/simple/controller/namespace/namespaces.go rename pkg/utils/{ => iputil}/iputils.go (98%) rename pkg/utils/{ => jsonutil}/jsonutils.go (98%) rename pkg/utils/{jwt => jwtutil}/jwt.go (79%) create mode 100644 pkg/utils/k8sutil/k8sutil.go rename pkg/utils/{ => sliceutil}/sliceutils.go (98%) delete mode 100644 pkg/utils/utils.go rename vendor/github.com/{spf13/cobra/cobra/cmd/license_apache_2.go => docker/spdystream/LICENSE} (84%) create mode 100644 vendor/github.com/docker/spdystream/LICENSE.docs create mode 100644 vendor/github.com/docker/spdystream/connection.go create mode 100644 vendor/github.com/docker/spdystream/handlers.go create mode 100644 vendor/github.com/docker/spdystream/priority.go create mode 100644 
vendor/github.com/docker/spdystream/spdy/dictionary.go create mode 100644 vendor/github.com/docker/spdystream/spdy/read.go create mode 100644 vendor/github.com/docker/spdystream/spdy/types.go create mode 100644 vendor/github.com/docker/spdystream/spdy/write.go create mode 100644 vendor/github.com/docker/spdystream/stream.go create mode 100644 vendor/github.com/docker/spdystream/utils.go delete mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE delete mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go delete mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go delete mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go delete mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go delete mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go delete mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go delete mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go delete mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go delete mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/licenses.go delete mode 100644 vendor/golang.org/x/tools/container/intsets/popcnt_amd64.go delete mode 100644 vendor/golang.org/x/tools/container/intsets/popcnt_amd64.s delete mode 100644 vendor/golang.org/x/tools/container/intsets/popcnt_gccgo.go delete mode 100644 vendor/golang.org/x/tools/container/intsets/popcnt_gccgo_c.c delete mode 100644 vendor/golang.org/x/tools/container/intsets/popcnt_generic.go delete mode 100644 vendor/golang.org/x/tools/container/intsets/sparse.go delete mode 100644 vendor/golang.org/x/tools/container/intsets/util.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/LICENSE create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/doc.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/eventsource.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/frame.go create mode 100644 
vendor/gopkg.in/igm/sockjs-go.v2/sockjs/handler.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/htmlfile.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/httpreceiver.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/iframe.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/jsonp.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/mapping.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/options.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/session.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/sockjs.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/utils.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/web.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/websocket.go create mode 100644 vendor/gopkg.in/igm/sockjs-go.v2/sockjs/xhr.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/doc.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/errorstream.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/reader.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/resize.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v1.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v2.go create mode 100644 
vendor/k8s.io/client-go/tools/remotecommand/v3.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v4.go create mode 100644 vendor/k8s.io/client-go/transport/spdy/spdy.go create mode 100644 vendor/k8s.io/client-go/util/exec/exec.go delete mode 100644 vendor/k8s.io/kubernetes/third_party/forked/godep/license.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go diff --git a/Gopkg.lock b/Gopkg.lock index 5f5ed4df3..5793b1169 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -142,6 +142,15 @@ packages = ["."] revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20" +[[projects]] + branch = "master" + name = "github.com/docker/spdystream" + packages = [ + ".", + "spdy", + ] + revision = "6480d4af844c189cf5dd913db24ddd339d3a4f85" + [[projects]] name = "github.com/dustin/go-humanize" packages = ["."] @@ -740,7 +749,7 @@ "api/prometheus/v1", "prometheus", "prometheus/internal", - "prometheus/promhttp" + "prometheus/promhttp", ] revision = "505eaef017263e299324067d40ca2c48f6a2cf50" version = "v0.9.2" @@ -756,7 +765,7 @@ packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", - "model" + "model", ] revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250" version = "v0.2.0" @@ -769,7 +778,7 @@ "internal/util", "iostats", "nfs", - "xfs" + "xfs", ] revision = "e56f2e22fc761e82a34aca553f6725e2aff4fe6c" @@ -778,7 +787,7 @@ packages = [ "modfile", "module", - "semver" + "semver", ] revision = "1cf9852c553c5b7da2d5a4a091129a7822fed0c9" version = "v1.2.2" @@ -793,7 +802,7 @@ name = "github.com/spf13/afero" packages = [ ".", - "mem" + "mem", ] revision = "f4711e4db9e9a1d3887343acb72b2bbfc2f686f5" version = "v1.2.1" @@ -820,7 +829,7 @@ name = "github.com/stretchr/testify" packages = [ "assert", - "mock" + "mock", ] revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053" version = "v1.3.0" @@ -843,7 +852,7 @@ "lego", "log", 
"platform/wait", - "registration" + "registration", ] revision = "2952cdaebd4da7cd560e195343bdd3cb78a67643" version = "v2.3.0" @@ -868,7 +877,7 @@ "internal/bufferpool", "internal/color", "internal/exit", - "zapcore" + "zapcore", ] revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" version = "v1.9.1" @@ -883,7 +892,7 @@ "hkdf", "ocsp", "pbkdf2", - "ssh/terminal" + "ssh/terminal", ] revision = "a1f597ede03a7bef967a422b5b3a5bd08805a01e" @@ -906,7 +915,7 @@ "internal/socks", "ipv4", "ipv6", - "proxy" + "proxy", ] revision = "9f648a60d9775ef5c977e7669d1673a7a67bef33" @@ -918,7 +927,7 @@ "google", "internal", "jws", - "jwt" + "jwt", ] revision = "e64efc72b421e893cbf63f17ba2221e7d6d0b0f3" @@ -928,8 +937,9 @@ packages = [ "cpu", "unix", - "windows" + "windows", ] + revision = "fead79001313d15903fb4605b4a1b781532cd93e" [[projects]] @@ -961,7 +971,7 @@ "unicode/cldr", "unicode/norm", "unicode/rangetable", - "width" + "width", ] revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" @@ -976,7 +986,6 @@ branch = "master" name = "golang.org/x/tools" packages = [ - "container/intsets", "go/ast/astutil", "go/gcexportdata", "go/internal/cgo", @@ -988,7 +997,7 @@ "internal/fastwalk", "internal/gopathwalk", "internal/module", - "internal/semver" + "internal/semver", ] revision = "8b67d361bba210f5fbb3c1a0fc121e0847b10b57" @@ -1005,7 +1014,7 @@ "internal/modules", "internal/remote_api", "internal/urlfetch", - "urlfetch" + "urlfetch", ] revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1" version = "v1.4.0" @@ -1023,6 +1032,12 @@ source = "https://github.com/fsnotify/fsnotify.git" version = "v1.4.7" +[[projects]] + name = "gopkg.in/igm/sockjs-go.v2" + packages = ["sockjs"] + revision = "d276e9ffe5cc5c271b81198cc77a2adf6c4482d2" + version = "v2.0.0" + [[projects]] name = "gopkg.in/inf.v0" packages = ["."] @@ -1041,7 +1056,7 @@ ".", "cipher", "json", - "jwt" + "jwt", ] revision = "628223f44a71f715d2881ea69afc795a1e9c01be" version = "v2.3.0" @@ -1093,7 +1108,7 @@ 
"settings/v1alpha1", "storage/v1", "storage/v1alpha1", - "storage/v1beta1" + "storage/v1beta1", ] revision = "05914d821849570fba9eacfb29466f2d8d3cd229" version = "kubernetes-1.13.1" @@ -1106,7 +1121,7 @@ "pkg/client/clientset/clientset", "pkg/client/clientset/clientset/scheme", "pkg/client/clientset/clientset/typed/apiextensions/v1beta1", - "pkg/features" + "pkg/features", ] revision = "0fe22c71c47604641d9aa352c785b7912c200562" version = "kubernetes-1.13.1" @@ -1144,12 +1159,15 @@ "pkg/util/diff", "pkg/util/errors", "pkg/util/framer", + "pkg/util/httpstream", + "pkg/util/httpstream/spdy", "pkg/util/intstr", "pkg/util/json", "pkg/util/mergepatch", "pkg/util/naming", "pkg/util/net", "pkg/util/rand", + "pkg/util/remotecommand", "pkg/util/runtime", "pkg/util/sets", "pkg/util/strategicpatch", @@ -1161,7 +1179,8 @@ "pkg/version", "pkg/watch", "third_party/forked/golang/json", - "third_party/forked/golang/reflect" + "third_party/forked/golang/netutil", + "third_party/forked/golang/reflect", ] revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd" version = "kubernetes-1.13.1" @@ -1176,7 +1195,7 @@ "pkg/authorization/authorizer", "pkg/endpoints/request", "pkg/features", - "pkg/util/feature" + "pkg/util/feature", ] revision = "3ccfe8365421eb08e334b195786a2973460741d8" version = "kubernetes-1.13.1" @@ -1318,17 +1337,20 @@ "tools/pager", "tools/record", "tools/reference", + "tools/remotecommand", "tools/watch", "transport", + "transport/spdy", "util/buffer", "util/cert", "util/connrotation", + "util/exec", "util/flowcontrol", "util/homedir", "util/integer", "util/jsonpath", "util/retry", - "util/workqueue" + "util/workqueue", ] revision = "8d9ed539ba3134352c586810e749e58df4e94e4f" version = "kubernetes-1.13.1" @@ -1344,8 +1366,9 @@ "cmd/client-gen/generators/util", "cmd/client-gen/path", "cmd/client-gen/types", - "pkg/util" + "pkg/util", ] + pruneopts = "T" revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae" version = "kubernetes-1.13.1" @@ -1360,7 +1383,7 @@ "generator", 
"namer", "parser", - "types" + "types", ] revision = "b90029ef6cd877cb3f422d75b3a07707e3aac6b7" @@ -1407,7 +1430,7 @@ "pkg/util/net/sets", "pkg/util/parsers", "pkg/util/slice", - "pkg/util/taints" + "pkg/util/taints", ] revision = "c27b913fddd1a6c480c229191a087698aa92f0b1" version = "v1.13.4" @@ -1433,6 +1456,7 @@ "pkg/client/apiutil", "pkg/client/config", "pkg/controller", + "pkg/controller/controllerutil", "pkg/envtest", "pkg/envtest/printer", "pkg/event", @@ -1461,7 +1485,7 @@ "pkg/webhook/internal/cert/writer", "pkg/webhook/internal/cert/writer/atomic", "pkg/webhook/internal/metrics", - "pkg/webhook/types" + "pkg/webhook/types", ] revision = "12d98582e72927b6cd0123e2b4e819f9341ce62c" version = "v0.1.10" @@ -1478,7 +1502,7 @@ "pkg/rbac", "pkg/util", "pkg/webhook", - "pkg/webhook/internal" + "pkg/webhook/internal", ] revision = "fbf141159251d035089e7acdd5a343f8cec91b94" version = "v0.1.9" @@ -1488,7 +1512,7 @@ packages = [ "integration", "integration/addr", - "integration/internal" + "integration/internal", ] revision = "d348cb12705b516376e0c323bacca72b00a78425" version = "v0.1.1" @@ -1502,6 +1526,111 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "662b6da91343ff0a611e4487b8eef803b103b676f5b2a5db7ed8351846218fc5" + input-imports = [ + "github.com/dgrijalva/jwt-go", + "github.com/docker/docker/api/types", + "github.com/docker/docker/client", + "github.com/emicklei/go-restful", + "github.com/emicklei/go-restful-openapi", + "github.com/go-ldap/ldap", + "github.com/go-openapi/spec", + "github.com/go-redis/redis", + "github.com/go-sql-driver/mysql", + "github.com/golang/glog", + "github.com/google/uuid", + "github.com/jinzhu/gorm", + "github.com/json-iterator/go", + "github.com/kiali/kiali/config", + "github.com/kiali/kiali/handlers", + "github.com/knative/pkg/apis/istio/v1alpha3", + "github.com/knative/pkg/client/clientset/versioned", + "github.com/knative/pkg/client/informers/externalversions", + 
"github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3", + "github.com/knative/pkg/client/listers/istio/v1alpha3", + "github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1", + "github.com/kubesphere/s2ioperator/pkg/apis/devops/v1alpha1", + "github.com/kubesphere/s2ioperator/pkg/client/clientset/versioned", + "github.com/kubesphere/s2ioperator/pkg/client/informers/externalversions", + "github.com/mholt/caddy", + "github.com/mholt/caddy/caddy/caddymain", + "github.com/mholt/caddy/caddyhttp/httpserver", + "github.com/onsi/ginkgo", + "github.com/onsi/gomega", + "github.com/spf13/cobra", + "github.com/spf13/pflag", + "golang.org/x/net/context", + "gopkg.in/igm/sockjs-go.v2/sockjs", + "gopkg.in/yaml.v2", + "k8s.io/api/apps/v1", + "k8s.io/api/apps/v1beta2", + "k8s.io/api/batch/v1", + "k8s.io/api/batch/v1beta1", + "k8s.io/api/core/v1", + "k8s.io/api/extensions/v1beta1", + "k8s.io/api/policy/v1beta1", + "k8s.io/api/rbac/v1", + "k8s.io/api/storage/v1", + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions", + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", + "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/resource", + "k8s.io/apimachinery/pkg/apis/meta/v1", + "k8s.io/apimachinery/pkg/fields", + "k8s.io/apimachinery/pkg/labels", + "k8s.io/apimachinery/pkg/runtime", + "k8s.io/apimachinery/pkg/runtime/schema", + "k8s.io/apimachinery/pkg/runtime/serializer", + "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/json", + "k8s.io/apimachinery/pkg/util/runtime", + "k8s.io/apimachinery/pkg/util/sets", + "k8s.io/apimachinery/pkg/util/wait", + "k8s.io/apimachinery/pkg/watch", + "k8s.io/apiserver/pkg/authentication/user", + "k8s.io/apiserver/pkg/authorization/authorizer", + "k8s.io/apiserver/pkg/endpoints/request", + "k8s.io/client-go/discovery", + "k8s.io/client-go/discovery/fake", + "k8s.io/client-go/informers", + "k8s.io/client-go/informers/apps/v1", + "k8s.io/client-go/informers/core/v1", + 
"k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/scheme", + "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/listers/apps/v1", + "k8s.io/client-go/listers/core/v1", + "k8s.io/client-go/plugin/pkg/client/auth/gcp", + "k8s.io/client-go/rest", + "k8s.io/client-go/testing", + "k8s.io/client-go/tools/cache", + "k8s.io/client-go/tools/clientcmd", + "k8s.io/client-go/tools/record", + "k8s.io/client-go/tools/remotecommand", + "k8s.io/client-go/util/flowcontrol", + "k8s.io/client-go/util/workqueue", + "k8s.io/code-generator/cmd/client-gen", + "k8s.io/gengo/examples/deepcopy-gen/generators", + "k8s.io/gengo/examples/defaulter-gen/generators", + "k8s.io/klog", + "k8s.io/kubernetes/pkg/apis/core", + "k8s.io/kubernetes/pkg/controller", + "k8s.io/kubernetes/pkg/util/metrics", + "k8s.io/kubernetes/pkg/util/slice", + "sigs.k8s.io/application/pkg/controller/application", + "sigs.k8s.io/controller-runtime/pkg/client", + "sigs.k8s.io/controller-runtime/pkg/client/config", + "sigs.k8s.io/controller-runtime/pkg/controller", + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil", + "sigs.k8s.io/controller-runtime/pkg/envtest", + "sigs.k8s.io/controller-runtime/pkg/handler", + "sigs.k8s.io/controller-runtime/pkg/manager", + "sigs.k8s.io/controller-runtime/pkg/reconcile", + "sigs.k8s.io/controller-runtime/pkg/runtime/log", + "sigs.k8s.io/controller-runtime/pkg/runtime/scheme", + "sigs.k8s.io/controller-runtime/pkg/runtime/signals", + "sigs.k8s.io/controller-runtime/pkg/source", + "sigs.k8s.io/controller-tools/cmd/controller-gen", + "sigs.k8s.io/testing_frameworks/integration", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index b9c34c429..71ccc9078 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -116,3 +116,7 @@ required = [ [[constraint]] branch = "master" name = "github.com/knative/pkg" + +[[constraint]] + name = "gopkg.in/igm/sockjs-go.v2" + version = "2.0.0" diff --git a/Makefile b/Makefile index 
8a3848243..23fa1c053 100644 --- a/Makefile +++ b/Makefile @@ -64,7 +64,7 @@ deploy: manifests # Generate DeepCopy to implement runtime.Object deepcopy: - ./vendor/k8s.io/code-generator/generate-groups.sh deepcopy,lister,informer,client kubesphere.io/kubesphere/pkg/client kubesphere.io/kubesphere/pkg/apis "servicemesh:v1alpha2" + ./vendor/k8s.io/code-generator/generate-groups.sh all kubesphere.io/kubesphere/pkg/client kubesphere.io/kubesphere/pkg/apis "servicemesh:v1alpha2 tenant:v1alpha1" # Generate code generate: @@ -79,7 +79,7 @@ docker-build: all # Run tests test: generate fmt vet - export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT=1m; go test ./pkg/... ./cmd/... -coverprofile cover.out + go test ./pkg/... ./cmd/... -coverprofile cover.out .PHONY: clean clean: diff --git a/cmd/controller-manager/app/controllers.go b/cmd/controller-manager/app/controllers.go index a93a841d8..51ea4ef4b 100644 --- a/cmd/controller-manager/app/controllers.go +++ b/cmd/controller-manager/app/controllers.go @@ -1,3 +1,20 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ package app import ( @@ -6,7 +23,6 @@ import ( "k8s.io/client-go/rest" "kubesphere.io/kubesphere/pkg/controller/destinationrule" "kubesphere.io/kubesphere/pkg/controller/virtualservice" - "kubesphere.io/kubesphere/pkg/simple/controller/namespace" "sigs.k8s.io/controller-runtime/pkg/manager" "time" @@ -60,11 +76,6 @@ func AddControllers(mgr manager.Manager, cfg *rest.Config, stopCh <-chan struct{ kubeClient, istioclient) - nsController := namespace.NewNamespaceController(kubeClient, - informerFactory.Core().V1().Namespaces(), - informerFactory.Rbac().V1().Roles(), - ) - servicemeshinformer.Start(stopCh) istioInformer.Start(stopCh) informerFactory.Start(stopCh) @@ -72,7 +83,6 @@ func AddControllers(mgr manager.Manager, cfg *rest.Config, stopCh <-chan struct{ controllers := map[string]manager.Runnable{ "virtualservice-controller": vsController, "destinationrule-controller": drController, - "namespace-controller": nsController, } for name, ctrl := range controllers { diff --git a/cmd/ks-apiserver/apiserver.go b/cmd/ks-apiserver/apiserver.go index 10577b767..2ac59e2e2 100644 --- a/cmd/ks-apiserver/apiserver.go +++ b/cmd/ks-apiserver/apiserver.go @@ -27,6 +27,8 @@ import ( _ "kubesphere.io/kubesphere/pkg/apis/operations/install" _ "kubesphere.io/kubesphere/pkg/apis/resources/install" _ "kubesphere.io/kubesphere/pkg/apis/servicemesh/metrics/install" + _ "kubesphere.io/kubesphere/pkg/apis/tenant/install" + _ "kubesphere.io/kubesphere/pkg/apis/terminal/install" ) func main() { diff --git a/cmd/ks-apiserver/app/options/options.go b/cmd/ks-apiserver/app/options/options.go index 62e41ac1c..c8a958b34 100644 --- a/cmd/ks-apiserver/app/options/options.go +++ b/cmd/ks-apiserver/app/options/options.go @@ -29,6 +29,4 @@ func (s *ServerRunOptions) AddFlags(fs *pflag.FlagSet) { s.GenericServerRunOptions.AddFlags(fs) fs.StringVar(&s.IstioPilotServiceURL, "istio-pilot-service-url", "http://istio-pilot.istio-system.svc:8080/version", "istio pilot discovery service url") - 
fs.StringVar(&s.OpenPitrixServer, "openpitrix-server", "http://openpitrix-api-gateway.openpitrix-system.svc", "openpitrix server") - fs.StringVar(&s.OpenPitrixProxyToken, "openpitrix-proxy-token", "", "openpitrix proxy token") } diff --git a/cmd/ks-apiserver/app/server.go b/cmd/ks-apiserver/app/server.go index b4d1ba45e..a58f36b13 100644 --- a/cmd/ks-apiserver/app/server.go +++ b/cmd/ks-apiserver/app/server.go @@ -29,8 +29,10 @@ import ( "kubesphere.io/kubesphere/pkg/apiserver/runtime" "kubesphere.io/kubesphere/pkg/filter" "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/models" logging "kubesphere.io/kubesphere/pkg/models/log" "kubesphere.io/kubesphere/pkg/signals" + "kubesphere.io/kubesphere/pkg/simple/client/mysql" "log" "net/http" ) @@ -70,28 +72,43 @@ func Run(s *options.ServerRunOptions) error { container := runtime.Container container.Filter(filter.Logging) - log.Printf("Server listening on %d.", s.GenericServerRunOptions.InsecurePort) - for _, webservice := range container.RegisteredWebServices() { for _, route := range webservice.Routes() { - log.Printf(route.Path) + log.Println(route.Method, route.Path) } } initializeESClientConfig() initializeKialiConfig(s) + err = initializeDatabase() + + if err != nil { + return err + } if s.GenericServerRunOptions.InsecurePort != 0 { + log.Printf("Server listening on %d.", s.GenericServerRunOptions.InsecurePort) err = http.ListenAndServe(fmt.Sprintf("%s:%d", s.GenericServerRunOptions.BindAddress, s.GenericServerRunOptions.InsecurePort), container) } if s.GenericServerRunOptions.SecurePort != 0 && len(s.GenericServerRunOptions.TlsCertFile) > 0 && len(s.GenericServerRunOptions.TlsPrivateKey) > 0 { + log.Printf("Server listening on %d.", s.GenericServerRunOptions.SecurePort) err = http.ListenAndServeTLS(fmt.Sprintf("%s:%d", s.GenericServerRunOptions.BindAddress, s.GenericServerRunOptions.SecurePort), s.GenericServerRunOptions.TlsCertFile, s.GenericServerRunOptions.TlsPrivateKey, container) } 
return err } +func initializeDatabase() error { + db := mysql.Client() + if !db.HasTable(&models.WorkspaceDPBinding{}) { + if err := db.CreateTable(&models.WorkspaceDPBinding{}).Error; err != nil { + return err + } + } + return nil +} + func initializeKialiConfig(s *options.ServerRunOptions) { // Initialize kiali config config := kconfig.NewConfig() @@ -114,7 +131,7 @@ func initializeKialiConfig(s *options.ServerRunOptions) { func initializeESClientConfig() { // List all outputs - outputs,err := logging.GetFluentbitOutputFromConfigMap() + outputs, err := logging.GetFluentbitOutputFromConfigMap() if err != nil { glog.Errorln(err) return @@ -153,9 +170,11 @@ func waitForResourceSync() { informerFactory.Apps().V1().StatefulSets().Lister() informerFactory.Apps().V1().Deployments().Lister() informerFactory.Apps().V1().DaemonSets().Lister() + informerFactory.Apps().V1().ReplicaSets().Lister() informerFactory.Batch().V1().Jobs().Lister() informerFactory.Batch().V1beta1().CronJobs().Lister() + informerFactory.Extensions().V1beta1().Ingresses().Lister() informerFactory.Start(stopChan) informerFactory.WaitForCacheSync(stopChan) @@ -167,5 +186,12 @@ func waitForResourceSync() { s2iInformerFactory.Start(stopChan) s2iInformerFactory.WaitForCacheSync(stopChan) + + ksInformerFactory := informers.KsSharedInformerFactory() + ksInformerFactory.Tenant().V1alpha1().Workspaces().Lister() + + ksInformerFactory.Start(stopChan) + ksInformerFactory.WaitForCacheSync(stopChan) + log.Println("resources sync success") } diff --git a/cmd/ks-iam/app/options/options.go b/cmd/ks-iam/app/options/options.go index 4fd808f7c..9de464ec6 100644 --- a/cmd/ks-iam/app/options/options.go +++ b/cmd/ks-iam/app/options/options.go @@ -1,3 +1,20 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ package options import ( @@ -10,6 +27,7 @@ type ServerRunOptions struct { AdminEmail string AdminPassword string TokenExpireTime string + JWTSecret string } func NewServerRunOptions() *ServerRunOptions { @@ -22,6 +40,7 @@ func NewServerRunOptions() *ServerRunOptions { func (s *ServerRunOptions) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.AdminEmail, "admin-email", "admin@kubesphere.io", "default administrator's email") fs.StringVar(&s.AdminPassword, "admin-password", "passw0rd", "default administrator's password") - fs.StringVar(&s.TokenExpireTime, "token-expire-time", "24h", "token expire time") + fs.StringVar(&s.TokenExpireTime, "token-expire-time", "2h", "token expire time,valid time units are \"ns\",\"us\",\"ms\",\"s\",\"m\",\"h\"") + fs.StringVar(&s.JWTSecret, "jwt-secret", "", "jwt secret") s.GenericServerRunOptions.AddFlags(fs) } diff --git a/cmd/ks-iam/app/server.go b/cmd/ks-iam/app/server.go index 24a9de24d..29d7deab8 100644 --- a/cmd/ks-iam/app/server.go +++ b/cmd/ks-iam/app/server.go @@ -29,6 +29,7 @@ import ( "kubesphere.io/kubesphere/pkg/informers" "kubesphere.io/kubesphere/pkg/models/iam" "kubesphere.io/kubesphere/pkg/signals" + "kubesphere.io/kubesphere/pkg/utils/jwtutil" "log" "net/http" "time" @@ -54,14 +55,12 @@ cluster's shared state through which all other components interact.`, } func Run(s *options.ServerRunOptions) error { - pflag.VisitAll(func(flag *pflag.Flag) { log.Printf("FLAG: --%s=%q", flag.Name, flag.Value) }) var err error - expireTime, err := time.ParseDuration(s.TokenExpireTime) if err != nil { @@ -69,6 +68,7 @@ func Run(s 
*options.ServerRunOptions) error { } err = iam.Init(s.AdminEmail, s.AdminPassword, expireTime) + jwtutil.Setup(s.JWTSecret) if err != nil { return err @@ -79,11 +79,19 @@ func Run(s *options.ServerRunOptions) error { container := runtime.Container container.Filter(filter.Logging) + for _, webservice := range container.RegisteredWebServices() { + for _, route := range webservice.Routes() { + log.Println(route.Method, route.Path) + } + } + if s.GenericServerRunOptions.InsecurePort != 0 { + log.Printf("Server listening on %d.", s.GenericServerRunOptions.InsecurePort) err = http.ListenAndServe(fmt.Sprintf("%s:%d", s.GenericServerRunOptions.BindAddress, s.GenericServerRunOptions.InsecurePort), container) } if s.GenericServerRunOptions.SecurePort != 0 && len(s.GenericServerRunOptions.TlsCertFile) > 0 && len(s.GenericServerRunOptions.TlsPrivateKey) > 0 { + log.Printf("Server listening on %d.", s.GenericServerRunOptions.SecurePort) err = http.ListenAndServeTLS(fmt.Sprintf("%s:%d", s.GenericServerRunOptions.BindAddress, s.GenericServerRunOptions.SecurePort), s.GenericServerRunOptions.TlsCertFile, s.GenericServerRunOptions.TlsPrivateKey, container) } @@ -103,5 +111,11 @@ func waitForResourceSync() { informerFactory.Start(stopChan) informerFactory.WaitForCacheSync(stopChan) + + ksInformerFactory := informers.KsSharedInformerFactory() + ksInformerFactory.Tenant().V1alpha1().Workspaces().Lister() + + ksInformerFactory.Start(stopChan) + ksInformerFactory.WaitForCacheSync(stopChan) log.Println("resources sync success") } diff --git a/config/crds/tenant_v1alpha1_workspace.yaml b/config/crds/tenant_v1alpha1_workspace.yaml new file mode 100644 index 000000000..7b0d6ca92 --- /dev/null +++ b/config/crds/tenant_v1alpha1_workspace.yaml @@ -0,0 +1,50 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: workspaces.tenant.kubesphere.io +spec: + group: tenant.kubesphere.io + 
names: + kind: Workspace + plural: workspaces + scope: Cluster + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + manager: + type: string + quotas: + type: object + type: object + status: + properties: + quotas: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: object + type: object + version: v1alpha1 +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/rbac/rbac_role.yaml b/config/rbac/rbac_role.yaml index 0662cc129..a15e43a3e 100644 --- a/config/rbac/rbac_role.yaml +++ b/config/rbac/rbac_role.yaml @@ -4,84 +4,100 @@ metadata: creationTimestamp: null name: manager-role rules: -- apiGroups: - - networking.istio.io - resources: - - virtualservices - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - networking.istio.io - resources: - - virtualservices/status - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - servicemesh.kubesphere.io - resources: - - strategies - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - servicemesh.kubesphere.io - resources: - - strategies/status - verbs: - - get - - 
update - - patch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - watch - - create - - update - - patch - - delete + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - namespaces/status + verbs: + - get + - update + - patch + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - namespaces/status + verbs: + - get + - update + - patch + - apiGroups: + - tenant.kubesphere.io + resources: + - workspaces + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - tenant.kubesphere.io + resources: + - workspaces/status + verbs: + - get + - update + - patch + - apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - create + - update + - patch + - delete diff --git a/config/samples/tenant_v1alpha1_workspace.yaml b/config/samples/tenant_v1alpha1_workspace.yaml new file mode 100644 index 000000000..cb67d3bb5 --- /dev/null +++ b/config/samples/tenant_v1alpha1_workspace.yaml @@ -0,0 +1,8 @@ +apiVersion: tenant.kubesphere.io/v1alpha1 +kind: Workspace +metadata: + labels: + controller-tools.k8s.io: "1.0" 
+ name: workspace-sample + spec: + manager: admin diff --git a/hack/docker_build.sh b/hack/docker_build.sh index cf814de17..4ae937ef5 100755 --- a/hack/docker_build.sh +++ b/hack/docker_build.sh @@ -2,6 +2,6 @@ docker build -f build/ks-apigateway/Dockerfile -t kubespheredev/ks-apigateway:latest . docker build -f build/ks-apiserver/Dockerfile -t kubespheredev/ks-apiserver:latest . - docker build -f build/ks-iam/Dockerfile -t kubespheredev/ks-iam:latest . + docker build -f build/ks-iam/Dockerfile -t kubespheredev/ks-account:latest . docker build -f build/controller-manager/Dockerfile -t kubespheredev/ks-controller-manager:latest . diff --git a/hack/docker_push.sh b/hack/docker_push.sh index b6db80c2e..9367f279d 100755 --- a/hack/docker_push.sh +++ b/hack/docker_push.sh @@ -5,5 +5,5 @@ echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin docker push kubespheredev/ks-apigateway:latest docker push kubespheredev/ks-apiserver:latest -docker push kubespheredev/ks-iam:latest +docker push kubespheredev/ks-account:latest docker push kubespheredev/ks-controller-manager:latest diff --git a/pkg/apigateway/caddy-plugin/authenticate/authenticate.go b/pkg/apigateway/caddy-plugin/authenticate/authenticate.go index 7bda950fe..e23df9b3b 100644 --- a/pkg/apigateway/caddy-plugin/authenticate/authenticate.go +++ b/pkg/apigateway/caddy-plugin/authenticate/authenticate.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/apiserver/pkg/endpoints/request" + "log" "net/http" "strconv" "strings" @@ -50,7 +51,7 @@ type User struct { } var requestInfoFactory = request.RequestInfoFactory{ - APIPrefixes: sets.NewString("api", "apis"), + APIPrefixes: sets.NewString("api", "apis", "kapis", "kapi"), GrouplessAPIPrefixes: sets.NewString("api")} func (h Auth) ServeHTTP(resp http.ResponseWriter, req *http.Request) (int, error) { @@ -71,6 +72,7 @@ func (h Auth) ServeHTTP(resp http.ResponseWriter, req *http.Request) 
(int, error token, err := h.Validate(uToken) if err != nil { + log.Println(uToken) return h.HandleUnauthorized(resp, err), nil } @@ -166,6 +168,7 @@ func (h Auth) Validate(uToken string) (*jwt.Token, error) { func (h Auth) HandleUnauthorized(w http.ResponseWriter, err error) int { message := fmt.Sprintf("Unauthorized,%v", err) w.Header().Add("WWW-Authenticate", message) + log.Println(message) return http.StatusUnauthorized } diff --git a/pkg/apigateway/caddy-plugin/authentication/authentication.go b/pkg/apigateway/caddy-plugin/authentication/authentication.go index fc46a8cc7..cd875176d 100644 --- a/pkg/apigateway/caddy-plugin/authentication/authentication.go +++ b/pkg/apigateway/caddy-plugin/authentication/authentication.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubernetes/pkg/util/slice" "kubesphere.io/kubesphere/pkg/informers" - sliceutils "kubesphere.io/kubesphere/pkg/utils" + "kubesphere.io/kubesphere/pkg/utils/sliceutil" ) type Authentication struct { @@ -87,6 +87,10 @@ func handleForbidden(w http.ResponseWriter, err error) int { func permissionValidate(attrs authorizer.Attributes) (bool, error) { + if attrs.GetResource() == "users" && attrs.GetUser().GetName() == attrs.GetName() { + return true, nil + } + permitted, err := clusterRoleValidate(attrs) if err != nil { @@ -164,7 +168,7 @@ func clusterRoleValidate(attrs authorizer.Attributes) (bool, error) { for _, subject := range clusterRoleBinding.Subjects { if (subject.Kind == v1.UserKind && subject.Name == attrs.GetUser().GetName()) || - (subject.Kind == v1.GroupKind && sliceutils.HasString(attrs.GetUser().GetGroups(), subject.Name)) { + (subject.Kind == v1.GroupKind && sliceutil.HasString(attrs.GetUser().GetGroups(), subject.Name)) { clusterRole, err := clusterRoleLister.Get(clusterRoleBinding.RoleRef.Name) @@ -198,11 +202,11 @@ func ruleMatchesResources(rule v1.PolicyRule, apiGroup string, resource string, return false } - if !sliceutils.HasString(rule.APIGroups, 
apiGroup) && !sliceutils.HasString(rule.APIGroups, v1.ResourceAll) { + if !sliceutil.HasString(rule.APIGroups, apiGroup) && !sliceutil.HasString(rule.APIGroups, v1.ResourceAll) { return false } - if len(rule.ResourceNames) > 0 && !sliceutils.HasString(rule.ResourceNames, resourceName) { + if len(rule.ResourceNames) > 0 && !sliceutil.HasString(rule.ResourceNames, resourceName) { return false } @@ -234,7 +238,7 @@ func ruleMatchesResources(rule v1.PolicyRule, apiGroup string, resource string, func ruleMatchesRequest(rule v1.PolicyRule, apiGroup string, nonResourceURL string, resource string, subresource string, resourceName string, verb string) bool { - if !sliceutils.HasString(rule.Verbs, verb) && !sliceutils.HasString(rule.Verbs, v1.VerbAll) { + if !sliceutil.HasString(rule.Verbs, verb) && !sliceutil.HasString(rule.Verbs, v1.VerbAll) { return false } diff --git a/pkg/apigateway/caddy-plugin/authentication/auto_load.go b/pkg/apigateway/caddy-plugin/authentication/auto_load.go index 03f26a655..989f12b49 100644 --- a/pkg/apigateway/caddy-plugin/authentication/auto_load.go +++ b/pkg/apigateway/caddy-plugin/authentication/auto_load.go @@ -25,7 +25,6 @@ import ( "github.com/mholt/caddy/caddyhttp/httpserver" "kubesphere.io/kubesphere/pkg/informers" - "kubesphere.io/kubesphere/pkg/signals" ) func init() { @@ -43,13 +42,8 @@ func Setup(c *caddy.Controller) error { if err != nil { return err } - - if err != nil { - return err - } - + stopChan := make(chan struct{}, 0) c.OnStartup(func() error { - stopChan := signals.SetupSignalHandler() informerFactory := informers.SharedInformerFactory() informerFactory.Rbac().V1().Roles().Lister() informerFactory.Rbac().V1().RoleBindings().Lister() @@ -61,6 +55,11 @@ func Setup(c *caddy.Controller) error { return nil }) + c.OnShutdown(func() error { + close(stopChan) + return nil + }) + httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { return &Authentication{Next: next, Rule: rule} }) diff --git 
a/pkg/apis/addtoscheme_tenant_v1alpha1.go b/pkg/apis/addtoscheme_tenant_v1alpha1.go new file mode 100644 index 000000000..6d40a1afc --- /dev/null +++ b/pkg/apis/addtoscheme_tenant_v1alpha1.go @@ -0,0 +1,10 @@ +package apis + +import ( + "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) +} diff --git a/pkg/apis/iam/v1alpha2/register.go b/pkg/apis/iam/v1alpha2/register.go index 47578d8e3..4613a6c57 100644 --- a/pkg/apis/iam/v1alpha2/register.go +++ b/pkg/apis/iam/v1alpha2/register.go @@ -20,7 +20,6 @@ package v1alpha2 import ( "github.com/emicklei/go-restful" "github.com/emicklei/go-restful-openapi" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" "kubesphere.io/kubesphere/pkg/apiserver/iam" "kubesphere.io/kubesphere/pkg/apiserver/runtime" @@ -46,6 +45,7 @@ func addWebService(c *restful.Container) error { Doc("Token review"). Reads(iam.TokenReview{}). Writes(iam.TokenReview{}). + Doc("k8s token review"). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.POST("/login"). To(iam.LoginHandler). @@ -53,10 +53,10 @@ func addWebService(c *restful.Container) error { Reads(iam.LoginRequest{}). Writes(models.Token{}). Metadata(restfulspec.KeyOpenAPITags, tags)) - - ws.Route(ws.GET("/users/{name}"). - To(iam.UserDetail). + ws.Route(ws.GET("/users/{username}"). + To(iam.DescribeUser). Doc("User detail"). + Param(ws.PathParameter("username", "username")). Writes(models.User{}). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.POST("/users"). @@ -67,11 +67,13 @@ func addWebService(c *restful.Container) error { Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.DELETE("/users/{name}"). To(iam.DeleteUser). + Param(ws.PathParameter("name", "username")). Doc("Delete user"). Writes(errors.Error{}). 
Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.PUT("/users/{name}"). To(iam.UpdateUser). + Param(ws.PathParameter("name", "username")). Reads(models.User{}). Writes(errors.Error{}). Doc("Update user"). @@ -79,26 +81,29 @@ func addWebService(c *restful.Container) error { ws.Route(ws.GET("/users/{name}/log"). To(iam.UserLoginLog). + Param(ws.PathParameter("name", "username")). Doc("User login log"). Writes([]map[string]string{}). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.GET("/users"). - To(iam.UserList). + To(iam.ListUsers). Doc("User list"). Writes(models.PageableResponse{}). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.GET("/groups"). - To(iam.RootGroupList). + To(iam.ListGroups). Writes([]models.Group{}). Doc("User group list"). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.GET("/groups/{path}"). - To(iam.GroupDetail). + To(iam.DescribeGroup). + Param(ws.PathParameter("path", "group path")). Doc("User group detail"). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.GET("/groups/{path}/users"). - To(iam.GroupUsers). + To(iam.ListGroupUsers). + Param(ws.PathParameter("path", "group path")). Doc("Group user list"). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.POST("/groups"). @@ -108,139 +113,108 @@ func addWebService(c *restful.Container) error { Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.DELETE("/groups/{path}"). To(iam.DeleteGroup). + Param(ws.PathParameter("path", "group path")). Doc("Delete user group"). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.PUT("/groups/{path}"). To(iam.UpdateGroup). + Param(ws.PathParameter("path", "group path")). Doc("Update user group"). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.GET("/users/{username}/roles"). - To(iam.UserRoles). + To(iam.ListUserRoles). + Param(ws.PathParameter("username", "username")). Doc("Get user role list"). Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/namespaces/{namespace}/roles"). 
+ To(iam.ListRoles). + Param(ws.PathParameter("namespace", "namespace")). + Doc("Get role list"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/clusterroles"). + To(iam.ListClusterRoles). + Doc("Get cluster role list"). + Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.GET("/namespaces/{namespace}/roles/{role}/users"). - To(iam.RoleUsers). + To(iam.ListRoleUsers). + Param(ws.PathParameter("namespace", "namespace")). + Param(ws.PathParameter("role", "role name")). Doc("Get user list by role"). Metadata(restfulspec.KeyOpenAPITags, tags)) - ws.Route(ws.GET("/namespaces/{namespace}/roles/{role}/rules"). - To(iam.RoleRules). - Doc("Get role detail"). - Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.GET("/namespaces/{namespace}/users"). - To(iam.NamespaceUsers). + To(iam.ListNamespaceUsers). + Param(ws.PathParameter("namespace", "namespace")). Doc("Get user list by namespace"). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.GET("/clusterroles/{clusterrole}/users"). - To(iam.ClusterRoleUsers). + To(iam.ListClusterRoleUsers). + Param(ws.PathParameter("clusterrole", "cluster role name")). Doc("Get user list by cluster role"). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.GET("/clusterroles/{clusterrole}/rules"). - To(iam.ClusterRoleRules). + To(iam.ListClusterRoleRules). + Param(ws.PathParameter("clusterrole", "cluster role name")). Doc("Get cluster role detail"). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.GET("/rulesmapping/clusterroles"). - To(iam.ClusterRulesMappingHandler). + To(iam.ClusterRulesMapping). Doc("Get cluster role policy rules mapping"). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.GET("/rulesmapping/roles"). - To(iam.RulesMappingHandler). + To(iam.RulesMapping). Doc("Get role policy rules mapping"). Metadata(restfulspec.KeyOpenAPITags, tags)) - ws.Route(ws.GET("/workspaces/{workspace}/rules"). - To(iam.WorkspaceRulesHandler). - Doc("Get workspace level policy rules"). 
+ ws.Route(ws.GET("/workspaces/{workspace}/roles"). + To(iam.ListWorkspaceRoles). + Param(ws.PathParameter("workspace", "workspace name")). + Doc("List workspace role"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/workspaces/{workspace}/roles/{role}"). + To(iam.DescribeWorkspaceRole). + Param(ws.PathParameter("workspace", "workspace name")). + Param(ws.PathParameter("role", "workspace role name")). + Doc("Describe workspace role"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/workspaces/{workspace}/roles/{role}/rules"). + To(iam.ListWorkspaceRoleRules). + Param(ws.PathParameter("workspace", "workspace name")). + Param(ws.PathParameter("role", "workspace role name")). + Doc("Get workspace role policy rules"). Metadata(restfulspec.KeyOpenAPITags, tags)) ws.Route(ws.GET("/workspaces/{workspace}/members"). - To(iam.WorkspaceMemberList). + To(iam.ListWorkspaceUsers). + Param(ws.PathParameter("workspace", "workspace name")). Doc("Get workspace member list"). Metadata(restfulspec.KeyOpenAPITags, tags)) - ws.Route(ws.GET("/namespaces/{namespace}/rules"). - To(iam.NamespacesRulesHandler). - Doc("Get namespace level policy rules"). - Metadata(restfulspec.KeyOpenAPITags, tags)) - ws.Route(ws.GET("/devops/{devops}/rules"). - To(iam.DevopsRulesHandler). - Doc("Get devops project level policy rules"). - Metadata(restfulspec.KeyOpenAPITags, tags)) - - tags = []string{"Workspace"} - - ws.Route(ws.GET("/workspaces"). - To(iam.UserWorkspaceListHandler). - Doc("Get workspace list"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Writes([]models.Workspace{})) - ws.Route(ws.POST("/workspaces"). - To(iam.WorkspaceCreateHandler). - Doc("Create workspace"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Writes(models.Workspace{})) - ws.Route(ws.DELETE("/workspaces/{name}"). - To(iam.DeleteWorkspaceHandler). - Doc("Delete workspace"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Writes(errors.Error{})) - ws.Route(ws.GET("/workspaces/{name}"). 
- To(iam.WorkspaceDetailHandler). - Doc("Get workspace detail"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Writes(models.Workspace{})) - ws.Route(ws.PUT("/workspaces/{name}"). - To(iam.WorkspaceEditHandler). - Doc("Update workspace"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Writes(models.Workspace{})) - - ws.Route(ws.GET("/workspaces/{name}/members/{member}"). - To(iam.WorkspaceMemberDetail). - Doc("Get workspace member detail"). - Metadata(restfulspec.KeyOpenAPITags, tags)) - ws.Route(ws.GET("/workspaces/{name}/roles"). - To(iam.WorkspaceRoles). - Doc("Get workspace roles"). - Metadata(restfulspec.KeyOpenAPITags, tags)) - - ws.Route(ws.POST("/workspaces/{name}/members"). - To(iam.WorkspaceMemberInvite). + ws.Route(ws.POST("/workspaces/{workspace}/members"). + To(iam.InviteUser). + Param(ws.PathParameter("workspace", "workspace name")). Doc("Add user to workspace"). Metadata(restfulspec.KeyOpenAPITags, tags)) - ws.Route(ws.DELETE("/workspaces/{name}/members"). - To(iam.WorkspaceMemberRemove). - Doc("Delete user from workspace"). + ws.Route(ws.POST("/workspaces/{workspace}/members"). + To(iam.RemoveUser). + Param(ws.PathParameter("workspace", "workspace name")). + Doc("Remove user from workspace"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/workspaces/{workspace}/members/{username}"). + To(iam.DescribeWorkspaceUser). + Param(ws.PathParameter("workspace", "workspace name")). + Param(ws.PathParameter("username", "username")). + Doc("Describe user in workspace"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/namespaces/{namespace}/roles/{role}/rules"). + To(iam.ListRoleRules). + Param(ws.PathParameter("namespace", "namespace")). + Param(ws.PathParameter("role", "role name")). + Doc("Get namespace role policy rules"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/devops/{devops}/roles/{role}/rules"). + To(iam.ListDevopsRoleRules). + Param(ws.PathParameter("devops", "devops project id")). 
+ Param(ws.PathParameter("role", "devops role name")). + Doc("Get devops role policy rules"). Metadata(restfulspec.KeyOpenAPITags, tags)) - - tags = []string{"unstable"} - - ws.Route(ws.GET("/workspaces/{workspace}/namespaces"). - To(iam.UserNamespaceListHandler). - Doc("Get namespace list"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Writes(models.PageableResponse{})) - ws.Route(ws.POST("/workspaces/{name}/namespaces"). - To(iam.NamespaceCreateHandler). - Doc("Create namespace"). - Metadata(restfulspec.KeyOpenAPITags, tags). - Writes(v1.Namespace{})) - ws.Route(ws.DELETE("/workspaces/{name}/namespaces/{namespace}").To(iam.NamespaceDeleteHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) - ws.Route(ws.GET("/workspaces/{name}/namespaces/{namespace}").To(iam.NamespaceCheckHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) - ws.Route(ws.GET("/namespaces/{namespace}").To(iam.NamespaceCheckHandler)) - - // TODO move to /apis/resources.kubesphere.io/workspaces/{workspace}/members/{username} - ws.Route(ws.GET("/workspaces/{workspace}/members/{username}/namespaces").To(iam.NamespacesListHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) - ws.Route(ws.GET("/workspaces/{name}/members/{username}/devops").To(iam.DevOpsProjectHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) - // TODO /workspaces/{name}/roles/{role} - ws.Route(ws.GET("/workspaces/{name}/roles/{role}").To(iam.WorkspaceRoles).Metadata(restfulspec.KeyOpenAPITags, tags)) - // TODO move to /apis/resources.kubesphere.io/devops - ws.Route(ws.GET("/workspaces/{name}/devops").To(iam.DevOpsProjectHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) - ws.Route(ws.POST("/workspaces/{name}/devops").To(iam.DevOpsProjectCreateHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) - ws.Route(ws.DELETE("/workspaces/{name}/devops/{id}").To(iam.DevOpsProjectDeleteHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) - - // TODO merge into /groups - 
ws.Route(ws.GET("/groups/count").To(iam.CountHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) - - ws.Route(ws.GET("/users/{name}/namespaces").To(iam.NamespacesListHandler).Metadata(restfulspec.KeyOpenAPITags, tags)) - c.Add(ws) return nil } diff --git a/pkg/apis/logging/v1alpha2/register.go b/pkg/apis/logging/v1alpha2/register.go index 0e1672b35..2247e7f76 100644 --- a/pkg/apis/logging/v1alpha2/register.go +++ b/pkg/apis/logging/v1alpha2/register.go @@ -214,4 +214,4 @@ func addWebService(c *restful.Container) error { c.Add(ws) return nil -} \ No newline at end of file +} diff --git a/pkg/apis/resources/v1alpha2/register.go b/pkg/apis/resources/v1alpha2/register.go index 56726c1ff..3620c24ec 100644 --- a/pkg/apis/resources/v1alpha2/register.go +++ b/pkg/apis/resources/v1alpha2/register.go @@ -52,7 +52,7 @@ func addWebService(c *restful.Container) error { tags := []string{"Namespace resources"} webservice.Route(webservice.GET("/namespaces/{namespace}/{resources}"). - To(resources.NamespaceResourceHandler). + To(resources.ListResources). Metadata(restfulspec.KeyOpenAPITags, tags). Doc("Namespace level resource query"). Param(webservice.PathParameter("namespace", "which namespace")). @@ -69,7 +69,7 @@ func addWebService(c *restful.Container) error { tags = []string{"Cluster resources"} webservice.Route(webservice.GET("/{resources}"). - To(resources.ClusterResourceHandler). + To(resources.ListResources). Writes(models.PageableResponse{}). Metadata(restfulspec.KeyOpenAPITags, tags). Doc("Cluster level resource query"). @@ -196,26 +196,19 @@ func addWebService(c *restful.Container) error { webservice.Route(webservice.GET("/routers"). To(routers.GetAllRouters). - Doc("Get all routers"). + Doc("List all routers"). Metadata(restfulspec.KeyOpenAPITags, tags). Writes(corev1.Service{})) - webservice.Route(webservice.GET("/users/{username}/routers"). - To(routers.GetAllRoutersOfUser). - Doc("Get routers for user"). - Metadata(restfulspec.KeyOpenAPITags, tags). 
- Param(webservice.PathParameter("username", "")). - Writes(corev1.Service{})) - webservice.Route(webservice.GET("/namespaces/{namespace}/router"). To(routers.GetRouter). - Doc("Get router of a specified project"). + Doc("List router of a specified project"). Metadata(restfulspec.KeyOpenAPITags, tags). Param(webservice.PathParameter("namespace", "name of the project"))) webservice.Route(webservice.DELETE("/namespaces/{namespace}/router"). To(routers.DeleteRouter). - Doc("Get router of a specified project"). + Doc("List router of a specified project"). Metadata(restfulspec.KeyOpenAPITags, tags). Param(webservice.PathParameter("namespace", "name of the project"))) diff --git a/pkg/apis/tenant/group.go b/pkg/apis/tenant/group.go new file mode 100644 index 000000000..365293be8 --- /dev/null +++ b/pkg/apis/tenant/group.go @@ -0,0 +1,20 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +// Package tenant contains tenant API versions +package tenant diff --git a/pkg/apis/tenant/install/install.go b/pkg/apis/tenant/install/install.go new file mode 100644 index 000000000..9e344eb06 --- /dev/null +++ b/pkg/apis/tenant/install/install.go @@ -0,0 +1,33 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package install + +import ( + "github.com/emicklei/go-restful" + urlruntime "k8s.io/apimachinery/pkg/util/runtime" + tenantv1alpha2 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha2" + "kubesphere.io/kubesphere/pkg/apiserver/runtime" +) + +func init() { + Install(runtime.Container) +} + +func Install(container *restful.Container) { + urlruntime.Must(tenantv1alpha2.AddToContainer(container)) +} diff --git a/pkg/apis/tenant/v1alpha1/doc.go b/pkg/apis/tenant/v1alpha1/doc.go new file mode 100644 index 000000000..5ff815ff2 --- /dev/null +++ b/pkg/apis/tenant/v1alpha1/doc.go @@ -0,0 +1,25 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +// Package v1alpha1 contains API Schema definitions for the tenant v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=kubesphere.io/kubesphere/pkg/apis/tenant +// +k8s:defaulter-gen=TypeMeta +// +groupName=tenant.kubesphere.io +package v1alpha1 diff --git a/pkg/apis/tenant/v1alpha1/register.go b/pkg/apis/tenant/v1alpha1/register.go new file mode 100644 index 000000000..fcc919186 --- /dev/null +++ b/pkg/apis/tenant/v1alpha1/register.go @@ -0,0 +1,48 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +// NOTE: Boilerplate only. Ignore this file. + +// Package v1alpha1 contains API Schema definitions for the tenant v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=kubesphere.io/kubesphere/pkg/apis/tenant +// +k8s:defaulter-gen=TypeMeta +// +groupName=tenant.kubesphere.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: "tenant.kubesphere.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + // AddToScheme is required by pkg/client/... 
+ AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource is required by pkg/client/listers/... +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/pkg/apis/tenant/v1alpha1/v1alpha1_suite_test.go b/pkg/apis/tenant/v1alpha1/v1alpha1_suite_test.go new file mode 100644 index 000000000..991502bad --- /dev/null +++ b/pkg/apis/tenant/v1alpha1/v1alpha1_suite_test.go @@ -0,0 +1,57 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package v1alpha1 + +import ( + "log" + "os" + "path/filepath" + "testing" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" +) + +var cfg *rest.Config +var c client.Client + +func TestMain(m *testing.M) { + t := &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "crds")}, + } + + err := SchemeBuilder.AddToScheme(scheme.Scheme) + if err != nil { + log.Fatal(err) + } + + if cfg, err = t.Start(); err != nil { + log.Fatal(err) + } + + if c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}); err != nil { + log.Fatal(err) + } + + code := m.Run() + t.Stop() + os.Exit(code) +} diff --git a/pkg/apis/tenant/v1alpha1/workspace_types.go b/pkg/apis/tenant/v1alpha1/workspace_types.go new file mode 100644 index 000000000..1612479f7 --- /dev/null +++ b/pkg/apis/tenant/v1alpha1/workspace_types.go @@ -0,0 +1,67 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package v1alpha1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// WorkspaceSpec defines the desired state of Workspace +type WorkspaceSpec struct { + Manager string `json:"manager,omitempty"` + Quotas v1.ResourceQuotaSpec `json:"quotas,omitempty"` +} + +// WorkspaceStatus defines the observed state of Workspace +type WorkspaceStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make" to regenerate code after modifying this file + Quotas v1.ResourceQuotaStatus `json:"quotas,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient:nonNamespaced + +// Workspace is the Schema for the workspaces API +// +k8s:openapi-gen=true +type Workspace struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec WorkspaceSpec `json:"spec,omitempty"` + Status WorkspaceStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient:nonNamespaced + +// WorkspaceList contains a list of Workspace +type WorkspaceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workspace `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Workspace{}, &WorkspaceList{}) +} diff --git a/pkg/apis/tenant/v1alpha1/workspace_types_test.go b/pkg/apis/tenant/v1alpha1/workspace_types_test.go new file mode 100644 index 000000000..9de5c9b70 --- /dev/null +++ b/pkg/apis/tenant/v1alpha1/workspace_types_test.go @@ -0,0 +1,58 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package v1alpha1 + +import ( + "testing" + + "github.com/onsi/gomega" + "golang.org/x/net/context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func TestStorageWorkspace(t *testing.T) { + key := types.NamespacedName{ + Name: "foo", + } + created := &Workspace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }} + g := gomega.NewGomegaWithT(t) + + // Test Create + fetched := &Workspace{} + g.Expect(c.Create(context.TODO(), created)).NotTo(gomega.HaveOccurred()) + + g.Expect(c.Get(context.TODO(), key, fetched)).NotTo(gomega.HaveOccurred()) + g.Expect(fetched).To(gomega.Equal(created)) + + // Test Updating the Labels + updated := fetched.DeepCopy() + updated.Labels = map[string]string{"hello": "world"} + g.Expect(c.Update(context.TODO(), updated)).NotTo(gomega.HaveOccurred()) + + g.Expect(c.Get(context.TODO(), key, fetched)).NotTo(gomega.HaveOccurred()) + g.Expect(fetched).To(gomega.Equal(updated)) + + // Test Delete + g.Expect(c.Delete(context.TODO(), fetched)).NotTo(gomega.HaveOccurred()) + g.Expect(c.Get(context.TODO(), key, fetched)).To(gomega.HaveOccurred()) +} diff --git a/pkg/apis/tenant/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/tenant/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..47a5fd616 --- /dev/null +++ b/pkg/apis/tenant/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,120 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workspace) DeepCopyInto(out *Workspace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace. +func (in *Workspace) DeepCopy() *Workspace { + if in == nil { + return nil + } + out := new(Workspace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workspace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceList) DeepCopyInto(out *WorkspaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workspace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceList. 
+func (in *WorkspaceList) DeepCopy() *WorkspaceList { + if in == nil { + return nil + } + out := new(WorkspaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkspaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceSpec) DeepCopyInto(out *WorkspaceSpec) { + *out = *in + in.Quotas.DeepCopyInto(&out.Quotas) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSpec. +func (in *WorkspaceSpec) DeepCopy() *WorkspaceSpec { + if in == nil { + return nil + } + out := new(WorkspaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) { + *out = *in + in.Quotas.DeepCopyInto(&out.Quotas) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus. +func (in *WorkspaceStatus) DeepCopy() *WorkspaceStatus { + if in == nil { + return nil + } + out := new(WorkspaceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/apis/tenant/v1alpha2/register.go b/pkg/apis/tenant/v1alpha2/register.go new file mode 100644 index 000000000..cedfbf69a --- /dev/null +++ b/pkg/apis/tenant/v1alpha2/register.go @@ -0,0 +1,107 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package v1alpha2 + +import ( + "github.com/emicklei/go-restful" + "github.com/emicklei/go-restful-openapi" + "k8s.io/apimachinery/pkg/runtime/schema" + "kubesphere.io/kubesphere/pkg/apiserver/runtime" + "kubesphere.io/kubesphere/pkg/apiserver/tenant" +) + +const GroupName = "tenant.kubesphere.io" + +var GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} + +var ( + WebServiceBuilder = runtime.NewContainerBuilder(addWebService) + AddToContainer = WebServiceBuilder.AddToContainer +) + +func addWebService(c *restful.Container) error { + tags := []string{"Tenant"} + ws := runtime.NewWebService(GroupVersion) + + ws.Route(ws.GET("/workspaces"). + To(tenant.ListWorkspaces). + Doc("List workspace by user"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/workspaces/{workspace}/rules"). + To(tenant.ListWorkspaceRules). + Param(ws.PathParameter("workspace", "workspace name")). + Doc("List the rules for the current user"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/namespaces/{namespace}/rules"). + To(tenant.ListNamespaceRules). + Param(ws.PathParameter("namespace", "namespace")). + Doc("List the rules for the current user"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/devops/{devops}/rules"). + To(tenant.ListDevopsRules). + Param(ws.PathParameter("devops", "devops project id")). + Doc("List the rules for the current user"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/workspaces/{workspace}/namespaces"). + To(tenant.ListNamespaces). 
+ Param(ws.PathParameter("workspace", "workspace name")). + Doc("List the namespaces for the current user"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/workspaces/{workspace}/members/{username}/namespaces"). + To(tenant.ListNamespaces). + Param(ws.PathParameter("workspace", "workspace name")). + Param(ws.PathParameter("username", "workspace member's username")). + Doc("List the namespaces for the workspace member"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.POST("/workspaces/{workspace}/namespaces"). + To(tenant.CreateNamespace). + Param(ws.PathParameter("workspace", "workspace name")). + Doc("Create namespace"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.DELETE("/workspaces/{workspace}/namespaces/{namespace}"). + To(tenant.DeleteNamespace). + Param(ws.PathParameter("workspace", "workspace name")). + Param(ws.PathParameter("namespace", "namespace")). + Doc("Delete namespace"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + + ws.Route(ws.GET("/workspaces/{workspace}/devops"). + To(tenant.ListDevopsProjects). + Param(ws.PathParameter("workspace", "workspace name")). + Doc("List devops projects for the current user"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.GET("/workspaces/{workspace}/members/{username}/devops"). + To(tenant.ListDevopsProjects). + Param(ws.PathParameter("workspace", "workspace name")). + Param(ws.PathParameter("username", "workspace member's username")). + Doc("List the devops projects for the workspace member"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.POST("/workspaces/{workspace}/devops"). + To(tenant.CreateDevopsProject). + Param(ws.PathParameter("workspace", "workspace name")). + Doc("Create devops project"). + Metadata(restfulspec.KeyOpenAPITags, tags)) + ws.Route(ws.DELETE("/workspaces/{workspace}/devops"). + To(tenant.DeleteDevopsProject). + Param(ws.PathParameter("workspace", "workspace name")). + Doc("Delete devops project"). 
+ Metadata(restfulspec.KeyOpenAPITags, tags)) + + c.Add(ws) + return nil +} diff --git a/pkg/apis/terminal/install/install.go b/pkg/apis/terminal/install/install.go new file mode 100644 index 000000000..91877c445 --- /dev/null +++ b/pkg/apis/terminal/install/install.go @@ -0,0 +1,33 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package install + +import ( + "github.com/emicklei/go-restful" + urlruntime "k8s.io/apimachinery/pkg/util/runtime" + terminalv1alpha2 "kubesphere.io/kubesphere/pkg/apis/terminal/v1alpha2" + "kubesphere.io/kubesphere/pkg/apiserver/runtime" +) + +func init() { + Install(runtime.Container) +} + +func Install(c *restful.Container) { + urlruntime.Must(terminalv1alpha2.AddToContainer(c)) +} diff --git a/pkg/apis/terminal/v1alpha2/register.go b/pkg/apis/terminal/v1alpha2/register.go new file mode 100644 index 000000000..02af415c1 --- /dev/null +++ b/pkg/apis/terminal/v1alpha2/register.go @@ -0,0 +1,56 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + +*/ +package v1alpha2 + +import ( + "github.com/emicklei/go-restful" + "github.com/emicklei/go-restful-openapi" + "k8s.io/apimachinery/pkg/runtime/schema" + "kubesphere.io/kubesphere/pkg/apiserver/runtime" + "kubesphere.io/kubesphere/pkg/apiserver/terminal" + "kubesphere.io/kubesphere/pkg/models" +) + +const GroupName = "terminal.kubesphere.io" + +var GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} + +var ( + WebServiceBuilder = runtime.NewContainerBuilder(addWebService) + AddToContainer = WebServiceBuilder.AddToContainer +) + +func addWebService(c *restful.Container) error { + + webservice := runtime.NewWebService(GroupVersion) + + tags := []string{"Terminal"} + + webservice.Route(webservice.GET("/namespace/{namespace}/pods/{pods}"). + To(terminal.CreateTerminalSession). + Doc("create terminal session"). + Metadata(restfulspec.KeyOpenAPITags, tags). + Writes(models.PodInfo{})) + + path := runtime.ApiRootPath + "/" + GroupVersion.String() + "/sockjs" + c.Handle(path, terminal.NewTerminalHandler(path)) + + c.Add(webservice) + + return nil +} diff --git a/pkg/apiserver/iam/am.go b/pkg/apiserver/iam/am.go index 5ca967a13..59175a22a 100644 --- a/pkg/apiserver/iam/am.go +++ b/pkg/apiserver/iam/am.go @@ -20,42 +20,22 @@ package iam import ( "github.com/emicklei/go-restful" "k8s.io/api/rbac/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + "kubesphere.io/kubesphere/pkg/params" "net/http" "sort" - "kubesphere.io/kubesphere/pkg/constants" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/iam" "kubesphere.io/kubesphere/pkg/models/iam/policy" ) type roleList struct { - ClusterRoles []*v1.ClusterRole `json:"clusterRoles" protobuf:"bytes,2,rep,name=clusterRoles"` + ClusterRoles []*v1.ClusterRole `json:"clusterRole" protobuf:"bytes,2,rep,name=clusterRoles"` Roles []*v1.Role `json:"roles" protobuf:"bytes,2,rep,name=roles"` } 
-func RoleRules(req *restful.Request, resp *restful.Response) { - namespace := req.PathParameter("namespace") - roleName := req.PathParameter("role") - - role, err := iam.GetRole(namespace, roleName) - - if err != nil { - resp.WriteError(http.StatusInternalServerError, err) - return - } - - rules, err := iam.GetRoleSimpleRules([]*v1.Role{role}, namespace) - - if err != nil { - resp.WriteError(http.StatusInternalServerError, err) - return - } - - resp.WriteAsJson(rules[namespace]) -} - -func RoleUsers(req *restful.Request, resp *restful.Response) { +func ListRoleUsers(req *restful.Request, resp *restful.Response) { roleName := req.PathParameter("role") namespace := req.PathParameter("namespace") @@ -69,7 +49,53 @@ func RoleUsers(req *restful.Request, resp *restful.Response) { resp.WriteAsJson(users) } -func NamespaceUsers(req *restful.Request, resp *restful.Response) { +func ListClusterRoles(req *restful.Request, resp *restful.Response) { + conditions, err := params.ParseConditions(req.QueryParameter(params.ConditionsParam)) + orderBy := req.QueryParameter(params.OrderByParam) + limit, offset := params.ParsePaging(req.QueryParameter(params.PagingParam)) + reverse := params.ParseReverse(req) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + result, err := iam.ListClusterRoles(conditions, orderBy, reverse, limit, offset) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(result) + +} + +func ListRoles(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + conditions, err := params.ParseConditions(req.QueryParameter(params.ConditionsParam)) + orderBy := req.QueryParameter(params.OrderByParam) + limit, offset := params.ParsePaging(req.QueryParameter(params.PagingParam)) + reverse := params.ParseReverse(req) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) 
+ return + } + + result, err := iam.ListRoles(namespace, conditions, orderBy, reverse, limit, offset) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(result) + +} + +// List users by namespace +func ListNamespaceUsers(req *restful.Request, resp *restful.Response) { namespace := req.PathParameter("namespace") @@ -80,25 +106,26 @@ func NamespaceUsers(req *restful.Request, resp *restful.Response) { return } + // sort by time by default sort.Slice(users, func(i, j int) bool { - return users[i].Username < users[j].Username + return users[i].RoleBindTime.After(*users[j].RoleBindTime) }) resp.WriteAsJson(users) } -func UserRoles(req *restful.Request, resp *restful.Response) { +func ListUserRoles(req *restful.Request, resp *restful.Response) { username := req.PathParameter("username") - roles, err := iam.GetRoles(username, "") + roles, err := iam.GetUserRoles("", username) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } - clusterRoles, err := iam.GetClusterRoles(username) + _, clusterRoles, err := iam.GetUserClusterRoles(username) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) @@ -112,79 +139,62 @@ func UserRoles(req *restful.Request, resp *restful.Response) { resp.WriteAsJson(roleList) } -func NamespaceRulesHandler(req *restful.Request, resp *restful.Response) { - namespace := req.PathParameter("namespace") - username := req.HeaderParameter(constants.UserNameHeader) - - clusterRoles, err := iam.GetClusterRoles(username) - - if err != nil { - resp.WriteError(http.StatusInternalServerError, err) - return - } - - roles, err := iam.GetRoles(username, namespace) - if err != nil { - resp.WriteError(http.StatusInternalServerError, err) - return - } - - for _, clusterRole := range clusterRoles { - role := new(v1.Role) - role.Name = clusterRole.Name - role.Labels = clusterRole.Labels - role.Namespace = 
namespace - role.Annotations = clusterRole.Annotations - role.Kind = "Role" - role.Rules = clusterRole.Rules - roles = append(roles, role) - } - - rules, err := iam.GetRoleSimpleRules(roles, namespace) - - if err != nil { - resp.WriteError(http.StatusInternalServerError, err) - return - } - - resp.WriteAsJson(rules[namespace]) -} - -func RulesMappingHandler(req *restful.Request, resp *restful.Response) { +func RulesMapping(req *restful.Request, resp *restful.Response) { rules := policy.RoleRuleMapping resp.WriteAsJson(rules) } -func ClusterRulesMappingHandler(req *restful.Request, resp *restful.Response) { +func ClusterRulesMapping(req *restful.Request, resp *restful.Response) { rules := policy.ClusterRoleRuleMapping resp.WriteAsJson(rules) } -func ClusterRoleRules(req *restful.Request, resp *restful.Response) { +func ListClusterRoleRules(req *restful.Request, resp *restful.Response) { clusterRoleName := req.PathParameter("clusterrole") - clusterRole, err := iam.GetClusterRole(clusterRoleName) + rules, err := iam.GetClusterRoleSimpleRules(clusterRoleName) if err != nil { resp.WriteError(http.StatusInternalServerError, err) return } - rules, err := iam.GetClusterRoleSimpleRules([]*v1.ClusterRole{clusterRole}) + resp.WriteAsJson(rules) +} + +func ListClusterRoleUsers(req *restful.Request, resp *restful.Response) { + clusterRoleName := req.PathParameter("clusterrole") + conditions, err := params.ParseConditions(req.QueryParameter(params.ConditionsParam)) + orderBy := req.QueryParameter(params.OrderByParam) + limit, offset := params.ParsePaging(req.QueryParameter(params.PagingParam)) + reverse := params.ParseReverse(req) + if err != nil { - resp.WriteError(http.StatusInternalServerError, err) + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + result, err := iam.ListClusterRoleUsers(clusterRoleName, conditions, orderBy, reverse, limit, offset) + + if err != nil { + if k8serr.IsNotFound(err) { + resp.WriteError(http.StatusNotFound, err) 
+ } else { + resp.WriteError(http.StatusInternalServerError, err) + } + return + } + + resp.WriteAsJson(result) +} + +func ListRoleRules(req *restful.Request, resp *restful.Response) { + namespaceName := req.PathParameter("namespace") + roleName := req.PathParameter("role") + + rules, err := iam.GetRoleSimpleRules(namespaceName, roleName) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } resp.WriteAsJson(rules) } - -func ClusterRoleUsers(req *restful.Request, resp *restful.Response) { - clusterRoleName := req.PathParameter("clusterrole") - - users, err := iam.ClusterRoleUsers(clusterRoleName) - - if err != nil { - resp.WriteError(http.StatusInternalServerError, err) - return - } - - resp.WriteAsJson(users) -} diff --git a/pkg/apiserver/iam/auth.go b/pkg/apiserver/iam/auth.go index 45eeb1647..00064f64e 100644 --- a/pkg/apiserver/iam/auth.go +++ b/pkg/apiserver/iam/auth.go @@ -18,17 +18,14 @@ package iam import ( - "fmt" "github.com/dgrijalva/jwt-go" "github.com/emicklei/go-restful" - "kubesphere.io/kubesphere/pkg/models" - "kubesphere.io/kubesphere/pkg/simple/client/ldap" + "kubesphere.io/kubesphere/pkg/utils/iputil" + "kubesphere.io/kubesphere/pkg/utils/jwtutil" "net/http" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/iam" - "kubesphere.io/kubesphere/pkg/utils" - jwtutils "kubesphere.io/kubesphere/pkg/utils/jwt" ) type Spec struct { @@ -63,11 +60,11 @@ func LoginHandler(req *restful.Request, resp *restful.Response) { err := req.ReadEntity(&loginRequest) if err != nil || loginRequest.Username == "" || loginRequest.Password == "" { - resp.WriteHeaderAndEntity(http.StatusUnauthorized, errors.Wrap(fmt.Errorf("incorrect username or password"))) + resp.WriteHeaderAndEntity(http.StatusUnauthorized, errors.New("incorrect username or password")) return } - ip := utils.RemoteIp(req.Request) + ip := iputil.RemoteIp(req.Request) token, err := iam.Login(loginRequest.Username, 
loginRequest.Password, ip) @@ -76,7 +73,7 @@ func LoginHandler(req *restful.Request, resp *restful.Response) { return } - resp.WriteAsJson(models.Token{Token: token}) + resp.WriteAsJson(token) } // k8s token review @@ -91,13 +88,13 @@ func TokenReviewHandler(req *restful.Request, resp *restful.Response) { } if tokenReview.Spec == nil { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("token must not be null"))) + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New("token must not be null")) return } uToken := tokenReview.Spec.Token - token, err := jwtutils.ValidateToken(uToken) + token, err := jwtutil.ValidateToken(uToken) if err != nil { failed := TokenReview{APIVersion: APIVersion, @@ -112,24 +109,29 @@ func TokenReviewHandler(req *restful.Request, resp *restful.Response) { claims := token.Claims.(jwt.MapClaims) - username := claims["username"].(string) + username, ok := claims["username"].(string) - conn, err := ldap.Client() + if !ok { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New("username not found")) + return + } + + user, err := iam.GetUserInfo(username) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } - defer conn.Close() - - user, err := iam.UserDetail(username, conn) + groups, err := iam.GetUserGroups(username) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } + user.Groups = groups + success := TokenReview{APIVersion: APIVersion, Kind: KindTokenReview, Status: &Status{ diff --git a/pkg/apiserver/iam/groups.go b/pkg/apiserver/iam/groups.go index 9de121e8e..64daf66cc 100644 --- a/pkg/apiserver/iam/groups.go +++ b/pkg/apiserver/iam/groups.go @@ -19,7 +19,6 @@ package iam import ( "fmt" - ldapclient "kubesphere.io/kubesphere/pkg/simple/client/ldap" "net/http" "regexp" "strings" @@ -33,8 +32,6 @@ import ( ) func CreateGroup(req *restful.Request, resp *restful.Response) { - //var json map[string]interface{} - 
var group models.Group err := req.ReadEntity(&group) @@ -45,19 +42,18 @@ func CreateGroup(req *restful.Request, resp *restful.Response) { } if !regexp.MustCompile("[a-z0-9]([-a-z0-9]*[a-z0-9])?").MatchString(group.Name) { - resp.WriteHeaderAndEntity(http.StatusBadRequest, fmt.Errorf("incalid group name %s", group)) + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.New(fmt.Sprintf("incalid group name %s", group))) return } - if group.Creator == "" { - resp.WriteHeaderAndEntity(http.StatusBadRequest, fmt.Errorf("creator should not be null")) - return - } - - created, err := iam.CreateGroup(group) + created, err := iam.CreateGroup(&group) if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + if ldap.IsErrorWithCode(err, ldap.LDAPResultEntryAlreadyExists) { + resp.WriteHeaderAndEntity(http.StatusConflict, errors.Wrap(err)) + } else { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + } return } @@ -75,6 +71,10 @@ func DeleteGroup(req *restful.Request, resp *restful.Response) { err := iam.DeleteGroup(path) if err != nil { + if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { + resp.WriteHeaderAndEntity(http.StatusNotFound, errors.Wrap(err)) + return + } resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } @@ -106,23 +106,18 @@ func UpdateGroup(req *restful.Request, resp *restful.Response) { } -func GroupDetail(req *restful.Request, resp *restful.Response) { +func DescribeGroup(req *restful.Request, resp *restful.Response) { path := req.PathParameter("path") - conn, err := ldapclient.Client() + group, err := iam.DescribeGroup(path) if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - defer conn.Close() - - group, err := iam.GroupDetail(path, conn) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + if ldap.IsErrorWithCode(err, 
ldap.LDAPResultNoSuchObject) { + resp.WriteHeaderAndEntity(http.StatusNotFound, errors.Wrap(err)) + } else { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + } return } @@ -130,20 +125,11 @@ func GroupDetail(req *restful.Request, resp *restful.Response) { } -func GroupUsers(req *restful.Request, resp *restful.Response) { +func ListGroupUsers(req *restful.Request, resp *restful.Response) { path := req.PathParameter("path") - conn, err := ldapclient.Client() - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - defer conn.Close() - - group, err := iam.GroupDetail(path, conn) + group, err := iam.DescribeGroup(path) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) @@ -156,10 +142,10 @@ func GroupUsers(req *restful.Request, resp *restful.Response) { for i := 0; i < len(group.Members); i++ { name := group.Members[i] - user, err := iam.UserDetail(name, conn) + user, err := iam.DescribeUser(name) if err != nil { - if ldap.IsErrorWithCode(err, 32) { + if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { group.Members = append(group.Members[:i], group.Members[i+1:]...) 
i-- modify = true @@ -170,25 +156,6 @@ func GroupUsers(req *restful.Request, resp *restful.Response) { } } - clusterRoles, err := iam.GetClusterRoles(name) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - for i := 0; i < len(clusterRoles); i++ { - if clusterRoles[i].Annotations["rbac.authorization.k8s.io/clusterrole"] == "true" { - user.ClusterRole = clusterRoles[i].Name - break - } - } - - if group.Path == group.Name { - workspaceRole := iam.GetWorkspaceRole(clusterRoles, group.Name) - user.WorkspaceRole = workspaceRole - } - users = append(users, user) } @@ -200,18 +167,7 @@ func GroupUsers(req *restful.Request, resp *restful.Response) { } -func CountHandler(req *restful.Request, resp *restful.Response) { - count, err := iam.CountChild("") - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - resp.WriteAsJson(map[string]int{"total_count": count}) -} - -func RootGroupList(req *restful.Request, resp *restful.Response) { +func ListGroups(req *restful.Request, resp *restful.Response) { array := req.QueryParameter("path") @@ -229,18 +185,9 @@ func RootGroupList(req *restful.Request, resp *restful.Response) { groups := make([]*models.Group, 0) - conn, err := ldapclient.Client() - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - defer conn.Close() - for _, v := range paths { path := strings.TrimSpace(v) - group, err := iam.GroupDetail(path, conn) + group, err := iam.DescribeGroup(path) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return diff --git a/pkg/apiserver/iam/users.go b/pkg/apiserver/iam/im.go similarity index 52% rename from pkg/apiserver/iam/users.go rename to pkg/apiserver/iam/im.go index c9ed8e3aa..de248b7dc 100644 --- a/pkg/apiserver/iam/users.go +++ b/pkg/apiserver/iam/im.go @@ -19,9 +19,9 @@ package iam import ( "fmt" + 
"kubesphere.io/kubesphere/pkg/params" "net/http" "regexp" - "strconv" "strings" "github.com/emicklei/go-restful" @@ -31,7 +31,6 @@ import ( "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models" "kubesphere.io/kubesphere/pkg/models/iam" - ldapclient "kubesphere.io/kubesphere/pkg/simple/client/ldap" ) const ( @@ -63,7 +62,7 @@ func CreateUser(req *restful.Request, resp *restful.Response) { return } - err = iam.CreateUser(user) + created, err := iam.CreateUser(&user) if err != nil { if ldap.IsErrorWithCode(err, ldap.LDAPResultEntryAlreadyExists) { @@ -74,7 +73,7 @@ func CreateUser(req *restful.Request, resp *restful.Response) { return } - resp.WriteAsJson(errors.None) + resp.WriteAsJson(created) } func DeleteUser(req *restful.Request, resp *restful.Response) { @@ -100,7 +99,7 @@ func DeleteUser(req *restful.Request, resp *restful.Response) { func UpdateUser(req *restful.Request, resp *restful.Response) { usernameInPath := req.PathParameter("name") - username := req.HeaderParameter(constants.UserNameHeader) + usernameInHeader := req.HeaderParameter(constants.UserNameHeader) var user models.User err := req.ReadEntity(&user) @@ -125,22 +124,23 @@ func UpdateUser(req *restful.Request, resp *restful.Response) { return } - if username == user.Username && user.Password != "" { - _, err = iam.Login(username, user.CurrentPassword, "") + // change password by self + if usernameInHeader == user.Username && user.Password != "" { + _, err = iam.Login(usernameInHeader, user.CurrentPassword, "") if err != nil { resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("incorrect current password"))) return } } - err = iam.UpdateUser(user) + result, err := iam.UpdateUser(&user) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } - resp.WriteAsJson(errors.None) + resp.WriteAsJson(result) } func UserLoginLog(req *restful.Request, resp *restful.Response) { @@ -167,20 +167,11 @@ func UserLoginLog(req 
*restful.Request, resp *restful.Response) { resp.WriteAsJson(result) } -func CurrentUserDetail(req *restful.Request, resp *restful.Response) { +func DescribeUser(req *restful.Request, resp *restful.Response) { - username := req.HeaderParameter(constants.UserNameHeader) + username := req.PathParameter("username") - conn, err := ldapclient.Client() - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - defer conn.Close() - - user, err := iam.UserDetail(username, conn) + user, err := iam.DescribeUser(username) if err != nil { if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { @@ -191,186 +182,81 @@ func CurrentUserDetail(req *restful.Request, resp *restful.Response) { return } - clusterRoles, err := iam.GetClusterRoles(username) + clusterRole, err := iam.GetUserClusterRole(username) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } - clusterRules, err := iam.GetClusterRoleSimpleRules(clusterRoles) + user.ClusterRole = clusterRole.Name + + clusterRules, err := iam.GetUserClusterSimpleRules(username) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } - for i := 0; i < len(clusterRoles); i++ { - if clusterRoles[i].Annotations["rbac.authorization.k8s.io/clusterrole"] == "true" { - user.ClusterRole = clusterRoles[i].Name - break - } + result := struct { + *models.User + ClusterRules []models.SimpleRule `json:"cluster_rules"` + }{ + User: user, + ClusterRules: clusterRules, } - user.ClusterRules = clusterRules - - resp.WriteAsJson(user) + resp.WriteAsJson(result) } -func NamespacesListHandler(req *restful.Request, resp *restful.Response) { - username := req.PathParameter("name") +func Precheck(req *restful.Request, resp *restful.Response) { - namespaces, err := iam.GetNamespaces(username) + check := req.QueryParameter("check") + + exist, err := iam.UserCreateCheck(check) if err != nil { 
resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } - resp.WriteAsJson(namespaces) + resp.WriteAsJson(map[string]bool{"exist": exist}) } -func UserDetail(req *restful.Request, resp *restful.Response) { - username := req.PathParameter("name") - usernameFromHeader := req.HeaderParameter(constants.UserNameHeader) - - if username == usernameFromHeader { - CurrentUserDetail(req, resp) - return - } - - conn, err := ldapclient.Client() - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - defer conn.Close() - - user, err := iam.UserDetail(username, conn) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - clusterRoles, err := iam.GetClusterRoles(username) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - clusterRules, err := iam.GetClusterRoleSimpleRules(clusterRoles) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - workspaceRoles := iam.GetWorkspaceRoles(clusterRoles) - - for i := 0; i < len(clusterRoles); i++ { - if clusterRoles[i].Annotations["rbac.authorization.k8s.io/clusterrole"] == "true" { - user.ClusterRole = clusterRoles[i].Name - break - } - } - - user.ClusterRules = clusterRules - - user.WorkspaceRoles = workspaceRoles - - resp.WriteAsJson(user) -} - -func UserList(req *restful.Request, resp *restful.Response) { - - limit, err := strconv.Atoi(req.QueryParameter("limit")) - if err != nil { - limit = 65535 - } - offset, err := strconv.Atoi(req.QueryParameter("offset")) - if err != nil { - offset = 0 - } +func ListUsers(req *restful.Request, resp *restful.Response) { if check := req.QueryParameter("check"); check != "" { - exist, err := iam.UserCreateCheck(check) - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - 
resp.WriteAsJson(map[string]bool{"exist": exist}) + Precheck(req, resp) return } - conn, err := ldapclient.Client() + limit, offset := params.ParsePaging(req.QueryParameter(params.PagingParam)) + conditions, err := params.ParseConditions(req.QueryParameter(params.ConditionsParam)) + orderBy := req.QueryParameter(params.OrderByParam) + reverse := params.ParseReverse(req) + names := params.ParseArray(req.QueryParameter(params.NameParam)) if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) return } + if len(names) > 0 { + users, err := iam.ListUsersByName(names) - defer conn.Close() - - if query := req.QueryParameter("name"); query != "" { - names := strings.Split(query, ",") - users := make([]*models.User, 0) - for _, name := range names { - user, err := iam.UserDetail(name, conn) - if err != nil { - if ldap.IsErrorWithCode(err, 32) { - continue - } else { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - } - users = append(users, user) + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return } - resp.WriteAsJson(users) return } - var total int - var users []models.User - - if query := req.QueryParameter("search"); query != "" { - total, users, err = iam.Search(query, limit, offset) - } else if query := req.QueryParameter("keyword"); query != "" { - total, users, err = iam.Search(query, limit, offset) - } else { - total, users, err = iam.UserList(limit, offset) - } + users, err := iam.ListUsers(conditions, orderBy, reverse, limit, offset) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } - for i := 0; i < len(users); i++ { - clusterRoles, err := iam.GetClusterRoles(users[i].Username) - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - for j := 0; j < len(clusterRoles); 
j++ { - if clusterRoles[j].Annotations["rbac.authorization.k8s.io/clusterrole"] == "true" { - users[i].ClusterRole = clusterRoles[j].Name - break - } - } - } - - items := make([]interface{}, 0) - - for _, u := range users { - items = append(items, u) - } - - resp.WriteAsJson(models.PageableResponse{Items: items, TotalCount: total}) + resp.WriteAsJson(users) } diff --git a/pkg/models/iam/counter.go b/pkg/apiserver/iam/types.go similarity index 56% rename from pkg/models/iam/counter.go rename to pkg/apiserver/iam/types.go index e4ecfb814..6c25b043f 100644 --- a/pkg/models/iam/counter.go +++ b/pkg/apiserver/iam/types.go @@ -16,39 +16,3 @@ */ package iam - -import "sync" - -type Counter struct { - value int - m *sync.Mutex -} - -func NewCounter(value int) Counter { - c := Counter{} - c.m = &sync.Mutex{} - c.Set(value) - return c -} - -func (c *Counter) Set(value int) { - c.m.Lock() - c.value = value - c.m.Unlock() -} - -func (c *Counter) Add(value int) { - c.m.Lock() - c.value += value - c.m.Unlock() -} - -func (c *Counter) Sub(value int) { - c.m.Lock() - c.value -= value - c.m.Unlock() -} - -func (c *Counter) Get() int { - return c.value -} diff --git a/pkg/apiserver/iam/workspaces.go b/pkg/apiserver/iam/workspaces.go index 0229f1b42..df045dac9 100644 --- a/pkg/apiserver/iam/workspaces.go +++ b/pkg/apiserver/iam/workspaces.go @@ -18,698 +18,155 @@ package iam import ( - "fmt" - "github.com/go-ldap/ldap" - "net/http" - "regexp" - "sort" - "strconv" - "strings" - "github.com/emicklei/go-restful" - "k8s.io/api/core/v1" - rbac "k8s.io/api/rbac/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" "kubesphere.io/kubesphere/pkg/constants" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models" "kubesphere.io/kubesphere/pkg/models/iam" - "kubesphere.io/kubesphere/pkg/models/metrics" "kubesphere.io/kubesphere/pkg/models/workspaces" - ldapclient "kubesphere.io/kubesphere/pkg/simple/client/ldap" - sliceutils "kubesphere.io/kubesphere/pkg/utils" + 
"kubesphere.io/kubesphere/pkg/params" + "net/http" ) -const UserNameHeader = "X-Token-Username" +func ListWorkspaceRoles(req *restful.Request, resp *restful.Response) { -func WorkspaceRoles(req *restful.Request, resp *restful.Response) { - - name := req.PathParameter("name") - - workspace, err := workspaces.Detail(name) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - roles, err := workspaces.Roles(workspace) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - resp.WriteAsJson(roles) -} - -func WorkspaceMemberDetail(req *restful.Request, resp *restful.Response) { - workspace := req.PathParameter("name") - username := req.PathParameter("member") - - user, err := iam.GetUser(username) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - namespaces, err := workspaces.Namespaces(workspace) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - user.WorkspaceRole = user.WorkspaceRoles[workspace] - - roles := make(map[string]string) - - for _, namespace := range namespaces { - if role := user.Roles[namespace.Name]; role != "" { - roles[namespace.Name] = role - } - } - - user.Roles = roles - user.Rules = nil - user.WorkspaceRules = nil - user.WorkspaceRoles = nil - user.ClusterRules = nil - resp.WriteAsJson(user) -} - -func WorkspaceMemberInvite(req *restful.Request, resp *restful.Response) { - var users []models.UserInvite - workspace := req.PathParameter("name") - err := req.ReadEntity(&users) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - err = workspaces.Invite(workspace, users) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - resp.WriteAsJson(errors.None) -} - -func 
WorkspaceMemberRemove(req *restful.Request, resp *restful.Response) { - query := req.QueryParameter("name") - workspace := req.PathParameter("name") - - names := strings.Split(query, ",") - - err := workspaces.RemoveMembers(workspace, names) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - resp.WriteAsJson(errors.None) -} - -func NamespaceCheckHandler(req *restful.Request, resp *restful.Response) { - namespace := req.PathParameter("namespace") - - exist, err := workspaces.NamespaceExistCheck(namespace) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - resp.WriteAsJson(map[string]bool{"exist": exist}) -} - -func NamespaceDeleteHandler(req *restful.Request, resp *restful.Response) { - namespace := req.PathParameter("namespace") - workspace := req.PathParameter("name") - - err := workspaces.DeleteNamespace(workspace, namespace) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - resp.WriteAsJson(errors.None) -} - -func DevOpsProjectDeleteHandler(req *restful.Request, resp *restful.Response) { - devops := req.PathParameter("id") - workspace := req.PathParameter("name") - force := req.QueryParameter("force") - username := req.HeaderParameter(UserNameHeader) - - err := workspaces.UnBindDevopsProject(workspace, devops) - - if err != nil && force != "true" { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - err = workspaces.DeleteDevopsProject(username, devops) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - resp.WriteAsJson(errors.None) -} - -func DevOpsProjectCreateHandler(req *restful.Request, resp *restful.Response) { - - workspace := req.PathParameter("name") - username := req.HeaderParameter(UserNameHeader) - - var devops models.DevopsProject - - err := 
req.ReadEntity(&devops) + workspace := req.PathParameter("workspace") + conditions, err := params.ParseConditions(req.QueryParameter(params.ConditionsParam)) + orderBy := req.QueryParameter(params.OrderByParam) + limit, offset := params.ParsePaging(req.QueryParameter(params.PagingParam)) + reverse := params.ParseReverse(req) if err != nil { resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) return } - project, err := workspaces.CreateDevopsProject(username, workspace, devops) + result, err := iam.ListWorkspaceRoles(workspace, conditions, orderBy, reverse, limit, offset) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } - resp.WriteAsJson(project) - + resp.WriteAsJson(result.Items) } -func NamespaceCreateHandler(req *restful.Request, resp *restful.Response) { - workspace := req.PathParameter("name") - username := req.HeaderParameter(UserNameHeader) +func ListWorkspaceRoleRules(req *restful.Request, resp *restful.Response) { + workspace := req.PathParameter("workspace") + role := req.PathParameter("role") - namespace := &v1.Namespace{} - - err := req.ReadEntity(namespace) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) - return - } - - if namespace.Annotations == nil { - namespace.Annotations = make(map[string]string, 0) - } - - namespace.Annotations["creator"] = username - namespace.Annotations["workspace"] = workspace - - if namespace.Labels == nil { - namespace.Labels = make(map[string]string, 0) - } - - namespace.Labels["kubesphere.io/workspace"] = workspace - - namespace, err = workspaces.CreateNamespace(namespace) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid workspace name"))) - return - } - - resp.WriteAsJson(namespace) -} - -func DevOpsProjectHandler(req *restful.Request, resp *restful.Response) { - - workspace := req.PathParameter("name") - username := req.PathParameter("username") - keyword := 
req.QueryParameter("keyword") - - if username == "" { - username = req.HeaderParameter(UserNameHeader) - } - - limit := 65535 - offset := 0 - orderBy := "createTime" - reverse := true - - if groups := regexp.MustCompile(`^limit=(\d+),page=(\d+)$`).FindStringSubmatch(req.QueryParameter("paging")); len(groups) == 3 { - limit, _ = strconv.Atoi(groups[1]) - page, _ := strconv.Atoi(groups[2]) - offset = (page - 1) * limit - } - - if groups := regexp.MustCompile(`^(createTime|name)$`).FindStringSubmatch(req.QueryParameter("order")); len(groups) == 2 { - orderBy = groups[1] - reverse = false - } - - if q := req.QueryParameter("reverse"); q != "" { - b, err := strconv.ParseBool(q) - if err == nil { - reverse = b - } - } - - total, devOpsProjects, err := workspaces.ListDevopsProjectsByUser(username, workspace, keyword, orderBy, reverse, limit, offset) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - result := models.PageableResponse{} - result.TotalCount = total - result.Items = make([]interface{}, 0) - for _, n := range devOpsProjects { - result.Items = append(result.Items, n) - } - resp.WriteAsJson(result) -} - -func WorkspaceCreateHandler(req *restful.Request, resp *restful.Response) { - var workspace models.Workspace - username := req.HeaderParameter(UserNameHeader) - err := req.ReadEntity(&workspace) - if err != nil { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) - return - } - if workspace.Name == "" || strings.Contains(workspace.Name, ":") { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid workspace name"))) - return - } - - workspace.Path = workspace.Name - workspace.Members = nil - - if workspace.Admin != "" { - workspace.Creator = workspace.Admin - } else { - workspace.Creator = username - } - - created, err := workspaces.Create(&workspace) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - 
return - } - - resp.WriteAsJson(created) - -} - -func DeleteWorkspaceHandler(req *restful.Request, resp *restful.Response) { - name := req.PathParameter("name") - - if name == "" || strings.Contains(name, ":") { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid workspace name"))) - return - } - - workspace, err := workspaces.Detail(name) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - err = workspaces.Delete(workspace) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - resp.WriteAsJson(errors.None) -} -func WorkspaceEditHandler(req *restful.Request, resp *restful.Response) { - var workspace models.Workspace - name := req.PathParameter("name") - err := req.ReadEntity(&workspace) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) - return - } - - if name != workspace.Name { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("the name of workspace (%s) does not match the name on the URL (%s)", workspace.Name, name))) - return - } - - if workspace.Name == "" || strings.Contains(workspace.Name, ":") { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(fmt.Errorf("invalid workspace name"))) - return - } - - workspace.Path = workspace.Name - - workspace.Members = nil - - edited, err := workspaces.Edit(&workspace) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - resp.WriteAsJson(edited) -} -func WorkspaceDetailHandler(req *restful.Request, resp *restful.Response) { - - name := req.PathParameter("name") - - workspace, err := workspaces.Detail(name) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - resp.WriteAsJson(workspace) -} - -// List all workspaces for the current user -func UserWorkspaceListHandler(req 
*restful.Request, resp *restful.Response) { - keyword := req.QueryParameter("keyword") - username := req.HeaderParameter(constants.UserNameHeader) - - ws, err := workspaces.ListWorkspaceByUser(username, keyword) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - sort.Slice(ws, func(i, j int) bool { - t1, err := ws[i].GetCreateTime() - if err != nil { - return false - } - t2, err := ws[j].GetCreateTime() - if err != nil { - return true - } - return t1.After(t2) - }) - - resp.WriteAsJson(ws) -} - -func UserNamespaceListHandler(req *restful.Request, resp *restful.Response) { - withMetrics, err := strconv.ParseBool(req.QueryParameter("metrics")) - - if err != nil { - withMetrics = false - } - - username := req.PathParameter("username") - keyword := req.QueryParameter("keyword") - if username == "" { - username = req.HeaderParameter(UserNameHeader) - } - limit := 65535 - offset := 0 - orderBy := "createTime" - reverse := true - - if groups := regexp.MustCompile(`^limit=(\d+),page=(\d+)$`).FindStringSubmatch(req.QueryParameter("paging")); len(groups) == 3 { - limit, _ = strconv.Atoi(groups[1]) - page, _ := strconv.Atoi(groups[2]) - if page < 0 { - page = 1 - } - offset = (page - 1) * limit - } - - if groups := regexp.MustCompile(`^(createTime|name)$`).FindStringSubmatch(req.QueryParameter("order")); len(groups) == 2 { - orderBy = groups[1] - reverse = false - } - - if q := req.QueryParameter("reverse"); q != "" { - b, err := strconv.ParseBool(q) - if err == nil { - reverse = b - } - } - - workspaceName := req.PathParameter("workspace") - - total, namespaces, err := workspaces.ListNamespaceByUser(workspaceName, username, keyword, orderBy, reverse, limit, offset) - - if withMetrics { - namespaces = metrics.GetNamespacesWithMetrics(namespaces) - } - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - result := models.PageableResponse{} - 
result.TotalCount = total - result.Items = make([]interface{}, 0) - for _, n := range namespaces { - result.Items = append(result.Items, n) - } - - resp.WriteAsJson(result) -} - -func DevopsRulesHandler(req *restful.Request, resp *restful.Response) { - //workspaceName := req.PathParameter("workspace") - username := req.HeaderParameter(constants.UserNameHeader) - devopsName := req.PathParameter("devops") - - var rules []models.SimpleRule - - role, err := iam.GetDevopsRole(devopsName, username) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - switch role { - case "developer": - rules = []models.SimpleRule{ - {Name: "pipelines", Actions: []string{"view", "trigger"}}, - {Name: "roles", Actions: []string{"view"}}, - {Name: "members", Actions: []string{"view"}}, - {Name: "devops", Actions: []string{"view"}}, - } - break - case "owner": - rules = []models.SimpleRule{ - {Name: "pipelines", Actions: []string{"create", "edit", "view", "delete", "trigger"}}, - {Name: "roles", Actions: []string{"view"}}, - {Name: "members", Actions: []string{"create", "edit", "view", "delete"}}, - {Name: "credentials", Actions: []string{"create", "edit", "view", "delete"}}, - {Name: "devops", Actions: []string{"edit", "view", "delete"}}, - } - break - case "maintainer": - rules = []models.SimpleRule{ - {Name: "pipelines", Actions: []string{"create", "edit", "view", "delete", "trigger"}}, - {Name: "roles", Actions: []string{"view"}}, - {Name: "members", Actions: []string{"view"}}, - {Name: "credentials", Actions: []string{"create", "edit", "view", "delete"}}, - {Name: "devops", Actions: []string{"view"}}, - } - break - case "reporter": - fallthrough - default: - rules = []models.SimpleRule{ - {Name: "pipelines", Actions: []string{"view"}}, - {Name: "roles", Actions: []string{"view"}}, - {Name: "members", Actions: []string{"view"}}, - {Name: "devops", Actions: []string{"view"}}, - } - break - } + rules := 
iam.GetWorkspaceRoleSimpleRules(workspace, role) resp.WriteAsJson(rules) } -func NamespacesRulesHandler(req *restful.Request, resp *restful.Response) { - //workspaceName := req.PathParameter("workspace") - username := req.HeaderParameter(constants.UserNameHeader) - namespaceName := req.PathParameter("namespace") - - clusterRoles, err := iam.GetClusterRoles(username) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - roles, err := iam.GetRoles(username, namespaceName) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - for _, clusterRole := range clusterRoles { - role := new(rbac.Role) - role.Name = clusterRole.Name - role.Labels = clusterRole.Labels - role.Namespace = namespaceName - role.Annotations = clusterRole.Annotations - role.Kind = "Role" - role.Rules = clusterRole.Rules - roles = append(roles, role) - } - - rules, err := iam.GetRoleSimpleRules(roles, namespaceName) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - if rules[namespaceName] == nil { - resp.WriteAsJson(make([]models.SimpleRule, 0)) - } else { - resp.WriteAsJson(rules[namespaceName]) - } -} - -func WorkspaceRulesHandler(req *restful.Request, resp *restful.Response) { +func DescribeWorkspaceRole(req *restful.Request, resp *restful.Response) { workspace := req.PathParameter("workspace") + roleName := req.PathParameter("role") - username := req.HeaderParameter(constants.UserNameHeader) - - clusterRoles, err := iam.GetClusterRoles(username) + role, err := iam.GetWorkspaceRole(workspace, roleName) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - rules := iam.GetWorkspaceSimpleRules(clusterRoles, workspace) - - if rules[workspace] != nil { - 
resp.WriteAsJson(rules[workspace]) - } else if rules["*"] != nil { - resp.WriteAsJson(rules["*"]) - } else { - resp.WriteAsJson(make([]models.SimpleRule, 0)) - } + resp.WriteAsJson(role) } -func WorkspaceMemberList(req *restful.Request, resp *restful.Response) { +func DescribeWorkspaceUser(req *restful.Request, resp *restful.Response) { workspace := req.PathParameter("workspace") - limit, err := strconv.Atoi(req.QueryParameter("limit")) + username := req.PathParameter("username") + + workspaceRole, err := iam.GetUserWorkspaceRole(workspace, username) + if err != nil { - limit = 500 - } - offset, err := strconv.Atoi(req.QueryParameter("offset")) - if err != nil { - offset = 0 + if k8serr.IsNotFound(err) { + resp.WriteHeaderAndEntity(http.StatusNotFound, errors.Wrap(err)) + } else { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + } + + return } - conn, err := ldapclient.Client() + user, err := iam.DescribeUser(username) if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) return } - defer conn.Close() + user.WorkspaceRole = workspaceRole.Labels[constants.DisplayNameLabelKey] - group, err := iam.GroupDetail(workspace, conn) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - keyword := "" - - if query := req.QueryParameter("keyword"); query != "" { - keyword = query - } - - users := make([]*models.User, 0) - - total := len(group.Members) - - members := sliceutils.RemoveString(group.Members, func(item string) bool { - return keyword != "" && !strings.Contains(item, keyword) - }) - - for i := 0; i < len(members); i++ { - username := members[i] - - if i < offset { - continue - } - - if len(users) == limit { - break - } - - user, err := iam.UserDetail(username, conn) - - if err != nil { - if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { - group.Members = sliceutils.RemoveString(group.Members, func(item string) bool { - return 
item == username - }) - continue - } else { - resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) - return - } - } - - clusterRoles, err := iam.GetClusterRoles(username) - - for i := 0; i < len(clusterRoles); i++ { - if clusterRoles[i].Annotations["rbac.authorization.k8s.io/clusterrole"] == "true" { - user.ClusterRole = clusterRoles[i].Name - break - } - } - - if group.Path == group.Name { - - workspaceRole := iam.GetWorkspaceRole(clusterRoles, group.Name) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - user.WorkspaceRole = workspaceRole - } - - users = append(users, user) - } - - if total > len(group.Members) { - go iam.UpdateGroup(group) - } - if req.QueryParameter("limit") != "" { - resp.WriteAsJson(map[string]interface{}{"items": users, "total_count": len(members)}) - } else { - resp.WriteAsJson(users) - } + resp.WriteAsJson(user) +} + +func ListDevopsRoleRules(req *restful.Request, resp *restful.Response) { + role := req.PathParameter("role") + + rules := iam.GetDevopsRoleSimpleRules(role) + + resp.WriteAsJson(rules) +} + +func InviteUser(req *restful.Request, resp *restful.Response) { + workspace := req.PathParameter("workspace") + var user models.User + err := req.ReadEntity(&user) + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + err = workspaces.InviteUser(workspace, &user) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) +} + +func RemoveUser(req *restful.Request, resp *restful.Response) { + workspace := req.PathParameter("workspace") + var user models.User + err := req.ReadEntity(&user) + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + err = workspaces.RemoveUser(workspace, &user) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, 
errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) +} + +func ListWorkspaceUsers(req *restful.Request, resp *restful.Response) { + workspace := req.PathParameter("workspace") + conditions, err := params.ParseConditions(req.QueryParameter(params.ConditionsParam)) + orderBy := req.QueryParameter(params.OrderByParam) + limit, offset := params.ParsePaging(req.QueryParameter(params.PagingParam)) + reverse := params.ParseReverse(req) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + result, err := iam.ListWorkspaceUsers(workspace, conditions, orderBy, reverse, limit, offset) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(result) } diff --git a/pkg/apiserver/resources/application.go b/pkg/apiserver/resources/application.go index 6a921f16b..207d47605 100644 --- a/pkg/apiserver/resources/application.go +++ b/pkg/apiserver/resources/application.go @@ -21,15 +21,17 @@ import ( "github.com/emicklei/go-restful" "kubesphere.io/kubesphere/pkg/errors" "kubesphere.io/kubesphere/pkg/models/applications" + + //"kubesphere.io/kubesphere/pkg/models/applications" "kubesphere.io/kubesphere/pkg/params" "net/http" ) func ApplicationHandler(req *restful.Request, resp *restful.Response) { - limit, offset := params.ParsePaging(req) + limit, offset := params.ParsePaging(req.QueryParameter(params.PagingParam)) clusterId := req.QueryParameter("cluster_id") runtimeId := req.QueryParameter("runtime_id") - conditions, err := params.ParseConditions(req) + conditions, err := params.ParseConditions(req.QueryParameter(params.ConditionsParam)) if err != nil { if err != nil { resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) diff --git a/pkg/apiserver/resources/cluster_resources.go b/pkg/apiserver/resources/cluster_resources.go deleted file mode 100644 index 4b94d45a6..000000000 --- a/pkg/apiserver/resources/cluster_resources.go +++ 
/dev/null @@ -1,44 +0,0 @@ -/* - - Copyright 2019 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -*/ -package resources - -import ( - "github.com/emicklei/go-restful" - "net/http" - - "kubesphere.io/kubesphere/pkg/errors" - "kubesphere.io/kubesphere/pkg/models/resources" - "kubesphere.io/kubesphere/pkg/params" -) - -func ClusterResourceHandler(req *restful.Request, resp *restful.Response) { - resourceName := req.PathParameter("resources") - conditions, err := params.ParseConditions(req) - orderBy := req.QueryParameter(params.OrderByParam) - limit, offset := params.ParsePaging(req) - reverse := params.ParseReverse(req) - - result, err := resources.ListClusterResource(resourceName, conditions, orderBy, reverse, limit, offset) - - if err != nil { - resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - resp.WriteAsJson(result) -} diff --git a/pkg/apiserver/resources/namespace_resources.go b/pkg/apiserver/resources/resources.go similarity index 59% rename from pkg/apiserver/resources/namespace_resources.go rename to pkg/apiserver/resources/resources.go index 84f2bc006..6acaaddd4 100644 --- a/pkg/apiserver/resources/namespace_resources.go +++ b/pkg/apiserver/resources/resources.go @@ -19,22 +19,39 @@ package resources import ( "github.com/emicklei/go-restful" + "kubesphere.io/kubesphere/pkg/models" + "kubesphere.io/kubesphere/pkg/models/resources" "net/http" "kubesphere.io/kubesphere/pkg/errors" - 
"kubesphere.io/kubesphere/pkg/models/resources" "kubesphere.io/kubesphere/pkg/params" ) -func NamespaceResourceHandler(req *restful.Request, resp *restful.Response) { +func ListResources(req *restful.Request, resp *restful.Response) { namespace := req.PathParameter("namespace") resourceName := req.PathParameter("resources") - conditions, err := params.ParseConditions(req) + conditions, err := params.ParseConditions(req.QueryParameter(params.ConditionsParam)) orderBy := req.QueryParameter(params.OrderByParam) - limit, offset := params.ParsePaging(req) + limit, offset := params.ParsePaging(req.QueryParameter(params.PagingParam)) reverse := params.ParseReverse(req) + names := params.ParseArray(req.QueryParameter(params.NameParam)) - result, err := resources.ListNamespaceResource(namespace, resourceName, conditions, orderBy, reverse, limit, offset) + if orderBy == "" { + orderBy = resources.CreateTime + reverse = true + } + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + var result *models.PageableResponse + if len(names) > 0 { + result, err = resources.ListResourcesByName(namespace, resourceName, names) + } else { + result, err = resources.ListResources(namespace, resourceName, conditions, orderBy, reverse, limit, offset) + } if err != nil { resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) diff --git a/pkg/apiserver/routers/routers.go b/pkg/apiserver/routers/routers.go index deda6549a..083e41ef2 100644 --- a/pkg/apiserver/routers/routers.go +++ b/pkg/apiserver/routers/routers.go @@ -50,21 +50,6 @@ func GetAllRouters(request *restful.Request, response *restful.Response) { response.WriteAsJson(routers) } -// Get all namespace ingress controller services for user -func GetAllRoutersOfUser(request *restful.Request, response *restful.Response) { - - username := request.PathParameter("username") - - routers, err := routers.GetAllRoutersOfUser(username) - - if err != nil { - 
response.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) - return - } - - response.WriteAsJson(routers) -} - // Get ingress controller service for specified namespace func GetRouter(request *restful.Request, response *restful.Response) { diff --git a/pkg/apiserver/tenant/tenant.go b/pkg/apiserver/tenant/tenant.go new file mode 100644 index 000000000..d22a75f77 --- /dev/null +++ b/pkg/apiserver/tenant/tenant.go @@ -0,0 +1,264 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package tenant + +import ( + "github.com/emicklei/go-restful" + "k8s.io/api/core/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" + "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/errors" + "kubesphere.io/kubesphere/pkg/models" + "kubesphere.io/kubesphere/pkg/models/iam" + "kubesphere.io/kubesphere/pkg/models/tenant" + "kubesphere.io/kubesphere/pkg/models/workspaces" + "kubesphere.io/kubesphere/pkg/params" + "log" + "net/http" +) + +func ListWorkspaceRules(req *restful.Request, resp *restful.Response) { + workspace := req.PathParameter("workspace") + username := req.HeaderParameter(constants.UserNameHeader) + + rules, err := iam.GetUserWorkspaceSimpleRules(workspace, username) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(rules) +} + +func ListWorkspaces(req *restful.Request, resp *restful.Response) { + username := req.HeaderParameter(constants.UserNameHeader) + conditions, err := params.ParseConditions(req.QueryParameter(params.ConditionsParam)) + orderBy := req.QueryParameter(params.OrderByParam) + limit, offset := params.ParsePaging(req.QueryParameter(params.PagingParam)) + reverse := params.ParseReverse(req) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + result, err := tenant.ListWorkspaces(username, conditions, orderBy, reverse, limit, offset) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(result) +} + +func ListNamespaces(req *restful.Request, resp *restful.Response) { + workspace := req.PathParameter("workspace") + username := req.PathParameter("username") + // /workspaces/{workspace}/members/{username}/namespaces + if username == "" { + // /workspaces/{workspace}/namespaces + username = req.HeaderParameter(constants.UserNameHeader) + } + + 
conditions, err := params.ParseConditions(req.QueryParameter(params.ConditionsParam)) + orderBy := req.QueryParameter(params.OrderByParam) + limit, offset := params.ParsePaging(req.QueryParameter(params.PagingParam)) + reverse := params.ParseReverse(req) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + conditions.Match["kubesphere.io/workspace"] = workspace + + result, err := tenant.ListNamespaces(username, conditions, orderBy, reverse, limit, offset) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(result) +} + +func CreateNamespace(req *restful.Request, resp *restful.Response) { + workspaceName := req.PathParameter("workspace") + username := req.HeaderParameter(constants.UserNameHeader) + var namespace v1.Namespace + err := req.ReadEntity(&namespace) + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + workspace, err := tenant.GetWorkspace(workspaceName) + + if err != nil { + if k8serr.IsNotFound(err) { + resp.WriteHeaderAndEntity(http.StatusForbidden, errors.Wrap(err)) + } else { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + } + return + } + + err = checkResourceQuotas(workspace) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusForbidden, errors.Wrap(err)) + return + } + + created, err := tenant.CreateNamespace(workspaceName, &namespace, username) + + if err != nil { + if k8serr.IsAlreadyExists(err) { + resp.WriteHeaderAndEntity(http.StatusConflict, err) + } else { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, err) + } + return + } + resp.WriteAsJson(created) +} + +func DeleteNamespace(req *restful.Request, resp *restful.Response) { + workspaceName := req.PathParameter("workspace") + namespaceName := req.PathParameter("namespace") + + err := workspaces.DeleteNamespace(workspaceName, namespaceName) + + if err != nil { 
+ resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) +} + +func checkResourceQuotas(workspace *v1alpha1.Workspace) error { + return nil +} + +func ListDevopsProjects(req *restful.Request, resp *restful.Response) { + + workspace := req.PathParameter("workspace") + username := req.PathParameter("username") + if username == "" { + username = req.HeaderParameter(constants.UserNameHeader) + } + orderBy := req.QueryParameter(params.OrderByParam) + reverse := params.ParseReverse(req) + limit, offset := params.ParsePaging(req.QueryParameter(params.PagingParam)) + conditions, err := params.ParseConditions(req.QueryParameter(params.ConditionsParam)) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + result, err := tenant.ListDevopsProjects(workspace, username, conditions, orderBy, reverse, limit, offset) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(result) +} + +func DeleteDevopsProject(req *restful.Request, resp *restful.Response) { + devops := req.PathParameter("id") + workspace := req.PathParameter("workspace") + force := req.QueryParameter("force") + username := req.HeaderParameter(constants.UserNameHeader) + + err := workspaces.UnBindDevopsProject(workspace, devops) + + if err != nil && force != "true" { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + err = workspaces.DeleteDevopsProject(username, devops) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(errors.None) +} + +func CreateDevopsProject(req *restful.Request, resp *restful.Response) { + + workspace := req.PathParameter("workspace") + username := req.HeaderParameter(constants.UserNameHeader) + + var devops models.DevopsProject + + err := req.ReadEntity(&devops) + + if 
err != nil { + resp.WriteHeaderAndEntity(http.StatusBadRequest, errors.Wrap(err)) + return + } + + log.Println("create workspace", username, workspace, devops) + project, err := workspaces.CreateDevopsProject(username, workspace, devops) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + resp.WriteAsJson(project) + +} + +func ListNamespaceRules(req *restful.Request, resp *restful.Response) { + namespace := req.PathParameter("namespace") + username := req.HeaderParameter(constants.UserNameHeader) + + rules, err := iam.GetUserNamespaceSimpleRules(namespace, username) + + if err != nil { + resp.WriteError(http.StatusInternalServerError, err) + return + } + + resp.WriteAsJson(rules) +} + +func ListDevopsRules(req *restful.Request, resp *restful.Response) { + devops := req.PathParameter("devops") + username := req.HeaderParameter(constants.UserNameHeader) + + rules, err := iam.GetUserDevopsSimpleRules(username, devops) + + if err != nil { + resp.WriteError(http.StatusInternalServerError, err) + return + } + + resp.WriteAsJson(rules) +} diff --git a/pkg/apiserver/terminal/terminal.go b/pkg/apiserver/terminal/terminal.go new file mode 100644 index 000000000..b8e083424 --- /dev/null +++ b/pkg/apiserver/terminal/terminal.go @@ -0,0 +1,56 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package terminal + +import ( + "github.com/emicklei/go-restful" + "gopkg.in/igm/sockjs-go.v2/sockjs" + "kubesphere.io/kubesphere/pkg/errors" + "kubesphere.io/kubesphere/pkg/models/terminal" + "net/http" +) + +// TerminalResponse is sent by handleExecShell. The Id is a random session id that binds the original REST request and the SockJS connection. +// Any clientapi in possession of this Id can hijack the terminal session. +type TerminalResponse struct { + Id string `json:"id"` +} + +// CreateAttachHandler is called from main for /api/sockjs +func NewTerminalHandler(path string) http.Handler { + return sockjs.NewHandler(path, sockjs.DefaultOptions, terminal.HandleTerminalSession) +} + +// Handles execute shell API call +func CreateTerminalSession(request *restful.Request, resp *restful.Response) { + + namespace := request.PathParameter("namespace") + podName := request.PathParameter("pod") + containerName := request.QueryParameter("container") + shell := request.QueryParameter("shell") + + sessionId, err := terminal.NewSession(shell, namespace, podName, containerName) + + if err != nil { + resp.WriteHeaderAndEntity(http.StatusInternalServerError, errors.Wrap(err)) + return + } + + TerminalResponse := &TerminalResponse{Id: sessionId} + resp.WriteAsJson(TerminalResponse) +} diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index 5ca07dc56..7a7c8cde3 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -19,10 +19,11 @@ limitations under the License. 
package versioned import ( - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/flowcontrol" servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2" + tenantv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tenant/v1alpha1" ) type Interface interface { @@ -30,6 +31,9 @@ type Interface interface { ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha2Interface // Deprecated: please explicitly pick a version if possible. Servicemesh() servicemeshv1alpha2.ServicemeshV1alpha2Interface + TenantV1alpha1() tenantv1alpha1.TenantV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Tenant() tenantv1alpha1.TenantV1alpha1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -37,6 +41,7 @@ type Interface interface { type Clientset struct { *discovery.DiscoveryClient servicemeshV1alpha2 *servicemeshv1alpha2.ServicemeshV1alpha2Client + tenantV1alpha1 *tenantv1alpha1.TenantV1alpha1Client } // ServicemeshV1alpha2 retrieves the ServicemeshV1alpha2Client @@ -50,6 +55,17 @@ func (c *Clientset) Servicemesh() servicemeshv1alpha2.ServicemeshV1alpha2Interfa return c.servicemeshV1alpha2 } +// TenantV1alpha1 retrieves the TenantV1alpha1Client +func (c *Clientset) TenantV1alpha1() tenantv1alpha1.TenantV1alpha1Interface { + return c.tenantV1alpha1 +} + +// Deprecated: Tenant retrieves the default version of TenantClient. +// Please explicitly pick a version. 
+func (c *Clientset) Tenant() tenantv1alpha1.TenantV1alpha1Interface { + return c.tenantV1alpha1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -70,6 +86,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.tenantV1alpha1, err = tenantv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -83,6 +103,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.servicemeshV1alpha2 = servicemeshv1alpha2.NewForConfigOrDie(c) + cs.tenantV1alpha1 = tenantv1alpha1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -92,6 +113,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.servicemeshV1alpha2 = servicemeshv1alpha2.New(c) + cs.tenantV1alpha1 = tenantv1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 2d5febe62..9e7380c49 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -27,6 +27,8 @@ import ( clientset "kubesphere.io/kubesphere/pkg/client/clientset/versioned" servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2" fakeservicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake" + tenantv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tenant/v1alpha1" + faketenantv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake" ) // 
NewSimpleClientset returns a clientset that will respond with the provided objects. @@ -80,3 +82,13 @@ func (c *Clientset) ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha func (c *Clientset) Servicemesh() servicemeshv1alpha2.ServicemeshV1alpha2Interface { return &fakeservicemeshv1alpha2.FakeServicemeshV1alpha2{Fake: &c.Fake} } + +// TenantV1alpha1 retrieves the TenantV1alpha1Client +func (c *Clientset) TenantV1alpha1() tenantv1alpha1.TenantV1alpha1Interface { + return &faketenantv1alpha1.FakeTenantV1alpha1{Fake: &c.Fake} +} + +// Tenant retrieves the TenantV1alpha1Client +func (c *Clientset) Tenant() tenantv1alpha1.TenantV1alpha1Interface { + return &faketenantv1alpha1.FakeTenantV1alpha1{Fake: &c.Fake} +} diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index e99a87018..658e7f6f6 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -19,12 +19,13 @@ limitations under the License. package fake import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" + tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" ) var scheme = runtime.NewScheme() @@ -32,6 +33,7 @@ var codecs = serializer.NewCodecFactory(scheme) var parameterCodec = runtime.NewParameterCodec(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ servicemeshv1alpha2.AddToScheme, + tenantv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index 05e0d720e..7cbb08ed5 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -19,12 +19,13 @@ limitations under the License. package scheme import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" + tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" ) var Scheme = runtime.NewScheme() @@ -32,6 +33,7 @@ var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ servicemeshv1alpha2.AddToScheme, + tenantv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/pkg/client/clientset/versioned/typed/tenant/v1alpha1/doc.go b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake/doc.go b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake/fake_tenant_client.go b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake/fake_tenant_client.go new file mode 100644 index 000000000..915c4f8d6 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake/fake_tenant_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "k8s.io/client-go/rest" + "k8s.io/client-go/testing" + "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tenant/v1alpha1" +) + +type FakeTenantV1alpha1 struct { + *testing.Fake +} + +func (c *FakeTenantV1alpha1) Workspaces() v1alpha1.WorkspaceInterface { + return &FakeWorkspaces{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeTenantV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake/fake_workspace.go b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake/fake_workspace.go new file mode 100644 index 000000000..19b5c2a40 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake/fake_workspace.go @@ -0,0 +1,131 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/testing" + "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" +) + +// FakeWorkspaces implements WorkspaceInterface +type FakeWorkspaces struct { + Fake *FakeTenantV1alpha1 +} + +var workspacesResource = schema.GroupVersionResource{Group: "tenant.kubesphere.io", Version: "v1alpha1", Resource: "workspaces"} + +var workspacesKind = schema.GroupVersionKind{Group: "tenant.kubesphere.io", Version: "v1alpha1", Kind: "Workspace"} + +// Get takes name of the workspace, and returns the corresponding workspace object, and an error if there is any. +func (c *FakeWorkspaces) Get(name string, options v1.GetOptions) (result *v1alpha1.Workspace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(workspacesResource, name), &v1alpha1.Workspace{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Workspace), err +} + +// List takes label and field selectors, and returns the list of Workspaces that match those selectors. +func (c *FakeWorkspaces) List(opts v1.ListOptions) (result *v1alpha1.WorkspaceList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(workspacesResource, workspacesKind, opts), &v1alpha1.WorkspaceList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.WorkspaceList{ListMeta: obj.(*v1alpha1.WorkspaceList).ListMeta} + for _, item := range obj.(*v1alpha1.WorkspaceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested workspaces. +func (c *FakeWorkspaces) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(workspacesResource, opts)) +} + +// Create takes the representation of a workspace and creates it. Returns the server's representation of the workspace, and an error, if there is any. +func (c *FakeWorkspaces) Create(workspace *v1alpha1.Workspace) (result *v1alpha1.Workspace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(workspacesResource, workspace), &v1alpha1.Workspace{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Workspace), err +} + +// Update takes the representation of a workspace and updates it. Returns the server's representation of the workspace, and an error, if there is any. +func (c *FakeWorkspaces) Update(workspace *v1alpha1.Workspace) (result *v1alpha1.Workspace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(workspacesResource, workspace), &v1alpha1.Workspace{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Workspace), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeWorkspaces) UpdateStatus(workspace *v1alpha1.Workspace) (*v1alpha1.Workspace, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateSubresourceAction(workspacesResource, "status", workspace), &v1alpha1.Workspace{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Workspace), err +} + +// Delete takes name of the workspace and deletes it. Returns an error if one occurs. +func (c *FakeWorkspaces) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(workspacesResource, name), &v1alpha1.Workspace{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeWorkspaces) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(workspacesResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.WorkspaceList{}) + return err +} + +// Patch applies the patch and returns the patched workspace. +func (c *FakeWorkspaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Workspace, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(workspacesResource, name, pt, data, subresources...), &v1alpha1.Workspace{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Workspace), err +} diff --git a/pkg/client/clientset/versioned/typed/tenant/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..1a06574e0 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type WorkspaceExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/tenant/v1alpha1/tenant_client.go b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/tenant_client.go new file mode 100644 index 000000000..1061b8fbc --- /dev/null +++ b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/tenant_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" + "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" + "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" +) + +type TenantV1alpha1Interface interface { + RESTClient() rest.Interface + WorkspacesGetter +} + +// TenantV1alpha1Client is used to interact with features provided by the tenant.kubesphere.io group. 
+type TenantV1alpha1Client struct { + restClient rest.Interface +} + +func (c *TenantV1alpha1Client) Workspaces() WorkspaceInterface { + return newWorkspaces(c) +} + +// NewForConfig creates a new TenantV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*TenantV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &TenantV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new TenantV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *TenantV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new TenantV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *TenantV1alpha1Client { + return &TenantV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *TenantV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/pkg/client/clientset/versioned/typed/tenant/v1alpha1/workspace.go b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/workspace.go new file mode 100644 index 000000000..21e1ebcf7 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/tenant/v1alpha1/workspace.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" + "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" + "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" +) + +// WorkspacesGetter has a method to return a WorkspaceInterface. +// A group's client should implement this interface. +type WorkspacesGetter interface { + Workspaces() WorkspaceInterface +} + +// WorkspaceInterface has methods to work with Workspace resources. 
+type WorkspaceInterface interface { + Create(*v1alpha1.Workspace) (*v1alpha1.Workspace, error) + Update(*v1alpha1.Workspace) (*v1alpha1.Workspace, error) + UpdateStatus(*v1alpha1.Workspace) (*v1alpha1.Workspace, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Workspace, error) + List(opts v1.ListOptions) (*v1alpha1.WorkspaceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Workspace, err error) + WorkspaceExpansion +} + +// workspaces implements WorkspaceInterface +type workspaces struct { + client rest.Interface +} + +// newWorkspaces returns a Workspaces +func newWorkspaces(c *TenantV1alpha1Client) *workspaces { + return &workspaces{ + client: c.RESTClient(), + } +} + +// Get takes name of the workspace, and returns the corresponding workspace object, and an error if there is any. +func (c *workspaces) Get(name string, options v1.GetOptions) (result *v1alpha1.Workspace, err error) { + result = &v1alpha1.Workspace{} + err = c.client.Get(). + Resource("workspaces"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Workspaces that match those selectors. +func (c *workspaces) List(opts v1.ListOptions) (result *v1alpha1.WorkspaceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.WorkspaceList{} + err = c.client.Get(). + Resource("workspaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested workspaces. 
+func (c *workspaces) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("workspaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a workspace and creates it. Returns the server's representation of the workspace, and an error, if there is any. +func (c *workspaces) Create(workspace *v1alpha1.Workspace) (result *v1alpha1.Workspace, err error) { + result = &v1alpha1.Workspace{} + err = c.client.Post(). + Resource("workspaces"). + Body(workspace). + Do(). + Into(result) + return +} + +// Update takes the representation of a workspace and updates it. Returns the server's representation of the workspace, and an error, if there is any. +func (c *workspaces) Update(workspace *v1alpha1.Workspace) (result *v1alpha1.Workspace, err error) { + result = &v1alpha1.Workspace{} + err = c.client.Put(). + Resource("workspaces"). + Name(workspace.Name). + Body(workspace). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *workspaces) UpdateStatus(workspace *v1alpha1.Workspace) (result *v1alpha1.Workspace, err error) { + result = &v1alpha1.Workspace{} + err = c.client.Put(). + Resource("workspaces"). + Name(workspace.Name). + SubResource("status"). + Body(workspace). + Do(). + Into(result) + return +} + +// Delete takes name of the workspace and deletes it. Returns an error if one occurs. +func (c *workspaces) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("workspaces"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *workspaces) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("workspaces"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched workspace. +func (c *workspaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Workspace, err error) { + result = &v1alpha1.Workspace{} + err = c.client.Patch(pt). + Resource("workspaces"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index 4344b82c4..d54429d6b 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -19,17 +19,18 @@ limitations under the License. 
package externalversions import ( - reflect "reflect" - sync "sync" - time "time" + "reflect" + "sync" + "time" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" - versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned" - internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" - servicemesh "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" + "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" + "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh" + "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant" ) // SharedInformerOption defines the functional option type for SharedInformerFactory. 
@@ -173,8 +174,13 @@ type SharedInformerFactory interface { WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool Servicemesh() servicemesh.Interface + Tenant() tenant.Interface } func (f *sharedInformerFactory) Servicemesh() servicemesh.Interface { return servicemesh.New(f, f.namespace, f.tweakListOptions) } + +func (f *sharedInformerFactory) Tenant() tenant.Interface { + return tenant.New(f, f.namespace, f.tweakListOptions) +} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index effae9291..169808380 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -21,9 +21,10 @@ package externalversions import ( "fmt" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" - v1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" + "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" + "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" ) // GenericInformer is type of SharedIndexInformer which will locate and delegate to other @@ -58,6 +59,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1alpha2.SchemeGroupVersion.WithResource("strategies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Servicemesh().V1alpha2().Strategies().Informer()}, nil + // Group=tenant.kubesphere.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("workspaces"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Tenant().V1alpha1().Workspaces().Informer()}, nil + } return nil, fmt.Errorf("no informer found for %v", resource) diff --git a/pkg/client/informers/externalversions/tenant/interface.go b/pkg/client/informers/externalversions/tenant/interface.go new file mode 100644 index 000000000..0fde9592e --- /dev/null +++ 
b/pkg/client/informers/externalversions/tenant/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package tenant + +import ( + "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" + "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. 
+func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/pkg/client/informers/externalversions/tenant/v1alpha1/interface.go b/pkg/client/informers/externalversions/tenant/v1alpha1/interface.go new file mode 100644 index 000000000..6a2e2d376 --- /dev/null +++ b/pkg/client/informers/externalversions/tenant/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Workspaces returns a WorkspaceInformer. + Workspaces() WorkspaceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Workspaces returns a WorkspaceInformer. 
+func (v *version) Workspaces() WorkspaceInformer { + return &workspaceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/client/informers/externalversions/tenant/v1alpha1/workspace.go b/pkg/client/informers/externalversions/tenant/v1alpha1/workspace.go new file mode 100644 index 000000000..d038faed1 --- /dev/null +++ b/pkg/client/informers/externalversions/tenant/v1alpha1/workspace.go @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" + "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" + "kubesphere.io/kubesphere/pkg/client/listers/tenant/v1alpha1" +) + +// WorkspaceInformer provides access to a shared informer and lister for +// Workspaces. +type WorkspaceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.WorkspaceLister +} + +type workspaceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewWorkspaceInformer constructs a new informer for Workspace type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewWorkspaceInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredWorkspaceInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredWorkspaceInformer constructs a new informer for Workspace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredWorkspaceInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TenantV1alpha1().Workspaces().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.TenantV1alpha1().Workspaces().Watch(options) + }, + }, + &tenantv1alpha1.Workspace{}, + resyncPeriod, + indexers, + ) +} + +func (f *workspaceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredWorkspaceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *workspaceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&tenantv1alpha1.Workspace{}, f.defaultInformer) +} + +func (f *workspaceInformer) Lister() v1alpha1.WorkspaceLister { + return v1alpha1.NewWorkspaceLister(f.Informer().GetIndexer()) +} diff --git 
a/pkg/client/listers/tenant/v1alpha1/expansion_generated.go b/pkg/client/listers/tenant/v1alpha1/expansion_generated.go new file mode 100644 index 000000000..19b736571 --- /dev/null +++ b/pkg/client/listers/tenant/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// WorkspaceListerExpansion allows custom methods to be added to +// WorkspaceLister. +type WorkspaceListerExpansion interface{} diff --git a/pkg/client/listers/tenant/v1alpha1/workspace.go b/pkg/client/listers/tenant/v1alpha1/workspace.go new file mode 100644 index 000000000..42cd64285 --- /dev/null +++ b/pkg/client/listers/tenant/v1alpha1/workspace.go @@ -0,0 +1,65 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" +) + +// WorkspaceLister helps list Workspaces. +type WorkspaceLister interface { + // List lists all Workspaces in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Workspace, err error) + // Get retrieves the Workspace from the index for a given name. + Get(name string) (*v1alpha1.Workspace, error) + WorkspaceListerExpansion +} + +// workspaceLister implements the WorkspaceLister interface. +type workspaceLister struct { + indexer cache.Indexer +} + +// NewWorkspaceLister returns a new WorkspaceLister. +func NewWorkspaceLister(indexer cache.Indexer) WorkspaceLister { + return &workspaceLister{indexer: indexer} +} + +// List lists all Workspaces in the indexer. +func (s *workspaceLister) List(selector labels.Selector) (ret []*v1alpha1.Workspace, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Workspace)) + }) + return ret, err +} + +// Get retrieves the Workspace from the index for a given name. 
+func (s *workspaceLister) Get(name string) (*v1alpha1.Workspace, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("workspace"), name) + } + return obj.(*v1alpha1.Workspace), nil +} diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index 40b7d54b4..344ab5f8d 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -31,13 +31,16 @@ const ( IngressControllerFolder = DataHome + "/ingress-controller" IngressControllerPrefix = "kubesphere-router-" - WorkspaceLabelKey = "kubesphere.io/workspace" - WorkspaceAdmin = "workspace-admin" - ClusterAdmin = "cluster-admin" - WorkspaceRegular = "workspace-regular" - WorkspaceViewer = "workspace-viewer" - DevopsOwner = "owner" - DevopsReporter = "reporter" + WorkspaceLabelKey = "kubesphere.io/workspace" + DisplayNameLabelKey = "displayName" + CreatorLabelKey = "creator" + OpenPitrixRuntimeAnnotationKey = "openpitrix_runtime" + WorkspaceAdmin = "workspace-admin" + ClusterAdmin = "cluster-admin" + WorkspaceRegular = "workspace-regular" + WorkspaceViewer = "workspace-viewer" + DevopsOwner = "owner" + DevopsReporter = "reporter" UserNameHeader = "X-Token-Username" ) diff --git a/pkg/controller/add_clusterrolebinding.go b/pkg/controller/add_clusterrolebinding.go new file mode 100644 index 000000000..57293115e --- /dev/null +++ b/pkg/controller/add_clusterrolebinding.go @@ -0,0 +1,28 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package controller + +import ( + "kubesphere.io/kubesphere/pkg/controller/clusterrolebinding" +) + +func init() { + // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. + AddToManagerFuncs = append(AddToManagerFuncs, clusterrolebinding.Add) +} diff --git a/pkg/controller/add_namespace.go b/pkg/controller/add_namespace.go new file mode 100644 index 000000000..7dd21b1b6 --- /dev/null +++ b/pkg/controller/add_namespace.go @@ -0,0 +1,28 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package controller + +import ( + "kubesphere.io/kubesphere/pkg/controller/namespace" +) + +func init() { + // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. + AddToManagerFuncs = append(AddToManagerFuncs, namespace.Add) +} diff --git a/pkg/controller/add_workspace.go b/pkg/controller/add_workspace.go new file mode 100644 index 000000000..1194ff0a7 --- /dev/null +++ b/pkg/controller/add_workspace.go @@ -0,0 +1,26 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package controller + +import "kubesphere.io/kubesphere/pkg/controller/workspace" + +func init() { + // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. + AddToManagerFuncs = append(AddToManagerFuncs, workspace.Add) +} diff --git a/pkg/controller/clusterrolebinding/clusterrolebinding_controller.go b/pkg/controller/clusterrolebinding/clusterrolebinding_controller.go new file mode 100644 index 000000000..e720599d6 --- /dev/null +++ b/pkg/controller/clusterrolebinding/clusterrolebinding_controller.go @@ -0,0 +1,220 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+
+*/
+
+package clusterrolebinding
+
+import (
+	"context"
+	"fmt"
+	corev1 "k8s.io/api/core/v1"
+	rbac "k8s.io/api/rbac/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"kubesphere.io/kubesphere/pkg/constants"
+	"kubesphere.io/kubesphere/pkg/utils/k8sutil"
+	"reflect"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+var (
+	log = logf.Log.WithName("controller")
+)
+
+/**
+* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
+* business logic. Delete these comments after modifying this file.*
+ */
+
+// Add creates a new ClusterRoleBinding Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(mgr manager.Manager) error {
+	return add(mgr, newReconciler(mgr))
+}
+
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager) reconcile.Reconciler {
+	return &ReconcileClusterRoleBinding{Client: mgr.GetClient(), scheme: mgr.GetScheme()}
+}
+
+// add adds a new Controller to mgr with r as the reconcile.Reconciler
+func add(mgr manager.Manager, r reconcile.Reconciler) error {
+	// Create a new controller
+	c, err := controller.New("clusterrolebinding-controller", mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to ClusterRoleBinding
+	err = c.Watch(&source.Kind{Type: &rbac.ClusterRoleBinding{}}, &handler.EnqueueRequestForObject{})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+var _ reconcile.Reconciler = &ReconcileClusterRoleBinding{}
+
+// ReconcileClusterRoleBinding reconciles a ClusterRoleBinding object
+type ReconcileClusterRoleBinding struct {
+	client.Client
+	scheme *runtime.Scheme
+}
+
+// Reconcile reads the state of the cluster for a ClusterRoleBinding object and makes changes based on the state read
+// and what is in the ClusterRoleBinding object
+// +kubebuilder:rbac:groups=core.kubesphere.io,resources=namespaces,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=core.kubesphere.io,resources=namespaces/status,verbs=get;update;patch
+func (r *ReconcileClusterRoleBinding) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+	// Fetch the ClusterRoleBinding instance
+	instance := &rbac.ClusterRoleBinding{}
+	err := r.Get(context.TODO(), request.NamespacedName, instance)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			// Object not found, return. Created objects are automatically garbage collected.
+			// For additional cleanup logic use finalizers.
+			return reconcile.Result{}, nil
+		}
+		// Error reading the object - requeue the request.
+ return reconcile.Result{}, err + } + workspaceName := instance.Labels[constants.WorkspaceLabelKey] + + if workspaceName != "" && k8sutil.IsControlledBy(instance.OwnerReferences, "Workspace", workspaceName) { + if instance.Name == getWorkspaceAdminRoleBindingName(workspaceName) || + instance.Name == getWorkspaceViewerRoleBindingName(workspaceName) { + nsList := &corev1.NamespaceList{} + options := client.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{constants.WorkspaceLabelKey: workspaceName})} + err = r.List(context.TODO(), &options, nsList) + if err != nil { + return reconcile.Result{}, err + } + for _, ns := range nsList.Items { + err = r.updateRoleBindings(instance, &ns) + if err != nil { + return reconcile.Result{}, err + } + } + } + } + return reconcile.Result{}, nil +} + +func (r *ReconcileClusterRoleBinding) updateRoleBindings(clusterRoleBinding *rbac.ClusterRoleBinding, namespace *corev1.Namespace) error { + + workspaceName := namespace.Labels[constants.WorkspaceLabelKey] + + if clusterRoleBinding.Name == getWorkspaceAdminRoleBindingName(workspaceName) { + adminBinding := &rbac.RoleBinding{} + adminBinding.Name = "admin" + adminBinding.Namespace = namespace.Name + adminBinding.RoleRef = rbac.RoleRef{Name: "admin", APIGroup: "rbac.authorization.k8s.io", Kind: "Role"} + adminBinding.Subjects = clusterRoleBinding.Subjects + + found := &rbac.RoleBinding{} + + err := r.Get(context.TODO(), types.NamespacedName{Namespace: namespace.Name, Name: adminBinding.Name}, found) + + if errors.IsNotFound(err) { + log.Info("Creating default role binding", "namespace", namespace.Name, "name", adminBinding.Name) + err = r.Create(context.TODO(), adminBinding) + if err != nil { + return err + } + found = adminBinding + } else if err != nil { + return err + } + + if !reflect.DeepEqual(found.RoleRef, adminBinding.RoleRef) { + log.Info("Deleting conflict role binding", "namespace", namespace.Name, "name", adminBinding.Name) + err = r.Delete(context.TODO(), found) + if err != nil { + 
return err + } + return fmt.Errorf("conflict role binding %s.%s, waiting for recreate", namespace.Name, adminBinding.Name) + } + + if !reflect.DeepEqual(found.Subjects, adminBinding.Subjects) { + found.Subjects = adminBinding.Subjects + log.Info("Updating role binding", "namespace", namespace.Name, "name", adminBinding.Name) + err = r.Update(context.TODO(), found) + if err != nil { + return err + } + } + } + + if clusterRoleBinding.Name == getWorkspaceViewerRoleBindingName(workspaceName) { + + found := &rbac.RoleBinding{} + + viewerBinding := &rbac.RoleBinding{} + viewerBinding.Name = "viewer" + viewerBinding.Namespace = namespace.Name + viewerBinding.RoleRef = rbac.RoleRef{Name: "viewer", APIGroup: "rbac.authorization.k8s.io", Kind: "Role"} + viewerBinding.Subjects = clusterRoleBinding.Subjects + + err := r.Get(context.TODO(), types.NamespacedName{Namespace: namespace.Name, Name: viewerBinding.Name}, found) + + if errors.IsNotFound(err) { + log.Info("Creating default role binding", "namespace", namespace.Name, "name", viewerBinding.Name) + err = r.Create(context.TODO(), viewerBinding) + if err != nil { + return err + } + found = viewerBinding + } else if err != nil { + return err + } + + if !reflect.DeepEqual(found.RoleRef, viewerBinding.RoleRef) { + log.Info("Deleting conflict role binding", "namespace", namespace.Name, "name", viewerBinding.Name) + err = r.Delete(context.TODO(), found) + if err != nil { + return err + } + return fmt.Errorf("conflict role binding %s.%s, waiting for recreate", namespace.Name, viewerBinding.Name) + } + + if !reflect.DeepEqual(found.Subjects, viewerBinding.Subjects) { + found.Subjects = viewerBinding.Subjects + log.Info("Updating role binding", "namespace", namespace.Name, "name", viewerBinding.Name) + err = r.Update(context.TODO(), found) + if err != nil { + return err + } + } + } + + return nil +} + +func getWorkspaceAdminRoleBindingName(workspaceName string) string { + return fmt.Sprintf("workspace:%s:admin", workspaceName) +} + +func 
getWorkspaceViewerRoleBindingName(workspaceName string) string { + return fmt.Sprintf("workspace:%s:viewer", workspaceName) +} diff --git a/pkg/controller/clusterrolebinding/clusterrolebinding_controller_suite_test.go b/pkg/controller/clusterrolebinding/clusterrolebinding_controller_suite_test.go new file mode 100644 index 000000000..ae7dedd43 --- /dev/null +++ b/pkg/controller/clusterrolebinding/clusterrolebinding_controller_suite_test.go @@ -0,0 +1,77 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package clusterrolebinding + +import ( + stdlog "log" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "kubesphere.io/kubesphere/pkg/apis" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var cfg *rest.Config + +func TestMain(m *testing.M) { + t := &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")}, + } + apis.AddToScheme(scheme.Scheme) + + var err error + if cfg, err = t.Start(); err != nil { + stdlog.Fatal(err) + } + + code := m.Run() + t.Stop() + os.Exit(code) +} + +// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and +// writes the request to requests after Reconcile is finished. 
+func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) { + requests := make(chan reconcile.Request) + fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { + result, err := inner.Reconcile(req) + requests <- req + return result, err + }) + return fn, requests +} + +// StartTestManager adds recFn +func StartTestManager(mgr manager.Manager, g *gomega.GomegaWithT) (chan struct{}, *sync.WaitGroup) { + stop := make(chan struct{}) + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + g.Expect(mgr.Start(stop)).NotTo(gomega.HaveOccurred()) + }() + return stop, wg +} diff --git a/pkg/controller/clusterrolebinding/clusterrolebinding_controller_test.go b/pkg/controller/clusterrolebinding/clusterrolebinding_controller_test.go new file mode 100644 index 000000000..dd6905be0 --- /dev/null +++ b/pkg/controller/clusterrolebinding/clusterrolebinding_controller_test.go @@ -0,0 +1,19 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package clusterrolebinding diff --git a/pkg/controller/namespace/namespace_controller.go b/pkg/controller/namespace/namespace_controller.go new file mode 100644 index 000000000..1c24404c0 --- /dev/null +++ b/pkg/controller/namespace/namespace_controller.go @@ -0,0 +1,436 @@ +/* + + Copyright 2019 The KubeSphere Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package namespace + +import ( + "context" + "fmt" + "github.com/golang/glog" + corev1 "k8s.io/api/core/v1" + rbac "k8s.io/api/rbac/v1" + "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/kubernetes/pkg/apis/core" + "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" + "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/simple/client/openpitrix" + "kubesphere.io/kubesphere/pkg/utils/k8sutil" + "reflect" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var ( + log = logf.Log.WithName("controller") + defaultRoles = []rbac.Role{ + {ObjectMeta: metav1.ObjectMeta{Name: "admin"}, Rules: []rbac.PolicyRule{{Verbs: []string{"*"}, APIGroups: []string{"*"}, Resources: []string{"*"}}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "operator"}, Rules: []rbac.PolicyRule{{Verbs: []string{"get", "list", "watch"}, APIGroups: []string{"*"}, Resources: []string{"*"}}, + {Verbs: []string{"*"}, APIGroups: []string{"", "apps", 
"extensions", "batch", "logging.kubesphere.io", "monitoring.kubesphere.io", "iam.kubesphere.io", "resources.kubesphere.io", "autoscaling"}, Resources: []string{"*"}}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "viewer"}, Rules: []rbac.PolicyRule{{Verbs: []string{"get", "list", "watch"}, APIGroups: []string{"*"}, Resources: []string{"*"}}}}, + } +) + +/** +* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller +* business logic. Delete these comments after modifying this file.* + */ + +// Add creates a new Namespace Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller +// and Start it when the Manager is Started. +func Add(mgr manager.Manager) error { + return add(mgr, newReconciler(mgr)) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager) reconcile.Reconciler { + return &ReconcileNamespace{Client: mgr.GetClient(), scheme: mgr.GetScheme()} +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r reconcile.Reconciler) error { + // Create a new controller + c, err := controller.New("namespace-controller", mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + + // Watch for changes to Namespace + err = c.Watch(&source.Kind{Type: &corev1.Namespace{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + return err + } + + return nil +} + +var _ reconcile.Reconciler = &ReconcileNamespace{} + +// ReconcileNamespace reconciles a Namespace object +type ReconcileNamespace struct { + client.Client + scheme *runtime.Scheme +} + +// Reconcile reads that state of the cluster for a Namespace object and makes changes based on the state read +// and what is in the Namespace.Spec +// +kubebuilder:rbac:groups=core.kubesphere.io,resources=namespaces,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=core.kubesphere.io,resources=namespaces/status,verbs=get;update;patch +func (r *ReconcileNamespace) Reconcile(request reconcile.Request) (reconcile.Result, error) { + // Fetch the Namespace instance + instance := &corev1.Namespace{} + err := r.Get(context.TODO(), request.NamespacedName, instance) + if err != nil { + if errors.IsNotFound(err) { + // Object not found, return. Created objects are automatically garbage collected. + // For additional cleanup logic use finalizers. + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. + return reconcile.Result{}, err + } + + if !instance.ObjectMeta.DeletionTimestamp.IsZero() { + // The object is being deleted + if err := r.deleteRuntime(instance); err != nil { + // if fail to delete the external dependency here, return with error + // so that it can be retried + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil + } + + workspaceName := instance.Labels[constants.WorkspaceLabelKey] + + // delete default role bindings + if workspaceName == "" { + adminBinding := &rbac.RoleBinding{} + adminBinding.Name = "admin" + adminBinding.Namespace = instance.Name + log.Info("Deleting default role binding", "namespace", instance.Name, "name", adminBinding.Name) + err := r.Delete(context.TODO(), adminBinding) + if err != nil && !errors.IsNotFound(err) { + return reconcile.Result{}, err + } + viewerBinding := &rbac.RoleBinding{} + viewerBinding.Name = "viewer" + viewerBinding.Namespace = instance.Name + log.Info("Deleting default role binding", "namespace", instance.Name, "name", viewerBinding.Name) + err = r.Delete(context.TODO(), viewerBinding) + if err != nil && !errors.IsNotFound(err) { + return reconcile.Result{}, err + } + return reconcile.Result{}, nil + } + + if err = r.checkAndBindWorkspace(instance); err != nil { + return reconcile.Result{}, err + } + + if err = r.checkAndCreateRoles(instance); err != nil { + return reconcile.Result{}, err + } + + if 
err = r.checkAndCreateRoleBindings(instance); err != nil { + return reconcile.Result{}, err + } + + if err = r.checkAndCreateCephSecret(instance); err != nil { + return reconcile.Result{}, err + } + + if err := r.checkAndCreateRuntime(instance); err != nil { + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil +} + +// Create default roles +func (r *ReconcileNamespace) checkAndCreateRoles(namespace *corev1.Namespace) error { + for _, role := range defaultRoles { + found := &rbac.Role{} + err := r.Get(context.TODO(), types.NamespacedName{Namespace: namespace.Name, Name: role.Name}, found) + if err != nil { + if errors.IsNotFound(err) { + role := role.DeepCopy() + role.Namespace = namespace.Name + log.Info("Creating default role", "namespace", namespace.Name, "role", role.Name) + err = r.Create(context.TODO(), role) + if err != nil { + return err + } + continue + } + return err + } + } + return nil +} + +func (r *ReconcileNamespace) checkAndCreateRoleBindings(namespace *corev1.Namespace) error { + + workspaceName := namespace.Labels[constants.WorkspaceLabelKey] + creatorName := namespace.Labels[constants.CreatorLabelKey] + + creator := rbac.Subject{APIGroup: "rbac.authorization.k8s.io", Kind: "User", Name: creatorName} + + workspaceAdminBinding := &rbac.ClusterRoleBinding{} + + err := r.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("workspace:%s:admin", workspaceName)}, workspaceAdminBinding) + + if err != nil { + return err + } + + adminBinding := &rbac.RoleBinding{} + adminBinding.Name = "admin" + adminBinding.Namespace = namespace.Name + adminBinding.RoleRef = rbac.RoleRef{Name: "admin", APIGroup: "rbac.authorization.k8s.io", Kind: "Role"} + adminBinding.Subjects = workspaceAdminBinding.Subjects + + if creator.Name != "" { + if adminBinding.Subjects == nil { + adminBinding.Subjects = make([]rbac.Subject, 0) + } + if !k8sutil.ContainsUser(adminBinding.Subjects, creatorName) { + adminBinding.Subjects = append(adminBinding.Subjects, creator) + } 
+ } + + found := &rbac.RoleBinding{} + + err = r.Get(context.TODO(), types.NamespacedName{Namespace: namespace.Name, Name: adminBinding.Name}, found) + + if errors.IsNotFound(err) { + log.Info("Creating default role binding", "namespace", namespace.Name, "name", adminBinding.Name) + err = r.Create(context.TODO(), adminBinding) + if err != nil { + return err + } + found = adminBinding + } else if err != nil { + return err + } + + if !reflect.DeepEqual(found.RoleRef, adminBinding.RoleRef) { + log.Info("Deleting conflict role binding", "namespace", namespace.Name, "name", adminBinding.Name) + err = r.Delete(context.TODO(), found) + if err != nil { + return err + } + return fmt.Errorf("conflict role binding %s.%s, waiting for recreate", namespace.Name, adminBinding.Name) + } + + if !reflect.DeepEqual(found.Subjects, adminBinding.Subjects) { + found.Subjects = adminBinding.Subjects + log.Info("Updating role binding", "namespace", namespace.Name, "name", adminBinding.Name) + err = r.Update(context.TODO(), found) + if err != nil { + return err + } + } + + workspaceViewerBinding := &rbac.ClusterRoleBinding{} + + err = r.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("workspace:%s:viewer", workspaceName)}, workspaceViewerBinding) + + if err != nil { + return err + } + + viewerBinding := &rbac.RoleBinding{} + viewerBinding.Name = "viewer" + viewerBinding.Namespace = namespace.Name + viewerBinding.RoleRef = rbac.RoleRef{Name: "viewer", APIGroup: "rbac.authorization.k8s.io", Kind: "Role"} + viewerBinding.Subjects = workspaceViewerBinding.Subjects + + err = r.Get(context.TODO(), types.NamespacedName{Namespace: namespace.Name, Name: viewerBinding.Name}, found) + + if errors.IsNotFound(err) { + log.Info("Creating default role binding", "namespace", namespace.Name, "name", viewerBinding.Name) + err = r.Create(context.TODO(), viewerBinding) + if err != nil { + return err + } + found = viewerBinding + } else if err != nil { + return err + } + + if !reflect.DeepEqual(found.RoleRef, viewerBinding.RoleRef) { + 
log.Info("Deleting conflict role binding", "namespace", namespace.Name, "name", viewerBinding.Name) + err = r.Delete(context.TODO(), found) + if err != nil { + return err + } + return fmt.Errorf("conflict role binding %s.%s, waiting for recreate", namespace.Name, viewerBinding.Name) + } + + if !reflect.DeepEqual(found.Subjects, viewerBinding.Subjects) { + found.Subjects = viewerBinding.Subjects + log.Info("Updating role binding", "namespace", namespace.Name, "name", viewerBinding.Name) + err = r.Update(context.TODO(), found) + if err != nil { + return err + } + } + + return nil +} + +// Create openpitrix runtime +func (r *ReconcileNamespace) checkAndCreateRuntime(namespace *corev1.Namespace) error { + + if runtimeId := namespace.Annotations[constants.OpenPitrixRuntimeAnnotationKey]; runtimeId != "" { + return nil + } + + cm := &corev1.ConfigMap{} + err := r.Get(context.TODO(), types.NamespacedName{Namespace: constants.KubeSphereControlNamespace, Name: constants.AdminUserName}, cm) + + if err != nil { + return err + } + + runtime := &openpitrix.RunTime{Name: namespace.Name, Zone: namespace.Name, Provider: "kubernetes", RuntimeCredential: cm.Data["config"]} + + log.Info("Creating openpitrix runtime", "namespace", namespace.Name) + if err := openpitrix.Client().CreateRuntime(runtime); err != nil { + return err + } + + return nil +} + +// Delete openpitrix runtime +func (r *ReconcileNamespace) deleteRuntime(namespace *corev1.Namespace) error { + + if runtimeId := namespace.Annotations[constants.OpenPitrixRuntimeAnnotationKey]; runtimeId != "" { + log.Info("Deleting openpitrix runtime", "namespace", namespace.Name, "runtime", runtimeId) + if err := openpitrix.Client().DeleteRuntime(runtimeId); err != nil { + return err + } + } + + return nil +} + +// Create openpitrix runtime +func (r *ReconcileNamespace) checkAndBindWorkspace(namespace *corev1.Namespace) error { + + workspaceName := namespace.Labels[constants.WorkspaceLabelKey] + + if workspaceName == "" { + return nil 
+ } + + workspace := &v1alpha1.Workspace{} + + err := r.Get(context.TODO(), types.NamespacedName{Name: workspaceName}, workspace) + + if err != nil { + if errors.IsNotFound(err) { + log.Error(err, "namespace", namespace.Name) + delete(namespace.Labels, constants.WorkspaceLabelKey) + err = r.Update(context.TODO(), namespace) + if err != nil { + return err + } + } + return err + } + + if !metav1.IsControlledBy(namespace, workspace) { + if err := controllerutil.SetControllerReference(workspace, namespace, r.scheme); err != nil { + return err + } + log.Info("Bind workspace", "namespace", namespace.Name, "workspace", workspaceName) + err = r.Update(context.TODO(), namespace) + if err != nil { + return err + } + } + + return nil +} + +//Create Ceph secret in the new namespace +func (r *ReconcileNamespace) checkAndCreateCephSecret(namespace *corev1.Namespace) error { + + newNsName := namespace.Name + scList := &v1.StorageClassList{} + err := r.List(context.TODO(), &client.ListOptions{}, scList) + if err != nil { + return err + } + for _, sc := range scList.Items { + if sc.Provisioner == "kubernetes.io/rbd" { + log.Info("would create Ceph user secret in storage class %s at namespace %s", sc.GetName(), newNsName) + if secretName, ok := sc.Parameters["userSecretName"]; ok { + secret := &corev1.Secret{} + err = r.Get(context.TODO(), types.NamespacedName{Namespace: core.NamespaceSystem, Name: secretName}, secret) + if err != nil { + if errors.IsNotFound(err) { + log.Error(err, "cannot find secret in namespace %s, error: %s", core.NamespaceSystem, secretName) + continue + } + log.Error(err, fmt.Sprintf("failed to find secret in namespace %s", core.NamespaceSystem)) + continue + } + glog.Infof("succeed to find secret %s in namespace %s", secret.GetName(), secret.GetNamespace()) + + newSecret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: secret.Kind, + APIVersion: secret.APIVersion, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: secret.GetName(), + Namespace: newNsName, + 
Labels: secret.GetLabels(), + Annotations: secret.GetAnnotations(), + DeletionGracePeriodSeconds: secret.GetDeletionGracePeriodSeconds(), + ClusterName: secret.GetClusterName(), + }, + Data: secret.Data, + StringData: secret.StringData, + Type: secret.Type, + } + log.Info(fmt.Sprintf("creating secret %s in namespace %s...", newSecret.GetName(), newSecret.GetNamespace())) + + err = r.Create(context.TODO(), newSecret) + if err != nil { + log.Error(err, fmt.Sprintf("failed to create secret in namespace %s", newSecret.GetNamespace())) + continue + } + } else { + log.Error(err, fmt.Sprintf("failed to find user secret name in storage class %s", sc.GetName())) + } + } + } + + return nil +} diff --git a/pkg/controller/namespace/namespace_controller_suite_test.go b/pkg/controller/namespace/namespace_controller_suite_test.go new file mode 100644 index 000000000..96ac00c93 --- /dev/null +++ b/pkg/controller/namespace/namespace_controller_suite_test.go @@ -0,0 +1,77 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package namespace + +import ( + stdlog "log" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "kubesphere.io/kubesphere/pkg/apis" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var cfg *rest.Config + +func TestMain(m *testing.M) { + t := &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")}, + } + apis.AddToScheme(scheme.Scheme) + + var err error + if cfg, err = t.Start(); err != nil { + stdlog.Fatal(err) + } + + code := m.Run() + t.Stop() + os.Exit(code) +} + +// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and +// writes the request to requests after Reconcile is finished. +func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) { + requests := make(chan reconcile.Request) + fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { + result, err := inner.Reconcile(req) + requests <- req + return result, err + }) + return fn, requests +} + +// StartTestManager adds recFn +func StartTestManager(mgr manager.Manager, g *gomega.GomegaWithT) (chan struct{}, *sync.WaitGroup) { + stop := make(chan struct{}) + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + g.Expect(mgr.Start(stop)).NotTo(gomega.HaveOccurred()) + }() + return stop, wg +} diff --git a/pkg/controller/namespace/namespace_controller_test.go b/pkg/controller/namespace/namespace_controller_test.go new file mode 100644 index 000000000..5dbadba54 --- /dev/null +++ b/pkg/controller/namespace/namespace_controller_test.go @@ -0,0 +1,19 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package namespace diff --git a/pkg/controller/workspace/workspace_controller.go b/pkg/controller/workspace/workspace_controller.go new file mode 100644 index 000000000..32926b49c --- /dev/null +++ b/pkg/controller/workspace/workspace_controller.go @@ -0,0 +1,534 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package workspace + +import ( + "context" + "fmt" + rbac "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" + "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/models" + "kubesphere.io/kubesphere/pkg/simple/client/kubesphere" + "kubesphere.io/kubesphere/pkg/utils/sliceutil" + "reflect" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +var log = logf.Log.WithName("controller") + +/** +* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller +* business logic. Delete these comments after modifying this file.* + */ + +// Add creates a new Workspace Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller +// and Start it when the Manager is Started. 
+func Add(mgr manager.Manager) error { + return add(mgr, newReconciler(mgr)) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager) reconcile.Reconciler { + return &ReconcileWorkspace{Client: mgr.GetClient(), scheme: mgr.GetScheme(), + recorder: mgr.GetRecorder("workspace-controller"), ksclient: kubesphere.Client()} +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler +func add(mgr manager.Manager, r reconcile.Reconciler) error { + // Create a new controller + c, err := controller.New("workspace-controller", mgr, controller.Options{Reconciler: r}) + if err != nil { + return err + } + + // Watch for changes to Workspace + err = c.Watch(&source.Kind{Type: &tenantv1alpha1.Workspace{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + return err + } + + return nil +} + +var _ reconcile.Reconciler = &ReconcileWorkspace{} + +// ReconcileWorkspace reconciles a Workspace object +type ReconcileWorkspace struct { + client.Client + scheme *runtime.Scheme + recorder record.EventRecorder + ksclient kubesphere.Interface +} + +// Reconcile reads that state of the cluster for a Workspace object and makes changes based on the state read +// and what is in the Workspace.Spec +// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces/status,verbs=get;update;patch +func (r *ReconcileWorkspace) Reconcile(request reconcile.Request) (reconcile.Result, error) { + // Fetch the Workspace instance + instance := &tenantv1alpha1.Workspace{} + err := r.Get(context.TODO(), request.NamespacedName, instance) + if err != nil { + if errors.IsNotFound(err) { + // Object not found, return. Created objects are automatically garbage collected. + // For additional cleanup logic use finalizers. + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. 
+ return reconcile.Result{}, err + } + + // name of your custom finalizer + finalizer := "finalizers.tenant.kubesphere.io" + + if instance.ObjectMeta.DeletionTimestamp.IsZero() { + // The object is not being deleted, so if it does not have our finalizer, + // then lets add the finalizer and update the object. + if !sliceutil.HasString(instance.ObjectMeta.Finalizers, finalizer) { + instance.ObjectMeta.Finalizers = append(instance.ObjectMeta.Finalizers, finalizer) + if err := r.Update(context.Background(), instance); err != nil { + return reconcile.Result{}, err + } + } + } else { + // The object is being deleted + if sliceutil.HasString(instance.ObjectMeta.Finalizers, finalizer) { + // our finalizer is present, so lets handle our external dependency + if err := r.deleteGroup(instance); err != nil { + // if fail to delete the external dependency here, return with error + // so that it can be retried + return reconcile.Result{}, err + } + + // remove our finalizer from the list and update it. + instance.ObjectMeta.Finalizers = sliceutil.RemoveString(instance.ObjectMeta.Finalizers, func(item string) bool { + return item == finalizer + }) + if err := r.Update(context.Background(), instance); err != nil { + return reconcile.Result{}, err + } + } + + // Our finalizer has finished, so the reconciler can do nothing. 
+ return reconcile.Result{}, nil + } + + if err = r.createWorkspaceAdmin(instance); err != nil { + return reconcile.Result{}, err + } + + if err = r.createWorkspaceRegular(instance); err != nil { + return reconcile.Result{}, err + } + + if err = r.createWorkspaceViewer(instance); err != nil { + return reconcile.Result{}, err + } + + if err = r.createGroup(instance); err != nil { + return reconcile.Result{}, err + } + + if err = r.createWorkspaceRoleBindings(instance); err != nil { + return reconcile.Result{}, err + } + + return reconcile.Result{}, nil +} + +func (r *ReconcileWorkspace) createWorkspaceAdmin(instance *tenantv1alpha1.Workspace) error { + found := &rbac.ClusterRole{} + + admin := getWorkspaceAdmin(instance.Name) + + if err := controllerutil.SetControllerReference(instance, admin, r.scheme); err != nil { + return err + } + + err := r.Get(context.TODO(), types.NamespacedName{Name: admin.Name}, found) + + if err != nil && errors.IsNotFound(err) { + log.Info("Creating workspace role", "workspace", instance.Name, "name", admin.Name) + err = r.Create(context.TODO(), admin) + if err != nil { + return err + } + found = admin + } else if err != nil { + // Error reading the object - requeue the request. 
+ return err + } + + // Update the found object and write the result back if there are any changes + if !reflect.DeepEqual(admin.Rules, found.Rules) || !reflect.DeepEqual(admin.Labels, found.Labels) { + found.Rules = admin.Rules + found.Labels = admin.Labels + log.Info("Updating workspace role", "workspace", instance.Name, "name", admin.Name) + err = r.Update(context.TODO(), found) + if err != nil { + return err + } + } + return nil +} + +func (r *ReconcileWorkspace) createWorkspaceRegular(instance *tenantv1alpha1.Workspace) error { + found := &rbac.ClusterRole{} + + regular := getWorkspaceRegular(instance.Name) + + if err := controllerutil.SetControllerReference(instance, regular, r.scheme); err != nil { + return err + } + + err := r.Get(context.TODO(), types.NamespacedName{Name: regular.Name}, found) + + if err != nil && errors.IsNotFound(err) { + + log.Info("Creating workspace role", "workspace", instance.Name, "name", regular.Name) + err = r.Create(context.TODO(), regular) + // Error reading the object - requeue the request. + if err != nil { + return err + } + found = regular + } else if err != nil { + // Error reading the object - requeue the request. 
+ return err + } + + // Update the found object and write the result back if there are any changes + if !reflect.DeepEqual(regular.Rules, found.Rules) || !reflect.DeepEqual(regular.Labels, found.Labels) { + found.Rules = regular.Rules + found.Labels = regular.Labels + log.Info("Updating workspace role", "workspace", instance.Name, "name", regular.Name) + err = r.Update(context.TODO(), found) + if err != nil { + return err + } + } + + return nil +} + +func (r *ReconcileWorkspace) createWorkspaceViewer(instance *tenantv1alpha1.Workspace) error { + found := &rbac.ClusterRole{} + + viewer := getWorkspaceViewer(instance.Name) + + if err := controllerutil.SetControllerReference(instance, viewer, r.scheme); err != nil { + return err + } + + err := r.Get(context.TODO(), types.NamespacedName{Name: viewer.Name}, found) + + if err != nil && errors.IsNotFound(err) { + log.Info("Creating workspace role", "workspace", instance.Name, "name", viewer.Name) + err = r.Create(context.TODO(), viewer) + // Error reading the object - requeue the request. + if err != nil { + return err + } + found = viewer + } else if err != nil { + // Error reading the object - requeue the request. 
+ return err
+ }
+
+ // Update the found object and write the result back if there are any changes
+ if !reflect.DeepEqual(viewer.Rules, found.Rules) || !reflect.DeepEqual(viewer.Labels, found.Labels) {
+ found.Rules = viewer.Rules
+ found.Labels = viewer.Labels
+ log.Info("Updating workspace role", "workspace", instance.Name, "name", viewer.Name)
+ err = r.Update(context.TODO(), found)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *ReconcileWorkspace) createGroup(instance *tenantv1alpha1.Workspace) error {
+ _, err := r.ksclient.DescribeGroup(instance.Name)
+
+ group := &models.Group{
+ Name: instance.Name,
+ }
+
+ if err != nil && kubesphere.IsNotFound(err) {
+ log.Info("Creating group", "group name", instance.Name)
+ _, err = r.ksclient.CreateGroup(group)
+ if err != nil {
+ if kubesphere.IsExist(err) {
+ return nil
+ }
+ return err
+ }
+ } else if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *ReconcileWorkspace) deleteGroup(instance *tenantv1alpha1.Workspace) error {
+ log.Info("Deleting group", "group name", instance.Name)
+ if err := r.ksclient.DeleteGroup(instance.Name); err != nil {
+ if kubesphere.IsNotFound(err) {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+func (r *ReconcileWorkspace) createWorkspaceRoleBindings(instance *tenantv1alpha1.Workspace) error {
+
+ adminRoleBinding := &rbac.ClusterRoleBinding{}
+ adminRoleBinding.Name = getWorkspaceAdminRoleBindingName(instance.Name)
+ adminRoleBinding.Labels = map[string]string{constants.WorkspaceLabelKey: instance.Name}
+ adminRoleBinding.RoleRef = rbac.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: getWorkspaceAdminRoleName(instance.Name)}
+
+ workspaceManager := rbac.Subject{APIGroup: "rbac.authorization.k8s.io", Kind: "User", Name: instance.Spec.Manager}
+
+ if workspaceManager.Name != "" {
+ adminRoleBinding.Subjects = []rbac.Subject{workspaceManager}
+ } else {
+ adminRoleBinding.Subjects = []rbac.Subject{}
+ }
+
+ if err :=
controllerutil.SetControllerReference(instance, adminRoleBinding, r.scheme); err != nil {
+ return err
+ }
+
+ foundRoleBinding := &rbac.ClusterRoleBinding{}
+
+ err := r.Get(context.TODO(), types.NamespacedName{Name: adminRoleBinding.Name}, foundRoleBinding)
+
+ if err != nil && errors.IsNotFound(err) {
+ log.Info("Creating workspace role binding", "workspace", instance.Name, "name", adminRoleBinding.Name)
+ err = r.Create(context.TODO(), adminRoleBinding)
+ // Error creating the object - requeue the request.
+ if err != nil {
+ return err
+ }
+ foundRoleBinding = adminRoleBinding
+ } else if err != nil {
+ // Error reading the object - requeue the request.
+ return err
+ }
+
+ // Update the found object and write the result back if there are any changes
+ if !reflect.DeepEqual(adminRoleBinding.RoleRef, foundRoleBinding.RoleRef) {
+ log.Info("Deleting conflict workspace role binding", "workspace", instance.Name, "name", adminRoleBinding.Name)
+ err = r.Delete(context.TODO(), foundRoleBinding)
+ if err != nil {
+ return err
+ }
+ return fmt.Errorf("conflict workspace role binding %s, waiting for recreate", foundRoleBinding.Name)
+ }
+
+ if workspaceManager.Name != "" && !hasSubject(foundRoleBinding.Subjects, workspaceManager) {
+ foundRoleBinding.Subjects = append(foundRoleBinding.Subjects, workspaceManager)
+ log.Info("Updating workspace role binding", "workspace", instance.Name, "name", adminRoleBinding.Name)
+ err = r.Update(context.TODO(), foundRoleBinding)
+ if err != nil {
+ return err
+ }
+ }
+
+ regularRoleBinding := &rbac.ClusterRoleBinding{}
+ regularRoleBinding.Name = getWorkspaceRegularRoleBindingName(instance.Name)
+ regularRoleBinding.Labels = map[string]string{constants.WorkspaceLabelKey: instance.Name}
+ regularRoleBinding.RoleRef = rbac.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: getWorkspaceRegularRoleName(instance.Name)}
+ regularRoleBinding.Subjects = []rbac.Subject{}
+
+ if err =
controllerutil.SetControllerReference(instance, regularRoleBinding, r.scheme); err != nil { + return err + } + + err = r.Get(context.TODO(), types.NamespacedName{Name: regularRoleBinding.Name}, foundRoleBinding) + + if err != nil && errors.IsNotFound(err) { + log.Info("Creating workspace role binding", "workspace", instance.Name, "name", regularRoleBinding.Name) + err = r.Create(context.TODO(), regularRoleBinding) + // Error reading the object - requeue the request. + if err != nil { + return err + } + foundRoleBinding = regularRoleBinding + } else if err != nil { + // Error reading the object - requeue the request. + return err + } + + // Update the found object and write the result back if there are any changes + if !reflect.DeepEqual(regularRoleBinding.RoleRef, foundRoleBinding.RoleRef) { + log.Info("Deleting conflict workspace role binding", "workspace", instance.Name, "name", regularRoleBinding.Name) + err = r.Delete(context.TODO(), foundRoleBinding) + if err != nil { + return err + } + return fmt.Errorf("conflict workspace role binding %s, waiting for recreate", foundRoleBinding.Name) + } + + viewerRoleBinding := &rbac.ClusterRoleBinding{} + viewerRoleBinding.Name = getWorkspaceViewerRoleBindingName(instance.Name) + viewerRoleBinding.Labels = map[string]string{constants.WorkspaceLabelKey: instance.Name} + viewerRoleBinding.RoleRef = rbac.RoleRef{APIGroup: "rbac.authorization.k8s.io", Kind: "ClusterRole", Name: getWorkspaceViewerRoleName(instance.Name)} + viewerRoleBinding.Subjects = []rbac.Subject{} + + if err = controllerutil.SetControllerReference(instance, viewerRoleBinding, r.scheme); err != nil { + return err + } + + err = r.Get(context.TODO(), types.NamespacedName{Name: viewerRoleBinding.Name}, foundRoleBinding) + + if err != nil && errors.IsNotFound(err) { + log.Info("Creating workspace role binding", "workspace", instance.Name, "name", viewerRoleBinding.Name) + err = r.Create(context.TODO(), viewerRoleBinding) + // Error reading the object - requeue 
the request. + if err != nil { + return err + } + foundRoleBinding = viewerRoleBinding + } else if err != nil { + // Error reading the object - requeue the request. + return err + } + + // Update the found object and write the result back if there are any changes + if !reflect.DeepEqual(viewerRoleBinding.RoleRef, foundRoleBinding.RoleRef) { + log.Info("Deleting conflict workspace role binding", "workspace", instance.Name, "name", viewerRoleBinding.Name) + err = r.Delete(context.TODO(), foundRoleBinding) + if err != nil { + return err + } + return fmt.Errorf("conflict workspace role binding %s, waiting for recreate", foundRoleBinding.Name) + } + + return nil +} + +func hasSubject(subjects []rbac.Subject, user rbac.Subject) bool { + for _, subject := range subjects { + if reflect.DeepEqual(subject, user) { + return true + } + } + return false +} + +func getWorkspaceAdminRoleName(workspaceName string) string { + return fmt.Sprintf("workspace:%s:admin", workspaceName) +} +func getWorkspaceRegularRoleName(workspaceName string) string { + return fmt.Sprintf("workspace:%s:regular", workspaceName) +} +func getWorkspaceViewerRoleName(workspaceName string) string { + return fmt.Sprintf("workspace:%s:viewer", workspaceName) +} + +func getWorkspaceAdminRoleBindingName(workspaceName string) string { + return fmt.Sprintf("workspace:%s:admin", workspaceName) +} + +func getWorkspaceRegularRoleBindingName(workspaceName string) string { + return fmt.Sprintf("workspace:%s:regular", workspaceName) +} + +func getWorkspaceViewerRoleBindingName(workspaceName string) string { + return fmt.Sprintf("workspace:%s:viewer", workspaceName) +} + +func getWorkspaceAdmin(workspaceName string) *rbac.ClusterRole { + admin := &rbac.ClusterRole{} + admin.Name = getWorkspaceAdminRoleName(workspaceName) + admin.Labels = map[string]string{constants.WorkspaceLabelKey: workspaceName, constants.DisplayNameLabelKey: constants.WorkspaceAdmin} + admin.Rules = []rbac.PolicyRule{ + { + Verbs: []string{"*"}, + 
APIGroups: []string{"*"}, + ResourceNames: []string{workspaceName}, + Resources: []string{"workspaces", "workspaces/*"}, + }, + { + Verbs: []string{"list"}, + APIGroups: []string{"iam.kubesphere.io"}, + Resources: []string{"users"}, + }, + } + + return admin +} + +func getWorkspaceRegular(workspaceName string) *rbac.ClusterRole { + regular := &rbac.ClusterRole{} + regular.Name = getWorkspaceRegularRoleName(workspaceName) + regular.Labels = map[string]string{constants.WorkspaceLabelKey: workspaceName, constants.DisplayNameLabelKey: constants.WorkspaceRegular} + regular.Rules = []rbac.PolicyRule{ + { + Verbs: []string{"get"}, + APIGroups: []string{"*"}, + Resources: []string{"workspaces"}, + ResourceNames: []string{workspaceName}, + }, { + Verbs: []string{"create"}, + APIGroups: []string{"tenant.kubesphere.io"}, + Resources: []string{"workspaces/namespaces", "workspaces/devops"}, + ResourceNames: []string{workspaceName}, + }, + { + Verbs: []string{"get"}, + APIGroups: []string{"iam.kubesphere.io"}, + ResourceNames: []string{workspaceName}, + Resources: []string{"workspaces/members"}, + }, + } + + return regular +} + +func getWorkspaceViewer(workspaceName string) *rbac.ClusterRole { + viewer := &rbac.ClusterRole{} + viewer.Name = getWorkspaceViewerRoleName(workspaceName) + viewer.Labels = map[string]string{constants.WorkspaceLabelKey: workspaceName, constants.DisplayNameLabelKey: constants.WorkspaceViewer} + viewer.Rules = []rbac.PolicyRule{ + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"*"}, + ResourceNames: []string{workspaceName}, + Resources: []string{"workspaces", "workspaces/*"}, + }, + } + return viewer +} diff --git a/pkg/controller/workspace/workspace_controller_suite_test.go b/pkg/controller/workspace/workspace_controller_suite_test.go new file mode 100644 index 000000000..9fe761d0c --- /dev/null +++ b/pkg/controller/workspace/workspace_controller_suite_test.go @@ -0,0 +1,77 @@ +/* + + Copyright 2019 The KubeSphere Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package workspace + +import ( + stdlog "log" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "kubesphere.io/kubesphere/pkg/apis" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var cfg *rest.Config + +func TestMain(m *testing.M) { + t := &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crds")}, + } + apis.AddToScheme(scheme.Scheme) + + var err error + if cfg, err = t.Start(); err != nil { + stdlog.Fatal(err) + } + + code := m.Run() + t.Stop() + os.Exit(code) +} + +// SetupTestReconcile returns a reconcile.Reconcile implementation that delegates to inner and +// writes the request to requests after Reconcile is finished. 
+func SetupTestReconcile(inner reconcile.Reconciler) (reconcile.Reconciler, chan reconcile.Request) { + requests := make(chan reconcile.Request) + fn := reconcile.Func(func(req reconcile.Request) (reconcile.Result, error) { + result, err := inner.Reconcile(req) + requests <- req + return result, err + }) + return fn, requests +} + +// StartTestManager adds recFn +func StartTestManager(mgr manager.Manager, g *gomega.GomegaWithT) (chan struct{}, *sync.WaitGroup) { + stop := make(chan struct{}) + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + g.Expect(mgr.Start(stop)).NotTo(gomega.HaveOccurred()) + }() + return stop, wg +} diff --git a/pkg/controller/workspace/workspace_controller_test.go b/pkg/controller/workspace/workspace_controller_test.go new file mode 100644 index 000000000..6f0345f56 --- /dev/null +++ b/pkg/controller/workspace/workspace_controller_test.go @@ -0,0 +1,19 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ + +package workspace diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go index cf341903b..8a3966608 100644 --- a/pkg/errors/errors.go +++ b/pkg/errors/errors.go @@ -36,6 +36,10 @@ func Wrap(err error) Error { return Error{Message: err.Error()} } +func New(message string) Error { + return Error{Message: message} +} + func Parse(data []byte) error { var j map[string]string err := json.Unmarshal(data, &j) diff --git a/pkg/informers/informers.go b/pkg/informers/informers.go index 624804ded..d737e4d5f 100644 --- a/pkg/informers/informers.go +++ b/pkg/informers/informers.go @@ -24,6 +24,7 @@ import ( s2iInformers "github.com/kubesphere/s2ioperator/pkg/client/informers/externalversions" "k8s.io/client-go/informers" + ksInformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions" "kubesphere.io/kubesphere/pkg/simple/client/k8s" ) @@ -33,8 +34,10 @@ const defaultResync = 600 * time.Second var ( k8sOnce sync.Once s2iOnce sync.Once + ksOnce sync.Once informerFactory informers.SharedInformerFactory s2iInformerFactory s2iInformers.SharedInformerFactory + ksInformerFactory ksInformers.SharedInformerFactory ) func SharedInformerFactory() informers.SharedInformerFactory { @@ -52,3 +55,11 @@ func S2iSharedInformerFactory() s2iInformers.SharedInformerFactory { }) return s2iInformerFactory } + +func KsSharedInformerFactory() ksInformers.SharedInformerFactory { + ksOnce.Do(func() { + k8sClient := k8s.KsClient() + ksInformerFactory = ksInformers.NewSharedInformerFactory(k8sClient, defaultResync) + }) + return ksInformerFactory +} diff --git a/pkg/models/applications/applications.go b/pkg/models/applications/applications.go index 01e1384b1..7e6313f1a 100644 --- a/pkg/models/applications/applications.go +++ b/pkg/models/applications/applications.go @@ -18,11 +18,8 @@ package applications import ( - "encoding/json" - "fmt" "github.com/golang/glog" - "io/ioutil" - v12 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" "k8s.io/api/core/v1" 
"k8s.io/api/extensions/v1beta1" "k8s.io/apimachinery/pkg/api/errors" @@ -33,24 +30,11 @@ import ( "kubesphere.io/kubesphere/pkg/models/resources" "kubesphere.io/kubesphere/pkg/params" "kubesphere.io/kubesphere/pkg/simple/client/k8s" - "net/http" - "strconv" + "kubesphere.io/kubesphere/pkg/simple/client/openpitrix" "strings" "time" ) -var ( - OpenPitrixProxyToken string - OpenPitrixServer string -) - -const ( - unknown = "-" - deploySuffix = "-Deployment" - daemonSuffix = "-DaemonSet" - stateSuffix = "-StatefulSet" -) - type Application struct { Name string `json:"name"` RepoName string `json:"repoName"` @@ -70,174 +54,93 @@ type Application struct { ClusterID string `json:"cluster_id"` } -type clusterRole struct { - ClusterID string `json:"cluster_id"` - Role string `json:"role"` -} - -type cluster struct { - ClusterID string `json:"cluster_id"` - Name string `json:"name"` - AppID string `json:"app_id"` - VersionID string `json:"version_id"` - Status string `json:"status"` - UpdateTime time.Time `json:"status_time"` - CreateTime time.Time `json:"create_time"` - RunTimeId string `json:"runtime_id"` - Description string `json:"description"` - ClusterRoleSets []clusterRole `json:"cluster_role_set"` -} - -type clusters struct { - Total int `json:"total_count"` - Clusters []cluster `json:"cluster_set"` -} - -type versionList struct { - Total int `json:"total_count"` - Versions []version `json:"app_version_set"` -} - -type version struct { - Name string `json:"name"` - VersionID string `json:"version_id"` -} - -type runtime struct { - RuntimeID string `json:"runtime_id"` - Zone string `json:"zone"` -} - -type runtimeList struct { - Total int `json:"total_count"` - Runtimes []runtime `json:"runtime_set"` -} - -type app struct { - AppId string `json:"app_id"` - Name string `json:"name"` - ChartName string `json:"chart_name"` - RepoId string `json:"repo_id"` -} - -type repo struct { - RepoId string `json:"repo_id"` - Name string `json:"name"` - Url string `json:"url"` -} - 
type workLoads struct { - Deployments []v12.Deployment `json:"deployments,omitempty"` - Statefulsets []v12.StatefulSet `json:"statefulsets,omitempty"` - Daemonsets []v12.DaemonSet `json:"daemonsets,omitempty"` + Deployments []appsv1.Deployment `json:"deployments,omitempty"` + Statefulsets []appsv1.StatefulSet `json:"statefulsets,omitempty"` + Daemonsets []appsv1.DaemonSet `json:"daemonsets,omitempty"` } -type appList struct { - Total int `json:"total_count"` - Apps []app `json:"app_set"` +func ListApplication(runtimeId string, conditions *params.Conditions, limit, offset int) (*models.PageableResponse, error) { + clusterList, err := openpitrix.ListClusters(runtimeId, conditions.Match["keyword"], conditions.Match["status"], limit, offset) + if err != nil { + return nil, err + } + result := models.PageableResponse{TotalCount: clusterList.Total} + result.Items = make([]interface{}, 0) + for _, item := range clusterList.Clusters { + var app Application + + app.Name = item.Name + app.ClusterID = item.ClusterID + app.UpdateTime = item.UpdateTime + app.Status = item.Status + versionInfo, _ := openpitrix.GetVersion(item.VersionID) + app.Version = versionInfo + app.VersionId = item.VersionID + runtimeInfo, _ := openpitrix.GetRuntime(item.RunTimeId) + app.Runtime = runtimeInfo + app.RuntimeId = item.RunTimeId + appInfo, _, appId, _ := openpitrix.GetAppInfo(item.AppID) + app.App = appInfo + app.AppId = appId + app.Description = item.Description + + result.Items = append(result.Items, app) + } + + return &result, nil } -type repoList struct { - Total int `json:"total_count"` - Repos []repo `json:"repo_set"` +func GetApp(clusterId string) (*Application, error) { + + item, err := openpitrix.GetCluster(clusterId) + + if err != nil { + return nil, err + } + + var app Application + + app.Name = item.Name + app.ClusterID = item.ClusterID + app.UpdateTime = item.UpdateTime + app.CreateTime = item.CreateTime + app.Status = item.Status + versionInfo, _ := 
openpitrix.GetVersion(item.VersionID) + app.Version = versionInfo + app.VersionId = item.VersionID + + runtimeInfo, _ := openpitrix.GetRuntime(item.RunTimeId) + app.Runtime = runtimeInfo + app.RuntimeId = item.RunTimeId + appInfo, repoId, appId, _ := openpitrix.GetAppInfo(item.AppID) + app.App = appInfo + app.AppId = appId + app.Description = item.Description + + app.RepoName, _ = openpitrix.GetRepo(repoId) + + workloads, err := getWorkLoads(app.Runtime, item.ClusterRoleSets) + if err != nil { + glog.Error(err) + return nil, err + } + app.WorkLoads = workloads + workloadLabels := getLabels(app.Runtime, app.WorkLoads) + app.Services = getSvcs(app.Runtime, workloadLabels) + app.Ingresses = getIng(app.Runtime, app.Services) + + return &app, nil } -func GetAppInfo(appId string) (string, string, string, error) { - url := fmt.Sprintf("%s/v1/apps?app_id=%s", OpenPitrixServer, appId) - resp, err := makeHttpRequest("GET", url, "") - if err != nil { - glog.Error(err) - return unknown, unknown, unknown, err - } - - var apps appList - err = json.Unmarshal(resp, &apps) - if err != nil { - glog.Error(err) - return unknown, unknown, unknown, err - } - - if len(apps.Apps) == 0 { - return unknown, unknown, unknown, err - } - - return apps.Apps[0].ChartName, apps.Apps[0].RepoId, apps.Apps[0].AppId, nil -} - -func GetRepo(repoId string) (string, error) { - url := fmt.Sprintf("%s/v1/repos?repo_id=%s", OpenPitrixServer, repoId) - resp, err := makeHttpRequest("GET", url, "") - if err != nil { - glog.Error(err) - return unknown, err - } - - var repos repoList - err = json.Unmarshal(resp, &repos) - if err != nil { - glog.Error(err) - return unknown, err - } - - if len(repos.Repos) == 0 { - return unknown, err - } - - return repos.Repos[0].Name, nil -} - -func GetVersion(versionId string) (string, error) { - versionUrl := fmt.Sprintf("%s/v1/app_versions?version_id=%s", OpenPitrixServer, versionId) - resp, err := makeHttpRequest("GET", versionUrl, "") - if err != nil { - glog.Error(err) - 
return unknown, err - } - - var versions versionList - err = json.Unmarshal(resp, &versions) - if err != nil { - glog.Error(err) - return unknown, err - } - - if len(versions.Versions) == 0 { - return unknown, nil - } - return versions.Versions[0].Name, nil -} - -func GetRuntime(runtimeId string) (string, error) { - - versionUrl := fmt.Sprintf("%s/v1/runtimes?runtime_id=%s", OpenPitrixServer, runtimeId) - resp, err := makeHttpRequest("GET", versionUrl, "") - if err != nil { - glog.Error(err) - return unknown, err - } - - var runtimes runtimeList - err = json.Unmarshal(resp, &runtimes) - if err != nil { - glog.Error(err) - return unknown, err - } - - if len(runtimes.Runtimes) == 0 { - return unknown, nil - } - - return runtimes.Runtimes[0].Zone, nil -} - -func GetWorkLoads(namespace string, clusterRoles []clusterRole) (*workLoads, error) { +func getWorkLoads(namespace string, clusterRoles []openpitrix.ClusterRole) (*workLoads, error) { var works workLoads for _, clusterRole := range clusterRoles { workLoadName := clusterRole.Role if len(workLoadName) > 0 { - if strings.HasSuffix(workLoadName, deploySuffix) { - name := strings.Split(workLoadName, deploySuffix)[0] + if strings.HasSuffix(workLoadName, openpitrix.DeploySuffix) { + name := strings.Split(workLoadName, openpitrix.DeploySuffix)[0] item, err := informers.SharedInformerFactory().Apps().V1().Deployments().Lister().Deployments(namespace).Get(name) @@ -249,8 +152,8 @@ func GetWorkLoads(namespace string, clusterRoles []clusterRole) (*workLoads, err continue } - if strings.HasSuffix(workLoadName, daemonSuffix) { - name := strings.Split(workLoadName, daemonSuffix)[0] + if strings.HasSuffix(workLoadName, openpitrix.DaemonSuffix) { + name := strings.Split(workLoadName, openpitrix.DaemonSuffix)[0] item, err := informers.SharedInformerFactory().Apps().V1().DaemonSets().Lister().DaemonSets(namespace).Get(name) if err != nil { return nil, err @@ -259,8 +162,8 @@ func GetWorkLoads(namespace string, clusterRoles 
[]clusterRole) (*workLoads, err continue } - if strings.HasSuffix(workLoadName, stateSuffix) { - name := strings.Split(workLoadName, stateSuffix)[0] + if strings.HasSuffix(workLoadName, openpitrix.StateSuffix) { + name := strings.Split(workLoadName, openpitrix.StateSuffix)[0] item, err := informers.SharedInformerFactory().Apps().V1().StatefulSets().Lister().StatefulSets(namespace).Get(name) if err != nil { return nil, err @@ -346,7 +249,7 @@ func getIng(namespace string, services []v1.Service) []v1beta1.Ingress { var ings []v1beta1.Ingress for _, svc := range services { - result, err := resources.ListNamespaceResource(namespace, "ingress", ¶ms.Conditions{Fuzzy: map[string]string{"serviceName": svc.Name}}, "", false, -1, 0) + result, err := resources.ListResources(namespace, "ingress", ¶ms.Conditions{Fuzzy: map[string]string{"serviceName": svc.Name}}, "", false, -1, 0) if err != nil { glog.Error(err) return nil @@ -379,159 +282,3 @@ func getIng(namespace string, services []v1.Service) []v1beta1.Ingress { return ings } - -func ListApplication(runtimeId string, conditions *params.Conditions, limit, offset int) (*models.PageableResponse, error) { - if strings.HasSuffix(OpenPitrixServer, "/") { - OpenPitrixServer = strings.TrimSuffix(OpenPitrixServer, "/") - } - - defaultStatus := "status=active&status=stopped&status=pending&status=ceased" - - url := fmt.Sprintf("%s/v1/clusters?limit=%s&offset=%s", OpenPitrixServer, strconv.Itoa(limit), strconv.Itoa(offset)) - - if len(conditions.Fuzzy["name"]) > 0 { - url = fmt.Sprintf("%s&search_word=%s", url, conditions.Fuzzy["name"]) - } - - if len(conditions.Match["status"]) > 0 { - url = fmt.Sprintf("%s&status=%s", url, conditions.Match["status"]) - } else { - url = fmt.Sprintf("%s&%s", url, defaultStatus) - } - - if len(runtimeId) > 0 { - url = fmt.Sprintf("%s&runtime_id=%s", url, runtimeId) - } - - resp, err := makeHttpRequest("GET", url, "") - if err != nil { - glog.Errorf("request %s failed, reason: %s", url, err) - return 
nil, err - } - - var clusterList clusters - err = json.Unmarshal(resp, &clusterList) - - if err != nil { - return nil, err - } - - result := models.PageableResponse{TotalCount: clusterList.Total} - result.Items = make([]interface{}, 0) - for _, item := range clusterList.Clusters { - var app Application - - app.Name = item.Name - app.ClusterID = item.ClusterID - app.UpdateTime = item.UpdateTime - app.Status = item.Status - versionInfo, _ := GetVersion(item.VersionID) - app.Version = versionInfo - app.VersionId = item.VersionID - runtimeInfo, _ := GetRuntime(item.RunTimeId) - app.Runtime = runtimeInfo - app.RuntimeId = item.RunTimeId - appInfo, _, appId, _ := GetAppInfo(item.AppID) - app.App = appInfo - app.AppId = appId - app.Description = item.Description - - result.Items = append(result.Items, app) - } - - return &result, nil -} - -func GetApp(clusterId string) (*Application, error) { - if strings.HasSuffix(OpenPitrixServer, "/") { - OpenPitrixServer = strings.TrimSuffix(OpenPitrixServer, "/") - } - - url := fmt.Sprintf("%s/v1/clusters?cluster_id=%s", OpenPitrixServer, clusterId) - - resp, err := makeHttpRequest("GET", url, "") - if err != nil { - glog.Error(err) - return nil, err - } - - var clusterList clusters - err = json.Unmarshal(resp, &clusterList) - - if err != nil { - glog.Error(err) - return nil, err - } - - if len(clusterList.Clusters) == 0 { - return nil, fmt.Errorf("NotFound, clusterId:%s", clusterId) - } - - item := clusterList.Clusters[0] - var app Application - - app.Name = item.Name - app.ClusterID = item.ClusterID - app.UpdateTime = item.UpdateTime - app.CreateTime = item.CreateTime - app.Status = item.Status - versionInfo, _ := GetVersion(item.VersionID) - app.Version = versionInfo - app.VersionId = item.VersionID - - runtimeInfo, _ := GetRuntime(item.RunTimeId) - app.Runtime = runtimeInfo - app.RuntimeId = item.RunTimeId - appInfo, repoId, appId, _ := GetAppInfo(item.AppID) - app.App = appInfo - app.AppId = appId - app.Description = 
item.Description - - app.RepoName, _ = GetRepo(repoId) - - workloads, err := GetWorkLoads(app.Runtime, item.ClusterRoleSets) - if err != nil { - glog.Error(err) - return nil, err - } - app.WorkLoads = workloads - workloadLabels := getLabels(app.Runtime, app.WorkLoads) - app.Services = getSvcs(app.Runtime, workloadLabels) - app.Ingresses = getIng(app.Runtime, app.Services) - - return &app, nil -} - -func makeHttpRequest(method, url, data string) ([]byte, error) { - var req *http.Request - - var err error - if method == "GET" { - req, err = http.NewRequest(method, url, nil) - } else { - req, err = http.NewRequest(method, url, strings.NewReader(data)) - } - - req.Header.Add("Authorization", OpenPitrixProxyToken) - - if err != nil { - glog.Error(err) - return nil, err - } - - httpClient := &http.Client{} - resp, err := httpClient.Do(req) - - if err != nil { - err := fmt.Errorf("Request to %s failed, method: %s, reason: %s ", url, method, err) - glog.Error(err) - return nil, err - } - - body, err := ioutil.ReadAll(resp.Body) - defer resp.Body.Close() - if resp.StatusCode >= http.StatusBadRequest { - err = fmt.Errorf(string(body)) - } - return body, err -} diff --git a/pkg/models/iam/am.go b/pkg/models/iam/am.go index 977ad4918..0cc970ec9 100644 --- a/pkg/models/iam/am.go +++ b/pkg/models/iam/am.go @@ -23,53 +23,84 @@ import ( "fmt" "github.com/golang/glog" "io/ioutil" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/models/resources" + "kubesphere.io/kubesphere/pkg/params" "kubesphere.io/kubesphere/pkg/simple/client/k8s" - ldapclient "kubesphere.io/kubesphere/pkg/simple/client/ldap" - "log" + "kubesphere.io/kubesphere/pkg/utils/k8sutil" + "kubesphere.io/kubesphere/pkg/utils/sliceutil" "net/http" - "regexp" + "sort" "strings" "github.com/go-ldap/ldap" - corev1 "k8s.io/api/core/v1" "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" 
"k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/util/slice" - "kubesphere.io/kubesphere/pkg/constants" "kubesphere.io/kubesphere/pkg/models" "kubesphere.io/kubesphere/pkg/models/iam/policy" ) -func GetNamespaces(username string) ([]*corev1.Namespace, error) { +const ClusterRoleKind = "ClusterRole" - roles, err := GetRoles(username, "") +func GetUserDevopsSimpleRules(username, projectId string) ([]models.SimpleRule, error) { + role, err := GetUserDevopsRole(projectId, username) if err != nil { return nil, err } - namespaces := make([]*corev1.Namespace, 0) - namespaceLister := informers.SharedInformerFactory().Core().V1().Namespaces().Lister() - for _, role := range roles { - namespace, err := namespaceLister.Get(role.Name) - if err != nil { - return nil, err + return GetDevopsRoleSimpleRules(role), nil +} + +func GetDevopsRoleSimpleRules(role string) []models.SimpleRule { + var rules []models.SimpleRule + + switch role { + case "developer": + rules = []models.SimpleRule{ + {Name: "pipelines", Actions: []string{"view", "trigger"}}, + {Name: "roles", Actions: []string{"view"}}, + {Name: "members", Actions: []string{"view"}}, + {Name: "devops", Actions: []string{"view"}}, } - namespaces = append(namespaces, namespace) + break + case "owner": + rules = []models.SimpleRule{ + {Name: "pipelines", Actions: []string{"create", "edit", "view", "delete", "trigger"}}, + {Name: "roles", Actions: []string{"view"}}, + {Name: "members", Actions: []string{"create", "edit", "view", "delete"}}, + {Name: "credentials", Actions: []string{"create", "edit", "view", "delete"}}, + {Name: "devops", Actions: []string{"edit", "view", "delete"}}, + } + break + case "maintainer": + rules = []models.SimpleRule{ + {Name: "pipelines", Actions: []string{"create", "edit", "view", "delete", "trigger"}}, + {Name: "roles", Actions: []string{"view"}}, + {Name: "members", Actions: []string{"view"}}, + {Name: "credentials", Actions: []string{"create", "edit", "view", "delete"}}, + {Name: 
"devops", Actions: []string{"view"}}, + } + break + case "reporter": + fallthrough + default: + rules = []models.SimpleRule{ + {Name: "pipelines", Actions: []string{"view"}}, + {Name: "roles", Actions: []string{"view"}}, + {Name: "members", Actions: []string{"view"}}, + {Name: "devops", Actions: []string{"view"}}, + } + break } - - return namespaces, nil + return rules } -func GetNamespacesByWorkspace(workspace string) ([]*corev1.Namespace, error) { - namespaceLister := informers.SharedInformerFactory().Core().V1().Namespaces().Lister() - return namespaceLister.List(labels.SelectorFromSet(labels.Set{"kubesphere.io/workspace": workspace})) -} - -func GetDevopsRole(projectId string, username string) (string, error) { +func GetUserDevopsRole(projectId string, username string) (string, error) { //Hard fix if username == "admin" { @@ -116,12 +147,8 @@ func GetDevopsRole(projectId string, username string) (string, error) { return "", nil } -func GetNamespace(namespaceName string) (*corev1.Namespace, error) { - namespaceLister := informers.SharedInformerFactory().Core().V1().Namespaces().Lister() - return namespaceLister.Get(namespaceName) -} - -func GetRoles(username string, namespace string) ([]*v1.Role, error) { +// Get user roles in namespace +func GetUserRoles(namespace, username string) ([]*v1.Role, error) { clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() roleBindingLister := informers.SharedInformerFactory().Rbac().V1().RoleBindings().Lister() roleLister := informers.SharedInformerFactory().Rbac().V1().Roles().Lister() @@ -134,37 +161,35 @@ func GetRoles(username string, namespace string) ([]*v1.Role, error) { roles := make([]*v1.Role, 0) for _, roleBinding := range roleBindings { - - for _, subject := range roleBinding.Subjects { - if subject.Kind == v1.UserKind && subject.Name == username { - if roleBinding.RoleRef.Kind == ClusterRoleKind { - clusterRole, err := clusterRoleLister.Get(roleBinding.RoleRef.Name) - if err == 
nil { - var role = v1.Role{TypeMeta: (*clusterRole).TypeMeta, ObjectMeta: (*clusterRole).ObjectMeta, Rules: (*clusterRole).Rules} - role.Namespace = roleBinding.Namespace - roles = append(roles, &role) - break - } else if apierrors.IsNotFound(err) { - log.Println(err) - break + if k8sutil.ContainsUser(roleBinding.Subjects, username) { + if roleBinding.RoleRef.Kind == ClusterRoleKind { + clusterRole, err := clusterRoleLister.Get(roleBinding.RoleRef.Name) + if err != nil { + if apierrors.IsNotFound(err) { + glog.Warningf("cluster role %s not found but bind user %s in namespace %s", roleBinding.RoleRef.Name, username, namespace) + continue } else { return nil, err } - } else { - if subject.Kind == v1.UserKind && subject.Name == username { - rule, err := roleLister.Roles(roleBinding.Namespace).Get(roleBinding.RoleRef.Name) - if err == nil { - roles = append(roles, rule) - break - } else if apierrors.IsNotFound(err) { - log.Println(err) - break - } else { - return nil, err - } + } + role := v1.Role{} + role.TypeMeta = clusterRole.TypeMeta + role.ObjectMeta = clusterRole.ObjectMeta + role.Rules = clusterRole.Rules + role.Namespace = roleBinding.Namespace + roles = append(roles, &role) + } else { + role, err := roleLister.Roles(roleBinding.Namespace).Get(roleBinding.RoleRef.Name) + if err != nil { + if apierrors.IsNotFound(err) { + glog.Warningf("role %s not found but bind user %s in namespace %s", roleBinding.RoleRef.Name, username, namespace) + continue + } else { + return nil, err } } + roles = append(roles, role) } } } @@ -172,55 +197,147 @@ func GetRoles(username string, namespace string) ([]*v1.Role, error) { return roles, nil } -func GetClusterRoles(username string) ([]*v1.ClusterRole, error) { +func GetUserClusterRoles(username string) (*v1.ClusterRole, []*v1.ClusterRole, error) { clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() clusterRoleBindingLister := 
informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() - clusterRoleBindings, err := clusterRoleBindingLister.List(labels.SelectorFromSet(labels.Set{"": ""})) + clusterRoleBindings, err := clusterRoleBindingLister.List(labels.Everything()) + + if err != nil { + return nil, nil, err + } + + clusterRoles := make([]*v1.ClusterRole, 0) + userFacingClusterRole := &v1.ClusterRole{} + for _, clusterRoleBinding := range clusterRoleBindings { + if k8sutil.ContainsUser(clusterRoleBinding.Subjects, username) { + clusterRole, err := clusterRoleLister.Get(clusterRoleBinding.RoleRef.Name) + if err != nil { + if apierrors.IsNotFound(err) { + glog.Warningf("cluster role %s not found but bind user %s", clusterRoleBinding.RoleRef.Name, username) + continue + } else { + return nil, nil, err + } + } + if clusterRoleBinding.Name == username { + userFacingClusterRole = clusterRole + } + clusterRoles = append(clusterRoles, clusterRole) + } + } + + return userFacingClusterRole, clusterRoles, nil +} + +func GetUserClusterRole(username string) (*v1.ClusterRole, error) { + userFacingClusterRole, _, err := GetUserClusterRoles(username) + if err != nil { + return nil, err + } + return userFacingClusterRole, nil +} + +func GetUserClusterRules(username string) ([]v1.PolicyRule, error) { + _, clusterRoles, err := GetUserClusterRoles(username) if err != nil { return nil, err } - roles := make([]*v1.ClusterRole, 0) + rules := make([]v1.PolicyRule, 0) + for _, clusterRole := range clusterRoles { + rules = append(rules, clusterRole.Rules...) 
+ } - for _, rb := range clusterRoleBindings { - if rb.RoleRef.Kind == ClusterRoleKind { - for _, subject := range rb.Subjects { - if subject.Kind == v1.UserKind && subject.Name == username { + return rules, nil +} - role, err := clusterRoleLister.Get(rb.RoleRef.Name) - role = role.DeepCopy() - if err == nil { - if role.Annotations == nil { - role.Annotations = make(map[string]string, 0) - } +func GetUserRules(namespace, username string) ([]v1.PolicyRule, error) { + roles, err := GetUserRoles(namespace, username) - role.Annotations["rbac.authorization.k8s.io/clusterrolebinding"] = rb.Name + if err != nil { + return nil, err + } - if rb.Annotations != nil && - rb.Annotations["rbac.authorization.k8s.io/clusterrole"] == rb.RoleRef.Name { - role.Annotations["rbac.authorization.k8s.io/clusterrole"] = "true" - } + rules := make([]v1.PolicyRule, 0) + for _, role := range roles { + rules = append(rules, role.Rules...) + } - roles = append(roles, role) - break - } else if apierrors.IsNotFound(err) { - glog.Warningln(err) - break - } else { - return nil, err - } - } - } + return rules, nil +} + +func isUserFacingClusterRole(role *v1.ClusterRole) bool { + if role.Labels[constants.CreatorLabelKey] != "" { + return true + } + return false +} + +func GetWorkspaceRoleBindings(workspace string) ([]*v1.ClusterRoleBinding, error) { + + clusterRoleBindings, err := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister().List(labels.Everything()) + + if err != nil { + return nil, err + } + + result := make([]*v1.ClusterRoleBinding, 0) + + for _, roleBinding := range clusterRoleBindings { + if k8sutil.IsControlledBy(roleBinding.OwnerReferences, "Workspace", workspace) { + result = append(result, roleBinding) } } - return roles, nil + return result, nil +} + +func GetWorkspaceRole(workspace, role string) (*v1.ClusterRole, error) { + if !sliceutil.HasString(constants.WorkSpaceRoles, role) { + return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "workspace 
role"}, role) + } + role = fmt.Sprintf("workspace:%s:%s", workspace, strings.TrimPrefix(role, "workspace-")) + return informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister().Get(role) +} + +func GetUserWorkspaceRoleMap(username string) (map[string]string, error) { + + clusterRoleBindings, err := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister().List(labels.Everything()) + + if err != nil { + return nil, err + } + + result := make(map[string]string, 0) + + for _, roleBinding := range clusterRoleBindings { + if workspace := k8sutil.GetControlledWorkspace(roleBinding.OwnerReferences); workspace != "" && + k8sutil.ContainsUser(roleBinding.Subjects, username) { + result[workspace] = roleBinding.RoleRef.Name + } + } + + return result, nil +} + +func GetUserWorkspaceRole(workspace, username string) (*v1.ClusterRole, error) { + workspaceRoleMap, err := GetUserWorkspaceRoleMap(username) + + if err != nil { + return nil, err + } + + if workspaceRole := workspaceRoleMap[workspace]; workspaceRole != "" { + return informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister().Get(workspaceRole) + } + + return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "workspace user"}, username) } func GetRoleBindings(namespace string, roleName string) ([]*v1.RoleBinding, error) { roleBindingLister := informers.SharedInformerFactory().Rbac().V1().RoleBindings().Lister() - roleBindingList, err := roleBindingLister.List(labels.Everything()) + roleBindings, err := roleBindingLister.RoleBindings(namespace).List(labels.Everything()) if err != nil { return nil, err @@ -228,7 +345,7 @@ func GetRoleBindings(namespace string, roleName string) ([]*v1.RoleBinding, erro items := make([]*v1.RoleBinding, 0) - for _, roleBinding := range roleBindingList { + for _, roleBinding := range roleBindings { if roleName == "" { items = append(items, roleBinding) } else if roleBinding.RoleRef.Name == roleName { @@ -241,7 +358,7 @@ func 
GetRoleBindings(namespace string, roleName string) ([]*v1.RoleBinding, erro func GetClusterRoleBindings(clusterRoleName string) ([]*v1.ClusterRoleBinding, error) { clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() - roleBindingList, err := clusterRoleBindingLister.List(labels.Everything()) + roleBindings, err := clusterRoleBindingLister.List(labels.Everything()) if err != nil { return nil, err @@ -249,7 +366,7 @@ func GetClusterRoleBindings(clusterRoleName string) ([]*v1.ClusterRoleBinding, e items := make([]*v1.ClusterRoleBinding, 0) - for _, roleBinding := range roleBindingList { + for _, roleBinding := range roleBindings { if roleBinding.RoleRef.Name == clusterRoleName { items = append(items, roleBinding) } @@ -258,46 +375,53 @@ func GetClusterRoleBindings(clusterRoleName string) ([]*v1.ClusterRoleBinding, e return items, nil } -func ClusterRoleUsers(clusterRoleName string) ([]*models.User, error) { +func ListClusterRoleUsers(clusterRoleName string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) { roleBindings, err := GetClusterRoleBindings(clusterRoleName) if err != nil { return nil, err } - - conn, err := ldapclient.Client() - - if err != nil { - return nil, err - } - - defer conn.Close() - - names := make([]string, 0) users := make([]*models.User, 0) for _, roleBinding := range roleBindings { for _, subject := range roleBinding.Subjects { - if subject.Kind == v1.UserKind && !strings.HasPrefix(subject.Name, "system") && - !slice.ContainsString(names, subject.Name, nil) { - names = append(names, subject.Name) - - user, err := UserDetail(subject.Name, conn) - - if ldap.IsErrorWithCode(err, 32) { + if subject.Kind == v1.UserKind && !k8sutil.ContainsUser(users, subject.Name) { + user, err := DescribeUser(subject.Name) + if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { continue } - if err != nil { return nil, err } - users = 
append(users, user) } } } - return users, nil + // order & reverse + sort.Slice(users, func(i, j int) bool { + if reverse { + tmp := i + i = j + j = tmp + } + switch orderBy { + default: + fallthrough + case "name": + return strings.Compare(users[i].Username, users[j].Username) <= 0 + } + }) + + result := make([]interface{}, 0) + + for i, d := range users { + if i >= offset && (limit == -1 || len(result) < limit) { + result = append(result, d) + } + } + + return &models.PageableResponse{Items: result, TotalCount: len(users)}, nil } @@ -308,31 +432,21 @@ func RoleUsers(namespace string, roleName string) ([]*models.User, error) { return nil, err } - conn, err := ldapclient.Client() - - if err != nil { - return nil, err - } - - defer conn.Close() - - names := make([]string, 0) users := make([]*models.User, 0) for _, roleBinding := range roleBindings { for _, subject := range roleBinding.Subjects { - if subject.Kind == v1.UserKind && - !strings.HasPrefix(subject.Name, "system") && - !slice.ContainsString(names, subject.Name, nil) { - names = append(names, subject.Name) - user, err := UserDetail(subject.Name, conn) - if ldap.IsErrorWithCode(err, 32) { - continue - } + if subject.Kind == v1.UserKind && !k8sutil.ContainsUser(users, subject.Name) { + user, err := DescribeUser(subject.Name) if err != nil { + if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { + continue + } return nil, err } + user.Role = roleBinding.RoleRef.Name + users = append(users, user) } } @@ -340,43 +454,57 @@ func RoleUsers(namespace string, roleName string) ([]*models.User, error) { return users, nil } +func ListRoles(namespace string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) { + return resources.ListResources(namespace, resources.Roles, conditions, orderBy, reverse, limit, offset) +} + +func ListWorkspaceRoles(workspace string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) 
(*models.PageableResponse, error) { + conditions.Match["ownerName"] = workspace + conditions.Match["ownerKind"] = "Workspace" + result, err := resources.ListResources("", resources.ClusterRoles, conditions, orderBy, reverse, limit, offset) + + if err != nil { + return nil, err + } + + for i, item := range result.Items { + if role, ok := item.(*v1.ClusterRole); ok { + role = role.DeepCopy() + role.Name = role.Labels[constants.DisplayNameLabelKey] + result.Items[i] = role + } + } + return result, nil +} + +func ListClusterRoles(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) { + return resources.ListResources("", resources.ClusterRoles, conditions, orderBy, reverse, limit, offset) +} + func NamespaceUsers(namespaceName string) ([]*models.User, error) { roleBindings, err := GetRoleBindings(namespaceName, "") - if err != nil { - return nil, err - } - conn, err := ldapclient.Client() if err != nil { return nil, err } - defer conn.Close() - - names := make([]string, 0) users := make([]*models.User, 0) for _, roleBinding := range roleBindings { - for _, subject := range roleBinding.Subjects { - if subject.Kind == v1.UserKind && - !slice.ContainsString(names, subject.Name, nil) && - !strings.HasPrefix(subject.Name, "system") { - if roleBinding.Name == "viewer" { - continue - } - if roleBinding.Name == "admin" { - continue - } - names = append(names, subject.Name) - user, err := UserDetail(subject.Name, conn) - if ldap.IsErrorWithCode(err, 32) { - continue - } + if subject.Kind == v1.UserKind && !k8sutil.ContainsUser(users, subject.Name) { + + user, err := DescribeUser(subject.Name) + if err != nil { + if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { + continue + } return nil, err } + user.Role = roleBinding.RoleRef.Name + user.RoleBindTime = &roleBinding.CreationTimestamp.Time user.RoleBinding = roleBinding.Name users = append(users, user) } @@ -386,135 +514,116 @@ func 
NamespaceUsers(namespaceName string) ([]*models.User, error) { return users, nil } -func GetWorkspaceRoles(clusterRoles []*v1.ClusterRole) map[string]string { - - workspaceRoles := make(map[string]string, 0) - - for _, v := range clusterRoles { - if groups := regexp.MustCompile(fmt.Sprintf(`^system:(\S+):(%s)$`, strings.Join(constants.WorkSpaceRoles, "|"))).FindStringSubmatch(v.Name); len(groups) == 3 { - workspaceRoles[groups[1]] = groups[2] +func GetUserWorkspaceSimpleRules(workspace, username string) ([]models.SimpleRule, error) { + clusterRules, err := GetUserClusterRules(username) + if err != nil { + return nil, err + } + if workspacesManager, err := policy.GetClusterAction("workspaces", "edit"); err == nil { + if rulesMatchesAction(clusterRules, workspacesManager) { + return GetWorkspaceRoleSimpleRules(workspace, constants.WorkspaceAdmin), nil } } - return workspaceRoles + workspaceRole, err := GetUserWorkspaceRole(workspace, username) + + if err != nil { + return nil, err + } + return GetWorkspaceRoleSimpleRules(workspace, workspaceRole.Labels[constants.DisplayNameLabelKey]), nil } -func GetWorkspaceRole(clusterRoles []*v1.ClusterRole, workspace string) string { +func GetWorkspaceRoleSimpleRules(workspace, roleName string) []models.SimpleRule { - for _, v := range clusterRoles { - if groups := regexp.MustCompile(fmt.Sprintf(`^system:(\S+):(%s)$`, strings.Join(constants.WorkSpaceRoles, "|"))).FindStringSubmatch(v.Name); len(groups) == 3 { - if groups[1] == workspace { - return groups[2] - } + workspaceRules := make([]models.SimpleRule, 0) + + switch roleName { + case constants.WorkspaceAdmin: + workspaceRules = []models.SimpleRule{ + {Name: "workspaces", Actions: []string{"edit", "delete", "view"}}, + {Name: "members", Actions: []string{"edit", "delete", "create", "view"}}, + {Name: "devops", Actions: []string{"edit", "delete", "create", "view"}}, + {Name: "projects", Actions: []string{"edit", "delete", "create", "view"}}, + {Name: "organizations", Actions: 
[]string{"edit", "delete", "create", "view"}}, + {Name: "roles", Actions: []string{"view"}}, } - } - - return "" -} - -func GetWorkspaceSimpleRules(clusterRoles []*v1.ClusterRole, workspace string) map[string][]models.SimpleRule { - - workspaceRules := make(map[string][]models.SimpleRule, 0) - - clusterSimpleRules := make([]models.SimpleRule, 0) - clusterRules := make([]v1.PolicyRule, 0) - for _, clusterRole := range clusterRoles { - clusterRules = append(clusterRules, clusterRole.Rules...) - } - - for i := 0; i < len(policy.WorkspaceRoleRuleMapping); i++ { - rule := models.SimpleRule{Name: policy.WorkspaceRoleRuleMapping[i].Name} - rule.Actions = make([]string, 0) - for j := 0; j < (len(policy.WorkspaceRoleRuleMapping[i].Actions)); j++ { - if RulesMatchesAction(clusterRules, policy.WorkspaceRoleRuleMapping[i].Actions[j]) { - rule.Actions = append(rule.Actions, policy.WorkspaceRoleRuleMapping[i].Actions[j].Name) - } + case constants.WorkspaceRegular: + workspaceRules = []models.SimpleRule{ + {Name: "workspaces", Actions: []string{"view"}}, + {Name: "members", Actions: []string{"view"}}, + {Name: "devops", Actions: []string{"create"}}, + {Name: "projects", Actions: []string{"create"}}, + {Name: "organizations", Actions: []string{"view"}}, } - if len(rule.Actions) > 0 { - clusterSimpleRules = append(clusterSimpleRules, rule) - } - } - - if len(clusterRules) > 0 { - workspaceRules["*"] = clusterSimpleRules - } - - for _, v := range clusterRoles { - - if groups := regexp.MustCompile(fmt.Sprintf(`^system:(\S+):(%s)$`, strings.Join(constants.WorkSpaceRoles, "|"))).FindStringSubmatch(v.Name); len(groups) == 3 { - - if workspace != "" && groups[1] != workspace { - continue - } - - policyRules := make([]v1.PolicyRule, 0) - - for _, rule := range v.Rules { - rule.ResourceNames = nil - policyRules = append(policyRules, rule) - } - - rules := make([]models.SimpleRule, 0) - - for i := 0; i < len(policy.WorkspaceRoleRuleMapping); i++ { - rule := models.SimpleRule{Name: 
policy.WorkspaceRoleRuleMapping[i].Name} - rule.Actions = make([]string, 0) - for j := 0; j < (len(policy.WorkspaceRoleRuleMapping[i].Actions)); j++ { - action := policy.WorkspaceRoleRuleMapping[i].Actions[j] - if RulesMatchesAction(policyRules, action) { - rule.Actions = append(rule.Actions, action.Name) - } - } - if len(rule.Actions) > 0 { - rules = append(rules, rule) - } - } - - workspaceRules[groups[1]] = merge(rules, clusterSimpleRules) + case constants.WorkspaceViewer: + workspaceRules = []models.SimpleRule{ + {Name: "workspaces", Actions: []string{"view"}}, + {Name: "members", Actions: []string{"view"}}, + {Name: "devops", Actions: []string{"view"}}, + {Name: "projects", Actions: []string{"view"}}, + {Name: "organizations", Actions: []string{"view"}}, + {Name: "roles", Actions: []string{"view"}}, } } return workspaceRules } -func merge(clusterRules, rules []models.SimpleRule) []models.SimpleRule { - for _, clusterRule := range clusterRules { - exist := false +// Convert cluster role to rules +func GetClusterRoleSimpleRules(clusterRoleName string) ([]models.SimpleRule, error) { - for i := 0; i < len(rules); i++ { - if rules[i].Name == clusterRule.Name { - exist = true + clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() + clusterRole, err := clusterRoleLister.Get(clusterRoleName) - for _, action := range clusterRule.Actions { - if !slice.ContainsString(rules[i].Actions, action, nil) { - rules[i].Actions = append(rules[i].Actions, action) - } - } - } - } - - if !exist { - rules = append(rules, clusterRule) - } + if err != nil { + return nil, err } - return rules + + return getClusterSimpleRule(clusterRole.Rules), nil } -// Convert cluster roles to rules -func GetClusterRoleSimpleRules(clusterRoles []*v1.ClusterRole) ([]models.SimpleRule, error) { +func GetUserClusterSimpleRules(username string) ([]models.SimpleRule, error) { + clusterRules, err := GetUserClusterRules(username) + if err != nil { + return nil, err + } + 
return getClusterSimpleRule(clusterRules), nil +} - clusterRules := make([]v1.PolicyRule, 0) +func GetUserNamespaceSimpleRules(namespace, username string) ([]models.SimpleRule, error) { + clusterRules, err := GetUserClusterRules(username) + if err != nil { + return nil, err + } + rules, err := GetUserRules(namespace, username) + if err != nil { + return nil, err + } + rules = append(rules, clusterRules...) - for _, v := range clusterRoles { - clusterRules = append(clusterRules, v.Rules...) + return getSimpleRule(rules), nil +} + +// Convert roles to rules +func GetRoleSimpleRules(namespace string, roleName string) ([]models.SimpleRule, error) { + + roleLister := informers.SharedInformerFactory().Rbac().V1().Roles().Lister() + role, err := roleLister.Roles(namespace).Get(roleName) + + if err != nil { + return nil, err } + return getSimpleRule(role.Rules), nil +} + +func getClusterSimpleRule(policyRules []v1.PolicyRule) []models.SimpleRule { rules := make([]models.SimpleRule, 0) for i := 0; i < len(policy.ClusterRoleRuleMapping); i++ { validActions := make([]string, 0) for j := 0; j < (len(policy.ClusterRoleRuleMapping[i].Actions)); j++ { - if RulesMatchesAction(clusterRules, policy.ClusterRoleRuleMapping[i].Actions[j]) { + if rulesMatchesAction(policyRules, policy.ClusterRoleRuleMapping[i].Actions[j]) { validActions = append(validActions, policy.ClusterRoleRuleMapping[i].Actions[j].Name) } } @@ -523,57 +632,26 @@ func GetClusterRoleSimpleRules(clusterRoles []*v1.ClusterRole) ([]models.SimpleR } } - return rules, nil + return rules } -// Convert roles to rules -func GetRoleSimpleRules(roles []*v1.Role, namespace string) (map[string][]models.SimpleRule, error) { - - rulesMapping := make(map[string][]models.SimpleRule, 0) - - policyRulesMapping := make(map[string][]v1.PolicyRule, 0) - - for _, v := range roles { - - if namespace != "" && v.Namespace != namespace { - continue - } - - policyRules := policyRulesMapping[v.Namespace] - - if policyRules == nil { - 
policyRules = make([]v1.PolicyRule, 0) - } - - policyRules = append(policyRules, v.Rules...) - - policyRulesMapping[v.Namespace] = policyRules - } - - for namespace, policyRules := range policyRulesMapping { - - rules := make([]models.SimpleRule, 0) - - for i := 0; i < len(policy.RoleRuleMapping); i++ { - rule := models.SimpleRule{Name: policy.RoleRuleMapping[i].Name} - rule.Actions = make([]string, 0) - for j := 0; j < len(policy.RoleRuleMapping[i].Actions); j++ { - if RulesMatchesAction(policyRules, policy.RoleRuleMapping[i].Actions[j]) { - rule.Actions = append(rule.Actions, policy.RoleRuleMapping[i].Actions[j].Name) - } - } - if len(rule.Actions) > 0 { - rules = append(rules, rule) +func getSimpleRule(policyRules []v1.PolicyRule) []models.SimpleRule { + simpleRules := make([]models.SimpleRule, 0) + for i := 0; i < len(policy.RoleRuleMapping); i++ { + rule := models.SimpleRule{Name: policy.RoleRuleMapping[i].Name} + rule.Actions = make([]string, 0) + for j := 0; j < len(policy.RoleRuleMapping[i].Actions); j++ { + if rulesMatchesAction(policyRules, policy.RoleRuleMapping[i].Actions[j]) { + rule.Actions = append(rule.Actions, policy.RoleRuleMapping[i].Actions[j].Name) } } - - rulesMapping[namespace] = rules + if len(rule.Actions) > 0 { + simpleRules = append(simpleRules, rule) + } } - - return rulesMapping, nil + return simpleRules } -// func CreateClusterRoleBinding(username string, clusterRoleName string) error { clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() @@ -583,104 +661,43 @@ func CreateClusterRoleBinding(username string, clusterRoleName string) error { return err } - clusterRoles, err := GetClusterRoles(username) + clusterRoleBinding := &v1.ClusterRoleBinding{} + clusterRoleBinding.Name = username + clusterRoleBinding.RoleRef = v1.RoleRef{Name: clusterRoleName, Kind: ClusterRoleKind} + clusterRoleBinding.Subjects = []v1.Subject{{Kind: v1.UserKind, Name: username}} - if err != nil { - return err - } - - for _, 
clusterRole := range clusterRoles { - - if clusterRole.Annotations["rbac.authorization.k8s.io/clusterrole"] == "true" { - - if clusterRole.Name == clusterRoleName { - return nil - } - - clusterRoleBindingName := clusterRole.Annotations["rbac.authorization.k8s.io/clusterrolebinding"] - clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() - clusterRoleBinding, err := clusterRoleBindingLister.Get(clusterRoleBindingName) - - if err != nil { - return err - } - - for i, v := range clusterRoleBinding.Subjects { - if v.Kind == v1.UserKind && v.Name == username { - clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects[:i], clusterRoleBinding.Subjects[i+1:]...) - break - } - } - - _, err = k8s.Client().RbacV1().ClusterRoleBindings().Update(clusterRoleBinding) - - if err != nil { - return err - } - - break - } - } clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() - clusterRoleBindings, err := clusterRoleBindingLister.List(labels.Everything()) + found, err := clusterRoleBindingLister.Get(username) - if err != nil { + if apierrors.IsNotFound(err) { + _, err = k8s.Client().RbacV1().ClusterRoleBindings().Create(clusterRoleBinding) + return err + } else if err != nil { return err } - var clusterRoleBinding *v1.ClusterRoleBinding - - for _, roleBinding := range clusterRoleBindings { - if roleBinding.Annotations != nil && roleBinding.Annotations["rbac.authorization.k8s.io/clusterrole"] == clusterRoleName && - roleBinding.RoleRef.Name == clusterRoleName { - clusterRoleBinding = roleBinding - break + // cluster role changed + if found.RoleRef.Name != clusterRoleBinding.RoleRef.Name { + deletePolicy := metav1.DeletePropagationForeground + deleteOption := &metav1.DeleteOptions{PropagationPolicy: &deletePolicy} + err := k8s.Client().RbacV1().ClusterRoleBindings().Delete(clusterRoleBinding.Name, deleteOption) + if err != nil { + return err } + _, err = 
k8s.Client().RbacV1().ClusterRoleBindings().Create(clusterRoleBinding) + return err } - if clusterRoleBinding != nil { - clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, v1.Subject{Kind: v1.UserKind, Name: username}) - _, err := k8s.Client().RbacV1().ClusterRoleBindings().Update(clusterRoleBinding) - if err != nil { - return err - } - } else { - clusterRoleBinding = new(v1.ClusterRoleBinding) - clusterRoleBinding.Annotations = map[string]string{"rbac.authorization.k8s.io/clusterrole": clusterRoleName} - clusterRoleBinding.Name = clusterRoleName - clusterRoleBinding.RoleRef = v1.RoleRef{Name: clusterRoleName, Kind: ClusterRoleKind} - clusterRoleBinding.Subjects = []v1.Subject{{Kind: v1.UserKind, Name: username}} - - _, err = k8s.Client().RbacV1().ClusterRoleBindings().Create(clusterRoleBinding) - - if err != nil { - return err - } + if !k8sutil.ContainsUser(found.Subjects, username) { + found.Subjects = clusterRoleBinding.Subjects + _, err = k8s.Client().RbacV1().ClusterRoleBindings().Update(found) + return err } return nil } -func GetRole(namespace string, roleName string) (*v1.Role, error) { - return informers.SharedInformerFactory().Rbac().V1().Roles().Lister().Roles(namespace).Get(roleName) -} -func GetClusterRole(clusterRoleName string) (*v1.ClusterRole, error) { - clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() - return clusterRoleLister.Get(clusterRoleName) -} - -func RulesMatchesAction(rules []v1.PolicyRule, action models.Action) bool { - - for _, required := range action.Rules { - if !rulesMatchesRequired(rules, required) { - return false - } - } - - return true -} - -func rulesMatchesRequired(rules []v1.PolicyRule, required v1.PolicyRule) bool { +func RulesMatchesRequired(rules []v1.PolicyRule, required v1.PolicyRule) bool { for _, rule := range rules { if ruleMatchesRequired(rule, required) { return true @@ -689,6 +706,17 @@ func rulesMatchesRequired(rules []v1.PolicyRule, required v1.PolicyRule) 
bool { return false } +func rulesMatchesAction(rules []v1.PolicyRule, action models.Action) bool { + + for _, required := range action.Rules { + if !RulesMatchesRequired(rules, required) { + return false + } + } + + return true +} + func ruleMatchesRequired(rule v1.PolicyRule, required v1.PolicyRule) bool { if len(required.NonResourceURLs) == 0 { diff --git a/pkg/models/iam/iam.go b/pkg/models/iam/iam.go deleted file mode 100644 index cac09c88b..000000000 --- a/pkg/models/iam/iam.go +++ /dev/null @@ -1,188 +0,0 @@ -/* - - Copyright 2019 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- -*/ -package iam - -import ( - "fmt" - "kubesphere.io/kubesphere/pkg/informers" - "kubesphere.io/kubesphere/pkg/simple/client/ldap" - - "k8s.io/api/rbac/v1" - "k8s.io/kubernetes/pkg/util/slice" - - "kubesphere.io/kubesphere/pkg/models" -) - -const ClusterRoleKind = "ClusterRole" - -// Get user list based on workspace role -func WorkspaceRoleUsers(workspace string, roleName string) ([]models.User, error) { - - clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() - - workspaceRoleBinding, err := clusterRoleBindingLister.Get(fmt.Sprintf("system:%s:%s", workspace, roleName)) - - if err != nil { - return nil, err - } - - names := make([]string, 0) - - for _, subject := range workspaceRoleBinding.Subjects { - if subject.Kind == v1.UserKind { - names = append(names, subject.Name) - } - } - - users, err := GetUsers(names) - - if err != nil { - return nil, err - } - - for i := 0; i < len(users); i++ { - users[i].WorkspaceRole = roleName - } - - return users, nil -} - -func GetUsers(names []string) ([]models.User, error) { - var users []models.User - - if names == nil || len(names) == 0 { - return make([]models.User, 0), nil - } - - conn, err := ldap.Client() - - if err != nil { - return nil, err - } - - for _, name := range names { - user, err := UserDetail(name, conn) - if err != nil { - return nil, err - } - users = append(users, *user) - } - - return users, nil -} - -func GetUser(name string) (*models.User, error) { - - conn, err := ldap.Client() - - if err != nil { - return nil, err - } - - user, err := UserDetail(name, conn) - - if err != nil { - return nil, err - } - - return user, nil -} - -func GetUserNamespaces(username string, requiredRule v1.PolicyRule) (allNamespace bool, namespaces []string, err error) { - - clusterRoles, err := GetClusterRoles(username) - - if err != nil { - return false, nil, err - } - - clusterRules := make([]v1.PolicyRule, 0) - for _, role := range clusterRoles { - clusterRules = 
append(clusterRules, role.Rules...) - } - - if requiredRule.Size() == 0 { - if RulesMatchesRequired(clusterRules, v1.PolicyRule{ - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/namespaces"}, - }) { - return true, nil, nil - } - } else { - - if RulesMatchesRequired(clusterRules, requiredRule) { - return true, nil, nil - } - - } - - roles, err := GetRoles("", username) - - if err != nil { - return false, nil, err - } - - rulesMapping := make(map[string][]v1.PolicyRule, 0) - - for _, role := range roles { - rules := rulesMapping[role.Namespace] - if rules == nil { - rules = make([]v1.PolicyRule, 0) - } - rules = append(rules, role.Rules...) - rulesMapping[role.Namespace] = rules - } - - namespaces = make([]string, 0) - - for namespace, rules := range rulesMapping { - if requiredRule.Size() == 0 || RulesMatchesRequired(rules, requiredRule) { - namespaces = append(namespaces, namespace) - } - } - - return false, namespaces, nil -} - -func GetWorkspaceUsers(workspace string, workspaceRole string) ([]string, error) { - clusterRoleBindingLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister() - clusterRoleBinding, err := clusterRoleBindingLister.Get(fmt.Sprintf("system:%s:%s", workspace, workspaceRole)) - - if err != nil { - return nil, err - } - - users := make([]string, 0) - - for _, s := range clusterRoleBinding.Subjects { - if s.Kind == v1.UserKind && !slice.ContainsString(users, s.Name, nil) { - users = append(users, s.Name) - } - } - return users, nil -} - -func RulesMatchesRequired(rules []v1.PolicyRule, required v1.PolicyRule) bool { - for _, rule := range rules { - if ruleMatchesRequired(rule, required) { - return true - } - } - return false -} diff --git a/pkg/models/iam/im.go b/pkg/models/iam/im.go index 34934a7c7..7be15beaa 100644 --- a/pkg/models/iam/im.go +++ b/pkg/models/iam/im.go @@ -22,9 +22,12 @@ import ( "fmt" "kubesphere.io/kubesphere/pkg/constants" 
"kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" "kubesphere.io/kubesphere/pkg/simple/client/k8s" "kubesphere.io/kubesphere/pkg/simple/client/redis" + "kubesphere.io/kubesphere/pkg/utils/k8sutil" "regexp" + "sort" "strconv" "strings" "time" @@ -38,11 +41,10 @@ import ( ldapclient "kubesphere.io/kubesphere/pkg/simple/client/ldap" "kubesphere.io/kubesphere/pkg/models" - jwtutils "kubesphere.io/kubesphere/pkg/utils/jwt" + "kubesphere.io/kubesphere/pkg/utils/jwtutil" ) var ( - counter Counter adminEmail string adminPassword string tokenExpireTime time.Duration @@ -82,7 +84,7 @@ func checkAndCreateDefaultGroup(conn ldap.Client) error { nil, ) - groups, err := conn.Search(groupSearchRequest) + _, err := conn.Search(groupSearchRequest) if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { err = createGroupsBaseDN(conn) @@ -95,14 +97,6 @@ func checkAndCreateDefaultGroup(conn ldap.Client) error { return fmt.Errorf("iam database init failed: %s\n", err) } - if groups == nil || len(groups.Entries) == 0 { - _, err = CreateGroup(models.Group{Path: constants.SystemWorkspace, Name: constants.SystemWorkspace, Creator: constants.AdminUserName, Description: "system workspace"}) - - if err != nil { - return fmt.Errorf("system-workspace create failed: %s\n", err) - } - } - return nil } @@ -130,13 +124,10 @@ func checkAndCreateDefaultUser(conn ldap.Client) error { } if users == nil || len(users.Entries) == 0 { - counter = NewCounter(0) - err := CreateUser(models.User{Username: constants.AdminUserName, Email: adminEmail, Password: adminPassword, Description: "Administrator account that was always created by default."}) + _, err := CreateUser(&models.User{Username: constants.AdminUserName, Email: adminEmail, Password: adminPassword, Description: "Administrator account that was always created by default."}) if err != nil { return fmt.Errorf("admin create failed: %s\n", err) } - } else { - counter = NewCounter(len(users.Entries)) } return nil @@ 
-164,12 +155,12 @@ func createGroupsBaseDN(conn ldap.Client) error { } // User login -func Login(username string, password string, ip string) (string, error) { +func Login(username string, password string, ip string) (*models.Token, error) { conn, err := ldapclient.Client() if err != nil { - return "", err + return nil, err } defer conn.Close() @@ -185,11 +176,11 @@ func Login(username string, password string, ip string) (string, error) { result, err := conn.Search(userSearchRequest) if err != nil { - return "", err + return nil, err } if len(result.Entries) != 1 { - return "", ldap.NewError(ldap.LDAPResultInvalidCredentials, errors.New("incorrect password")) + return nil, ldap.NewError(ldap.LDAPResultInvalidCredentials, errors.New("incorrect password")) } uid := result.Entries[0].GetAttributeValue("uid") @@ -200,7 +191,7 @@ func Login(username string, password string, ip string) (string, error) { err = conn.Bind(dn, password) if err != nil { - return "", err + return nil, err } claims := jwt.MapClaims{} @@ -209,13 +200,11 @@ func Login(username string, password string, ip string) (string, error) { claims["username"] = uid claims["email"] = email - token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) - - uToken, _ := token.SignedString(jwtutils.Secret) + token := jwtutil.MustSigned(claims) loginLog(uid, ip) - return uToken, nil + return &models.Token{Token: token}, nil } func loginLog(uid, ip string) { @@ -226,99 +215,6 @@ func loginLog(uid, ip string) { } } -func UserList(limit int, offset int) (int, []models.User, error) { - - conn, err := ldapclient.Client() - - if err != nil { - return 0, nil, err - } - - defer conn.Close() - - users := make([]models.User, 0) - - pageControl := ldap.NewControlPaging(1000) - - entries := make([]*ldap.Entry, 0) - - cursor := 0 -l1: - for { - - userSearchRequest := ldap.NewSearchRequest( - ldapclient.UserSearchBase, - ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, - "(&(objectClass=inetOrgPerson))", - 
[]string{"uid", "mail", "description"}, - []ldap.Control{pageControl}, - ) - - response, err := conn.Search(userSearchRequest) - - if err != nil { - return 0, nil, err - } - - for _, entry := range response.Entries { - cursor++ - if cursor > offset { - if len(entries) < limit { - entries = append(entries, entry) - } else { - break l1 - } - } - } - - updatedControl := ldap.FindControl(response.Controls, ldap.ControlTypePaging) - if ctrl, ok := updatedControl.(*ldap.ControlPaging); ctrl != nil && ok && len(ctrl.Cookie) != 0 { - pageControl.SetCookie(ctrl.Cookie) - continue - } - - break - } - - redisClient := redis.Client() - - for _, v := range entries { - - uid := v.GetAttributeValue("uid") - email := v.GetAttributeValue("mail") - description := v.GetAttributeValue("description") - user := models.User{Username: uid, Email: email, Description: description} - - avatar, err := redisClient.HMGet("kubesphere:users:avatar", uid).Result() - - if err != nil { - return 0, nil, err - } - - if len(avatar) > 0 { - if url, ok := avatar[0].(string); ok { - user.AvatarUrl = url - } - } - - lastLogin, err := redisClient.LRange(fmt.Sprintf("kubesphere:users:%s:login-log", uid), -1, -1).Result() - - if err != nil { - return 0, nil, err - } - - if len(lastLogin) > 0 { - user.LastLoginTime = strings.Split(lastLogin[0], ",")[0] - } - - user.ClusterRules = make([]models.SimpleRule, 0) - - users = append(users, user) - } - - return counter.Get(), users, nil -} - func LoginLog(username string) ([]string, error) { redisClient := redis.Client() @@ -331,48 +227,77 @@ func LoginLog(username string) ([]string, error) { return data, nil } -func Search(keyword string, limit int, offset int) (int, []models.User, error) { +func ListUsersByName(names []string) (*models.PageableResponse, error) { + users := make([]*models.User, 0) + + for _, name := range names { + if !k8sutil.ContainsUser(users, name) { + user, err := DescribeUser(name) + if err != nil { + if ldap.IsErrorWithCode(err, 
ldap.LDAPResultNoSuchObject) { + continue + } + return nil, err + } + users = append(users, user) + } + } + + items := make([]interface{}, 0) + + for _, u := range users { + items = append(items, u) + } + + return &models.PageableResponse{Items: items, TotalCount: len(items)}, nil +} + +func ListUsers(conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) { conn, err := ldapclient.Client() if err != nil { - return 0, nil, err + return nil, err } defer conn.Close() - users := make([]models.User, 0) - pageControl := ldap.NewControlPaging(80) - entries := make([]*ldap.Entry, 0) + users := make([]models.User, 0) + + filter := "(&(objectClass=inetOrgPerson))" + + if keyword := conditions.Match["keyword"]; keyword != "" { + filter = fmt.Sprintf("(&(objectClass=inetOrgPerson)(|(uid=*%s*)(mail=*%s*)(description=*%s*)))", keyword, keyword, keyword) + } - cursor := 0 -l1: for { userSearchRequest := ldap.NewSearchRequest( ldapclient.UserSearchBase, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, - fmt.Sprintf("(&(objectClass=inetOrgPerson)(|(uid=*%s*)(mail=*%s*)(description=*%s*)))", keyword, keyword, keyword), - []string{"uid", "mail", "description"}, + filter, + []string{"uid", "mail", "description", "preferredLanguage", "createTimestamp"}, []ldap.Control{pageControl}, ) response, err := conn.Search(userSearchRequest) if err != nil { - return 0, nil, err + return nil, err } for _, entry := range response.Entries { - cursor++ - if cursor > offset { - if len(entries) < limit { - entries = append(entries, entry) - } else { - break l1 - } - } + + uid := entry.GetAttributeValue("uid") + email := entry.GetAttributeValue("mail") + description := entry.GetAttributeValue("description") + lang := entry.GetAttributeValue("preferredLanguage") + createTimestamp, _ := time.Parse("20060102150405Z", entry.GetAttributeValue("createTimestamp")) + + user := models.User{Username: uid, Email: email, Description: 
description, Lang: lang, CreateTime: createTimestamp} + + users = append(users, user) } updatedControl := ldap.FindControl(response.Controls, ldap.ControlTypePaging) @@ -384,52 +309,104 @@ l1: break } - redisClient := redis.Client() - - for _, v := range entries { - - uid := v.GetAttributeValue("uid") - email := v.GetAttributeValue("mail") - description := v.GetAttributeValue("description") - user := models.User{Username: uid, Email: email, Description: description} - - avatar, err := redisClient.HMGet("kubesphere:users:avatar", uid).Result() - - if err != nil { - return 0, nil, err + sort.Slice(users, func(i, j int) bool { + if reverse { + tmp := i + i = j + j = tmp } + switch orderBy { + case "username": + fallthrough + case "createTime": + return users[i].CreateTime.Before(users[j].CreateTime) + default: + return strings.Compare(users[i].Username, users[j].Username) <= 0 + } + }) - if len(avatar) > 0 { - if url, ok := avatar[0].(string); ok { - user.AvatarUrl = url + items := make([]interface{}, 0) + + for i, user := range users { + + if i >= offset && len(items) < limit { + + avatar, err := getAvatar(user.Username) + if err != nil { + return nil, err } + user.AvatarUrl = avatar + + lastLoginTime, err := getLastLoginTime(user.Username) + if err != nil { + return nil, err + } + user.LastLoginTime = lastLoginTime + + clusterRole, err := GetUserClusterRole(user.Username) + + if err != nil { + return nil, err + } + + user.ClusterRole = clusterRole.Name + + items = append(items, user) } - - lastLogin, err := redisClient.LRange(fmt.Sprintf("kubesphere:users:%s:login-log", uid), -1, -1).Result() - - if err != nil { - return 0, nil, err - } - - if len(lastLogin) > 0 { - user.LastLoginTime = strings.Split(lastLogin[0], ",")[0] - } - - user.ClusterRules = make([]models.SimpleRule, 0) - - users = append(users, user) } - return counter.Get(), users, nil + return &models.PageableResponse{Items: items, TotalCount: len(users)}, nil } -func UserDetail(username string, conn 
ldap.Client) (*models.User, error) { +func DescribeUser(username string) (*models.User, error) { + + user, err := GetUserInfo(username) + + if err != nil { + return nil, err + } + + groups, err := GetUserGroups(username) + + if err != nil { + return nil, err + } + + user.Groups = groups + + avatar, err := getAvatar(username) + + if err != nil { + return nil, err + } + + user.AvatarUrl = avatar + + lastLoginTime, err := getLastLoginTime(username) + + if err != nil { + return nil, err + } + + user.LastLoginTime = lastLoginTime + + return user, nil +} + +// Get user info only included email description & lang +func GetUserInfo(username string) (*models.User, error) { + + conn, err := ldapclient.Client() + + if err != nil { + return nil, err + } userSearchRequest := ldap.NewSearchRequest( ldapclient.UserSearchBase, ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false, fmt.Sprintf("(&(objectClass=inetOrgPerson)(uid=%s))", username), - []string{"mail", "description", "preferredLanguage"}, + []string{"mail", "description", "preferredLanguage", "createTimestamp"}, nil, ) @@ -446,7 +423,20 @@ func UserDetail(username string, conn ldap.Client) (*models.User, error) { email := result.Entries[0].GetAttributeValue("mail") description := result.Entries[0].GetAttributeValue("description") lang := result.Entries[0].GetAttributeValue("preferredLanguage") - user := models.User{Username: username, Email: email, Description: description, Lang: lang} + createTimestamp, _ := time.Parse("20060102150405Z", result.Entries[0].GetAttributeValue("createTimestamp")) + user := &models.User{Username: username, Email: email, Description: description, Lang: lang, CreateTime: createTimestamp} + + return user, nil +} + +func GetUserGroups(username string) ([]string, error) { + conn, err := ldapclient.Client() + + if err != nil { + return nil, err + } + + defer conn.Close() groupSearchRequest := ldap.NewSearchRequest( ldapclient.GroupSearchBase, @@ -456,11 +446,10 @@ func UserDetail(username 
string, conn ldap.Client) (*models.User, error) { nil, ) - result, err = conn.Search(groupSearchRequest) + result, err := conn.Search(groupSearchRequest) if err != nil { return nil, err - } groups := make([]string, 0) @@ -470,41 +459,47 @@ func UserDetail(username string, conn ldap.Client) (*models.User, error) { groups = append(groups, groupName) } - user.Groups = groups + return groups, nil +} - redisClient := redis.Client() - - avatar, err := redisClient.HMGet("kubesphere:users:avatar", username).Result() +func getLastLoginTime(username string) (string, error) { + lastLogin, err := redis.Client().LRange(fmt.Sprintf("kubesphere:users:%s:login-log", username), -1, -1).Result() if err != nil { - return nil, err + return "", err + } + + if len(lastLogin) > 0 { + return strings.Split(lastLogin[0], ",")[0], nil + } + return "", nil +} + +func setAvatar(username, avatar string) error { + _, err := redis.Client().HMSet("kubesphere:users:avatar", map[string]interface{}{"username": avatar}).Result() + return err +} + +func getAvatar(username string) (string, error) { + + avatar, err := redis.Client().HMGet("kubesphere:users:avatar", username).Result() + + if err != nil { + return "", err } if len(avatar) > 0 { if url, ok := avatar[0].(string); ok { - user.AvatarUrl = url + return url, nil } } - - user.Status = 0 - - lastLogin, err := redisClient.LRange(fmt.Sprintf("kubesphere:users:%s:login-log", username), -1, -1).Result() - - if err != nil { - return nil, err - } - - if len(lastLogin) > 0 { - user.LastLoginTime = strings.Split(lastLogin[0], ",")[0] - } - - return &user, nil + return "", nil } func DeleteUser(username string) error { - // bind root DN conn, err := ldapclient.Client() + if err != nil { return err } @@ -521,13 +516,7 @@ func DeleteUser(username string) error { err = deleteRoleBindings(username) - if err != nil { - return err - } - - counter.Sub(1) - - return nil + return err } func deleteRoleBindings(username string) error { @@ -539,7 +528,7 @@ func 
deleteRoleBindings(username string) error { } for _, roleBinding := range roleBindings { - + roleBinding = roleBinding.DeepCopy() length1 := len(roleBinding.Subjects) for index, subject := range roleBinding.Subjects { @@ -571,6 +560,7 @@ func deleteRoleBindings(username string) error { clusterRoleBindings, err := clusterRoleBindingLister.List(labels.Everything()) for _, clusterRoleBinding := range clusterRoleBindings { + clusterRoleBinding = clusterRoleBinding.DeepCopy() length1 := len(clusterRoleBinding.Subjects) for index, subject := range clusterRoleBinding.Subjects { @@ -637,7 +627,7 @@ func UserCreateCheck(check string) (exist bool, err error) { } } -func CreateUser(user models.User) error { +func CreateUser(user *models.User) (*models.User, error) { user.Username = strings.TrimSpace(user.Username) user.Email = strings.TrimSpace(user.Email) user.Password = strings.TrimSpace(user.Password) @@ -646,7 +636,7 @@ func CreateUser(user models.User) error { conn, err := ldapclient.Client() if err != nil { - return err + return nil, err } defer conn.Close() @@ -662,17 +652,17 @@ func CreateUser(user models.User) error { result, err := conn.Search(userSearchRequest) if err != nil { - return err + return nil, err } if len(result.Entries) > 0 { - return ldap.NewError(ldap.LDAPResultEntryAlreadyExists, fmt.Errorf("username or email already exists")) + return nil, ldap.NewError(ldap.LDAPResultEntryAlreadyExists, fmt.Errorf("username or email already exists")) } maxUid, err := getMaxUid(conn) if err != nil { - return err + return nil, err } maxUid += 1 @@ -688,7 +678,7 @@ func CreateUser(user models.User) error { userCreateRequest.Attribute("mail", []string{user.Email}) // RFC1274: RFC822 Mailbox userCreateRequest.Attribute("userPassword", []string{user.Password}) // RFC4519/2307: password of user if user.Lang != "" { - userCreateRequest.Attribute("preferredLanguage", []string{user.Lang}) // RFC4519/2307: password of user + userCreateRequest.Attribute("preferredLanguage", 
[]string{user.Lang}) } if user.Description != "" { userCreateRequest.Attribute("description", []string{user.Description}) // RFC4519: descriptive information @@ -697,16 +687,22 @@ func CreateUser(user models.User) error { err = conn.Add(userCreateRequest) if err != nil { - return err + return nil, err } - counter.Add(1) + if user.AvatarUrl != "" { + setAvatar(user.Username, user.AvatarUrl) + } if user.ClusterRole != "" { - CreateClusterRoleBinding(user.Username, user.ClusterRole) + err := CreateClusterRoleBinding(user.Username, user.ClusterRole) + + if err != nil { + return nil, err + } } - return nil + return DescribeUser(user.Username) } func getMaxUid(conn ldap.Client) (int, error) { @@ -768,11 +764,12 @@ func getMaxGid(conn ldap.Client) (int, error) { return maxGid, nil } -func UpdateUser(user models.User) error { +func UpdateUser(user *models.User) (*models.User, error) { conn, err := ldapclient.Client() + if err != nil { - return err + return nil, err } defer conn.Close() @@ -794,19 +791,27 @@ func UpdateUser(user models.User) error { userModifyRequest.Replace("userPassword", []string{user.Password}) } + if user.AvatarUrl != "" { + err = setAvatar(user.Username, user.AvatarUrl) + } + + if err != nil { + return nil, err + } + err = conn.Modify(userModifyRequest) if err != nil { - return err + return nil, err } err = CreateClusterRoleBinding(user.Username, user.ClusterRole) if err != nil { - return err + return nil, err } - return nil + return DescribeUser(user.Username) } func DeleteGroup(path string) error { @@ -829,13 +834,14 @@ func DeleteGroup(path string) error { return nil } -func CreateGroup(group models.Group) (*models.Group, error) { +func CreateGroup(group *models.Group) (*models.Group, error) { - // bind root DN conn, err := ldapclient.Client() + if err != nil { return nil, err } + defer conn.Close() maxGid, err := getMaxGid(conn) @@ -861,7 +867,9 @@ func CreateGroup(group models.Group) (*models.Group, error) { 
groupCreateRequest.Attribute("description", []string{group.Description}) } - groupCreateRequest.Attribute("memberUid", []string{group.Creator}) + if group.Members != nil { + groupCreateRequest.Attribute("memberUid", group.Members) + } err = conn.Add(groupCreateRequest) @@ -871,18 +879,7 @@ func CreateGroup(group models.Group) (*models.Group, error) { group.Gid = strconv.Itoa(maxGid) - group.CreateTime = time.Now().UTC().Format("2006-01-02T15:04:05Z") - - redisClient := redis.Client() - - if err := redisClient.HMSet("kubesphere:groups:create-time", map[string]interface{}{group.Name: group.CreateTime}).Err(); err != nil { - return nil, err - } - if err := redisClient.HMSet("kubesphere:groups:creator", map[string]interface{}{group.Name: group.Creator}).Err(); err != nil { - return nil, err - } - - return &group, nil + return DescribeGroup(group.Path) } func UpdateGroup(group *models.Group) (*models.Group, error) { @@ -894,7 +891,7 @@ func UpdateGroup(group *models.Group) (*models.Group, error) { } defer conn.Close() - old, err := GroupDetail(group.Path, conn) + old, err := DescribeGroup(group.Path) if err != nil { return nil, err @@ -1027,34 +1024,22 @@ func ChildList(path string) ([]models.Group, error) { group.ChildGroups = childGroups - redisClient := redis.Client() - - createTime, _ := redisClient.HMGet("kubesphere:groups:create-time", group.Name).Result() - - if len(createTime) > 0 { - if t, ok := createTime[0].(string); ok { - group.CreateTime = t - } - } - - creator, _ := redisClient.HMGet("kubesphere:groups:creator", group.Name).Result() - - if len(creator) > 0 { - if t, ok := creator[0].(string); ok { - group.Creator = t - } - } - groups = append(groups, group) } return groups, nil } -func GroupDetail(path string, conn ldap.Client) (*models.Group, error) { +func DescribeGroup(path string) (*models.Group, error) { searchBase, cn := splitPath(path) + conn, err := ldapclient.Client() + + if err != nil { + return nil, err + } + groupSearchRequest := 
ldap.NewSearchRequest(searchBase, ldap.ScopeSingleLevel, ldap.NeverDerefAliases, 0, 0, false, fmt.Sprintf("(&(objectClass=posixGroup)(cn=%s))", cn), @@ -1083,24 +1068,76 @@ func GroupDetail(path string, conn ldap.Client) (*models.Group, error) { group.ChildGroups = childGroups - redisClient := redis.Client() - - createTime, _ := redisClient.HMGet("kubesphere:groups:create-time", group.Name).Result() - - if len(createTime) > 0 { - if t, ok := createTime[0].(string); ok { - group.CreateTime = t - } - } - - creator, _ := redisClient.HMGet("kubesphere:groups:creator", group.Name).Result() - - if len(creator) > 0 { - if t, ok := creator[0].(string); ok { - group.Creator = t - } - } - return &group, nil } + +func WorkspaceUsersTotalCount(workspace string) (int, error) { + workspaceRoleBindings, err := GetWorkspaceRoleBindings(workspace) + + if err != nil { + return 0, err + } + + users := make([]string, 0) + + for _, roleBinding := range workspaceRoleBindings { + for _, subject := range roleBinding.Subjects { + if subject.Kind == v1.UserKind && !k8sutil.ContainsUser(users, subject.Name) { + users = append(users, subject.Name) + } + } + } + + return len(users), nil +} + +func ListWorkspaceUsers(workspace string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) { + + workspaceRoleBindings, err := GetWorkspaceRoleBindings(workspace) + + if err != nil { + return nil, err + } + + users := make([]*models.User, 0) + + for _, roleBinding := range workspaceRoleBindings { + for _, subject := range roleBinding.Subjects { + if subject.Kind == v1.UserKind && !k8sutil.ContainsUser(users, subject.Name) { + user, err := DescribeUser(subject.Name) + if err != nil { + return nil, err + } + prefix := fmt.Sprintf("workspace:%s:", workspace) + user.WorkspaceRole = fmt.Sprintf("workspace-%s", strings.TrimPrefix(roleBinding.Name, prefix)) + users = append(users, user) + } + } + } + + // order & reverse + sort.Slice(users, func(i, 
j int) bool { + if reverse { + tmp := i + i = j + j = tmp + } + switch orderBy { + default: + fallthrough + case "name": + return strings.Compare(users[i].Username, users[j].Username) <= 0 + } + }) + + result := make([]interface{}, 0) + + for i, d := range users { + if i >= offset && (limit == -1 || len(result) < limit) { + result = append(result, d) + } + } + + return &models.PageableResponse{Items: result, TotalCount: len(users)}, nil +} diff --git a/pkg/models/iam/policy/policy.go b/pkg/models/iam/policy/policy.go index 89f98d528..3af78fd8c 100644 --- a/pkg/models/iam/policy/policy.go +++ b/pkg/models/iam/policy/policy.go @@ -20,6 +20,7 @@ package policy import ( "encoding/json" + "fmt" "io/ioutil" "kubesphere.io/kubesphere/pkg/models" @@ -55,292 +56,25 @@ func init() { } var ( - WorkspaceRoleRuleMapping = []models.Rule{ - { - Name: "workspaces", - Actions: []models.Action{ - - {Name: "edit", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"*"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces"}, - }, { - Verbs: []string{"*"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/*"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"jenkins.kubesphere.io"}, - Resources: []string{"*"}, - }, { - Verbs: []string{"*"}, - APIGroups: []string{"devops.kubesphere.io"}, - Resources: []string{"*"}, - }, - }, - }, - {Name: "delete", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"delete"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces"}, - }, - }, - }, - }, - }, - - {Name: "members", - Actions: []models.Action{ - {Name: "view", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/members"}, - }, - }, - }, - {Name: "create", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"create"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/members"}, - }, - }, - }, - {Name: "edit", - Rules: 
[]v1.PolicyRule{ - { - Verbs: []string{"patch", "update"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/members"}, - }, - }, - }, - {Name: "delete", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"delete"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/members"}, - }, - }, - }, - }, - }, - { - Name: "devops", - Actions: []models.Action{ - {Name: "view", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/devops"}, - }, - }, - }, - {Name: "create", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"create"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/devops"}, - }, - }, - }, - {Name: "edit", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"update", "patch"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/devops"}, - }, - }, - }, - {Name: "delete", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"delete"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/devops"}, - }, - }, - }, - }, - }, - { - Name: "projects", - Actions: []models.Action{ - {Name: "view", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/namespaces"}, - }, - }, - }, - {Name: "create", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"create"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/namespaces"}, - }, - }, - }, - {Name: "edit", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"update", "patch"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/namespaces"}, - }, - }, - }, - {Name: "delete", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"delete"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/namespaces"}, - }, - }, - }, - }, - }, - { - Name: "organizations", - Actions: []models.Action{ - {Name: "view", - Rules: 
[]v1.PolicyRule{ - { - Verbs: []string{"get"}, - APIGroups: []string{"account.kubesphere.io"}, - Resources: []string{"workspaces/organizations"}, - }, - }, - }, - {Name: "create", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"create"}, - APIGroups: []string{"account.kubesphere.io"}, - Resources: []string{"workspaces/organizations"}, - }, - }, - }, - {Name: "edit", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"update", "patch"}, - APIGroups: []string{"account.kubesphere.io"}, - Resources: []string{"workspaces/organizations"}, - }, - }, - }, - {Name: "delete", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"delete"}, - APIGroups: []string{"account.kubesphere.io"}, - Resources: []string{"workspaces/organizations"}, - }, - }, - }}, - }, - { - Name: "roles", - Actions: []models.Action{ - {Name: "view", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/roles"}, - }, - }}, - }, - }, - } - ClusterRoleRuleMapping = []models.Rule{ {Name: "workspaces", Actions: []models.Action{ - { - Name: "view", - Rules: []v1.PolicyRule{ - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"account.kubesphere.io"}, - Resources: []string{"users"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - ResourceNames: []string{"workspaces"}, - Resources: []string{"monitoring/*"}, - }, - { - Verbs: []string{"list"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"quota", "status", "monitoring", "persistentvolumeclaims"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"resources"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces", "workspaces/*"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{""}, - Resources: []string{"namespaces"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"", "apps", "extensions", 
"batch"}, - Resources: []string{"serviceaccounts", "limitranges", "deployments", "configmaps", "secrets", "jobs", "cronjobs", "persistentvolumeclaims", "statefulsets", "daemonsets", "ingresses", "services", "pods/*", "pods", "events", "deployments/scale"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"rbac.authorization.k8s.io"}, - Resources: []string{"rolebindings", "roles"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"account.kubesphere.io"}, - Resources: []string{"members"}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"router"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"jenkins.kubesphere.io", "devops.kubesphere.io"}, - Resources: []string{"*"}, - }, - }, - }, { Name: "create", Rules: []v1.PolicyRule{ { Verbs: []string{"create"}, - APIGroups: []string{"kubesphere.io"}, + APIGroups: []string{"tenant.kubesphere.io"}, + Resources: []string{"workspaces"}, + }, + }, + }, + { + Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"tenant.kubesphere.io"}, Resources: []string{"workspaces"}, }, }, @@ -349,7 +83,7 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"*"}, - APIGroups: []string{"kubesphere.io"}, + APIGroups: []string{"tenant.kubesphere.io", "monitoring.kubesphere.io"}, Resources: []string{"workspaces", "workspaces/*"}, }, { @@ -359,7 +93,7 @@ var ( }, { Verbs: []string{"*"}, - APIGroups: []string{"", "apps", "extensions", "batch"}, + APIGroups: []string{"", "apps", "extensions", "batch", "resources.kubesphere.io"}, Resources: []string{"serviceaccounts", "limitranges", "deployments", "configmaps", "secrets", "jobs", "cronjobs", "persistentvolumeclaims", "statefulsets", "daemonsets", "ingresses", "services", "pods/*", "pods", "events", "deployments/scale"}, }, { @@ -367,16 +101,6 @@ var ( APIGroups: []string{"rbac.authorization.k8s.io"}, Resources: []string{"rolebindings", "roles"}, }, - { - Verbs: 
[]string{"get", "list"}, - APIGroups: []string{"account.kubesphere.io"}, - Resources: []string{"members"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"router"}, - }, { Verbs: []string{"*"}, APIGroups: []string{"jenkins.kubesphere.io", "devops.kubesphere.io"}, @@ -391,9 +115,13 @@ var ( Actions: []models.Action{ {Name: "view", Rules: []v1.PolicyRule{{ - Verbs: []string{"*"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"monitoring", "health", "monitoring/*"}, + Verbs: []string{"get", "list"}, + APIGroups: []string{"monitoring.kubesphere.io"}, + Resources: []string{"*"}, + }, { + Verbs: []string{"get", "list"}, + APIGroups: []string{"resources.kubesphere.io"}, + Resources: []string{"health"}, }}, }, }, @@ -405,14 +133,14 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"get", "watch", "list"}, - APIGroups: []string{"account.kubesphere.io"}, + APIGroups: []string{"iam.kubesphere.io"}, Resources: []string{"users", "users/*"}, }, { Verbs: []string{"get"}, - APIGroups: []string{"account.kubesphere.io"}, - Resources: []string{"clusterrules"}, - ResourceNames: []string{"mapping"}, + APIGroups: []string{"iam.kubesphere.io"}, + Resources: []string{"rulesmapping"}, + ResourceNames: []string{"clusterroles"}, }, { Verbs: []string{"get", "watch", "list"}, @@ -425,12 +153,12 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"create", "get", "list"}, - APIGroups: []string{"account.kubesphere.io"}, + APIGroups: []string{"iam.kubesphere.io"}, Resources: []string{"users"}, }, { Verbs: []string{"get"}, - APIGroups: []string{"account.kubesphere.io"}, + APIGroups: []string{"iam.kubesphere.io"}, Resources: []string{"clusterrules"}, ResourceNames: []string{"mapping"}, }, @@ -445,7 +173,7 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"get", "list", "update", "patch"}, - APIGroups: []string{"account.kubesphere.io"}, + APIGroups: []string{"iam.kubesphere.io"}, Resources: []string{"users"}, }, { @@ -459,8 +187,8 @@ 
var ( Rules: []v1.PolicyRule{ { Verbs: []string{"delete", "deletecollection"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"accounts"}, + APIGroups: []string{"iam.kubesphere.io"}, + Resources: []string{"users"}, }, }, }, @@ -483,8 +211,8 @@ var ( }, { Verbs: []string{"get", "list"}, - APIGroups: []string{"account.kubesphere.io"}, - Resources: []string{"clusterroles/*"}, + APIGroups: []string{"iam.kubesphere.io"}, + Resources: []string{"clusterroles", "clusterroles/*"}, }, }, }, @@ -527,15 +255,9 @@ var ( APIGroups: []string{"storage.k8s.io"}, Resources: []string{"storageclasses"}, }, { - Verbs: []string{"get", "list"}, - APIGroups: []string{"kubesphere.io"}, - ResourceNames: []string{"storage-classes"}, - Resources: []string{"resources"}, - }, - { Verbs: []string{"get", "list"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"storage/*"}, + APIGroups: []string{"resources.kubesphere.io"}, + Resources: []string{"storageclasses", "storageclasses/*"}, }, }, }, @@ -578,15 +300,13 @@ var ( Resources: []string{"nodes", "events"}, }, { - Verbs: []string{"get", "list"}, - APIGroups: []string{"kubesphere.io"}, - ResourceNames: []string{"nodes"}, - Resources: []string{"resources", "monitoring", "monitoring/*"}, + Verbs: []string{"get", "list"}, + APIGroups: []string{"resources.kubesphere.io"}, + Resources: []string{"nodes", "nodes/*"}, }, { - Verbs: []string{"get", "list"}, - APIGroups: []string{"kubesphere.io"}, - ResourceNames: []string{"pods"}, - Resources: []string{"resources"}, + Verbs: []string{"get", "list"}, + APIGroups: []string{"monitoring.kubesphere.io"}, + Resources: []string{"nodes"}, }, }, }, @@ -669,14 +389,9 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"list", "get"}, - APIGroups: []string{"kubesphere.io"}, + APIGroups: []string{"resources.kubesphere.io"}, Resources: []string{"components", "components/*"}, }, - { - Verbs: []string{"list", "get"}, - APIGroups: []string{""}, - Resources: []string{"pods"}, - }, }, }, }, @@ 
-726,12 +441,12 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"get", "list"}, - APIGroups: []string{"rbac.authorization.k8s.io"}, + APIGroups: []string{"rbac.authorization.k8s.io", "resources.kubesphere.io"}, Resources: []string{"rolebindings"}, }, { Verbs: []string{"get", "list"}, - APIGroups: []string{"account.kubesphere.io"}, + APIGroups: []string{"iam.kubesphere.io"}, Resources: []string{"users"}, }, }, @@ -772,15 +487,9 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"get", "list"}, - APIGroups: []string{"rbac.authorization.k8s.io"}, + APIGroups: []string{"rbac.authorization.k8s.io", "resources.kubesphere.io"}, Resources: []string{"roles"}, }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"kubesphere.io"}, - ResourceNames: []string{"roles"}, - Resources: []string{"resources"}, - }, }, }, {Name: "create", @@ -819,7 +528,7 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"get", "list"}, - APIGroups: []string{"apps", "extensions"}, + APIGroups: []string{"apps", "extensions", "resources.kubesphere.io"}, Resources: []string{"deployments", "deployments/scale"}, }, { @@ -875,7 +584,7 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"get", "list"}, - APIGroups: []string{"apps"}, + APIGroups: []string{"apps", "resources.kubesphere.io"}, Resources: []string{"statefulsets"}, }, { @@ -929,7 +638,7 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"get", "list"}, - APIGroups: []string{"apps", "extensions"}, + APIGroups: []string{"apps", "extensions", "resources.kubesphere.io"}, Resources: []string{"daemonsets"}, }, { @@ -974,8 +683,17 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"pod/shell"}, + APIGroups: []string{"resources.kubesphere.io"}, + Resources: []string{"pod/terminal"}, + }, + }, + }, + {Name: "view", + Rules: []v1.PolicyRule{ + { + Verbs: []string{"get", "list"}, + APIGroups: []string{"resources.kubesphere.io"}, + Resources: []string{"pods"}, }, }, }, @@ -997,7 
+715,7 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"list", "get"}, - APIGroups: []string{""}, + APIGroups: []string{"", "resources.kubesphere.io"}, Resources: []string{"services"}, }, }, @@ -1039,7 +757,7 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"get", "list"}, - APIGroups: []string{"kubesphere.io"}, + APIGroups: []string{"resources.kubesphere.io"}, Resources: []string{"router"}, }, }, @@ -1048,7 +766,7 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"create"}, - APIGroups: []string{"kubesphere.io"}, + APIGroups: []string{"resources.kubesphere.io"}, Resources: []string{"router"}, }, }, @@ -1057,7 +775,7 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"update", "patch"}, - APIGroups: []string{"kubesphere.io"}, + APIGroups: []string{"resources.kubesphere.io"}, Resources: []string{"router"}, }, }, @@ -1066,7 +784,7 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"delete"}, - APIGroups: []string{"kubesphere.io"}, + APIGroups: []string{"resources.kubesphere.io"}, Resources: []string{"router"}, }, }, @@ -1081,7 +799,7 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"get", "list"}, - APIGroups: []string{"extensions"}, + APIGroups: []string{"extensions", "resources.kubesphere.io"}, Resources: []string{"ingresses"}, }, }, @@ -1121,7 +839,7 @@ var ( Rules: []v1.PolicyRule{ { Verbs: []string{"get", "list"}, - APIGroups: []string{""}, + APIGroups: []string{"", "resources.kubesphere.io"}, Resources: []string{"persistentvolumeclaims"}, }, }, @@ -1160,10 +878,9 @@ var ( {Name: "view", Rules: []v1.PolicyRule{ { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - ResourceNames: []string{"applications"}, - Resources: []string{"resources"}, + Verbs: []string{"get", "list"}, + APIGroups: []string{"resources.kubesphere.io"}, + Resources: []string{"applications"}, }, { Verbs: []string{"list"}, @@ -1203,7 +920,7 @@ var ( {Name: "view", Rules: []v1.PolicyRule{ { Verbs: []string{"view", "list"}, - APIGroups: []string{"batch"}, + APIGroups: 
[]string{"batch", "resources.kubesphere.io"}, Resources: []string{"jobs"}, }, }}, @@ -1236,7 +953,7 @@ var ( {Name: "view", Rules: []v1.PolicyRule{ { Verbs: []string{"view", "list"}, - APIGroups: []string{"batch"}, + APIGroups: []string{"batch", "resources.kubesphere.io"}, Resources: []string{"cronjobs"}, }, }}, @@ -1269,7 +986,7 @@ var ( {Name: "view", Rules: []v1.PolicyRule{ { Verbs: []string{"view", "list"}, - APIGroups: []string{""}, + APIGroups: []string{"", "resources.kubesphere.io"}, Resources: []string{"secrets"}, }, }}, @@ -1302,7 +1019,7 @@ var ( {Name: "view", Rules: []v1.PolicyRule{ { Verbs: []string{"view", "list"}, - APIGroups: []string{""}, + APIGroups: []string{"", "resources.kubesphere.io"}, Resources: []string{"configmaps"}, }, }}, @@ -1331,3 +1048,16 @@ var ( }, } ) + +func GetClusterAction(module, action string) (models.Action, error) { + for _, rule := range ClusterRoleRuleMapping { + if rule.Name == module { + for _, act := range rule.Actions { + if act.Name == action { + return act, nil + } + } + } + } + return models.Action{}, fmt.Errorf("not found") +} diff --git a/pkg/models/log/constants.go b/pkg/models/log/constants.go index a184b6f64..cfb8efcce 100644 --- a/pkg/models/log/constants.go +++ b/pkg/models/log/constants.go @@ -27,4 +27,4 @@ const ( QueryLevelWorkload QueryLevelPod QueryLevelContainer -) \ No newline at end of file +) diff --git a/pkg/models/log/logcrd.go b/pkg/models/log/logcrd.go index 3abce8e7d..fa3d686c7 100644 --- a/pkg/models/log/logcrd.go +++ b/pkg/models/log/logcrd.go @@ -248,7 +248,7 @@ func FluentbitOutputInsert(output fb.OutputPlugin) *FluentbitOutputsResult { // 1. 
Update ConfigMap var outputs []fb.OutputPlugin outputs, err := GetFluentbitOutputFromConfigMap() - if err != nil { + if err != nil { // If the ConfigMap doesn't exist, a new one will be created later glog.Errorln(err) } diff --git a/pkg/models/metrics/metrics.go b/pkg/models/metrics/metrics.go index 12619b78b..356366bd3 100644 --- a/pkg/models/metrics/metrics.go +++ b/pkg/models/metrics/metrics.go @@ -1,18 +1,18 @@ /* -Copyright 2019 The KubeSphere Authors. + Copyright 2019 The KubeSphere Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ @@ -802,12 +802,12 @@ func MonitorOneWorkspaceStatistics(wsName string) *FormatedLevelMetric { }() go func() { - members, errMemb := workspaces.GetOrgMembers(wsName) + count, errMemb := workspaces.WorkspaceUserCount(wsName) if errMemb != nil { glog.Errorln(errMemb.Error()) } // add member metric - memberMetrics = getSpecificMetricItem(timestamp, MetricNameWorkspaceMemberCount, WorkspaceResourceKindMember, len(members), errMemb) + memberMetrics = getSpecificMetricItem(timestamp, MetricNameWorkspaceMemberCount, WorkspaceResourceKindMember, count, errMemb) wg.Done() }() diff --git a/pkg/models/metrics/util.go b/pkg/models/metrics/util.go index cdf440cd7..d36658ca3 100644 --- a/pkg/models/metrics/util.go +++ b/pkg/models/metrics/util.go @@ -1,17 +1,19 @@ /* -Copyright 2018 The KubeSphere Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Copyright 2019 The KubeSphere Authors. - http://www.apache.org/licenses/LICENSE-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ package metrics @@ -255,6 +257,7 @@ func ReformatJson(metric string, metricsName string, needAddParams map[string]st var formatMetric FormatedMetric err := jsonIter.Unmarshal([]byte(metric), &formatMetric) + if err != nil { glog.Errorln("Unmarshal metric json failed", err.Error(), metric) } diff --git a/pkg/models/quotas/quotas.go b/pkg/models/quotas/quotas.go index 256b25ddc..b1cc20087 100644 --- a/pkg/models/quotas/quotas.go +++ b/pkg/models/quotas/quotas.go @@ -55,9 +55,9 @@ func getUsage(namespace, resource string) (int, error) { var result *models.PageableResponse var err error if resource == resources.Namespaces || resource == resources.StorageClasses { - result, err = resources.ListClusterResource(resource, ¶ms.Conditions{}, "", false, 1, 0) + result, err = resources.ListResources("", resource, ¶ms.Conditions{}, "", false, 1, 0) } else { - result, err = resources.ListNamespaceResource(namespace, resource, ¶ms.Conditions{}, "", false, 1, 0) + result, err = resources.ListResources(namespace, resource, ¶ms.Conditions{}, "", false, 1, 0) } if err != nil { diff --git a/pkg/models/resources/clusterroles.go b/pkg/models/resources/clusterroles.go index c59c3d9b7..0c7b70d53 100644 --- a/pkg/models/resources/clusterroles.go +++ b/pkg/models/resources/clusterroles.go @@ -20,6 +20,7 @@ package resources import ( "kubesphere.io/kubesphere/pkg/informers" "kubesphere.io/kubesphere/pkg/params" + "kubesphere.io/kubesphere/pkg/utils/k8sutil" "sort" "strings" @@ -30,16 +31,34 @@ import ( type clusterRoleSearcher struct { } +func (*clusterRoleSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister().Get(name) +} + // exactly Match func (*clusterRoleSearcher) match(match map[string]string, item *rbac.ClusterRole) bool { for k, v := range match { switch k { + case ownerKind: + fallthrough + case ownerName: + kind := match[ownerKind] + name := match[ownerName] + if 
!k8sutil.IsControlledBy(item.OwnerReferences, kind, name) { + return false + } case name: if item.Name != v && item.Labels[displayName] != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -62,10 +81,6 @@ func (*clusterRoleSearcher) fuzzy(fuzzy map[string]string, item *rbac.ClusterRol return false } return false - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -77,7 +92,7 @@ func (*clusterRoleSearcher) fuzzy(fuzzy map[string]string, item *rbac.ClusterRol func (*clusterRoleSearcher) compare(a, b *rbac.ClusterRole, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough @@ -86,7 +101,7 @@ func (*clusterRoleSearcher) compare(a, b *rbac.ClusterRole, orderBy string) bool } } -func (s *clusterRoleSearcher) search(conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { +func (s *clusterRoleSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { clusterRoles, err := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister().List(labels.Everything()) if err != nil { diff --git a/pkg/models/resources/configmaps.go b/pkg/models/resources/configmaps.go index 03cd328aa..f2f0a2c6c 100644 --- a/pkg/models/resources/configmaps.go +++ b/pkg/models/resources/configmaps.go @@ -30,6 +30,10 @@ import ( type configMapSearcher struct { } +func (*configMapSearcher) get(namespace, name string) (interface{}, error) { + return 
informers.SharedInformerFactory().Core().V1().ConfigMaps().Lister().ConfigMaps(namespace).Get(name) +} + // exactly Match func (*configMapSearcher) match(match map[string]string, item *v1.ConfigMap) bool { for k, v := range match { @@ -38,8 +42,14 @@ func (*configMapSearcher) match(match map[string]string, item *v1.ConfigMap) boo if item.Name != v && item.Labels[displayName] != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -66,10 +76,6 @@ func (*configMapSearcher) fuzzy(fuzzy map[string]string, item *v1.ConfigMap) boo if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -81,7 +87,7 @@ func (*configMapSearcher) fuzzy(fuzzy map[string]string, item *v1.ConfigMap) boo func (*configMapSearcher) compare(a, b *v1.ConfigMap, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough diff --git a/pkg/models/resources/cronjobs.go b/pkg/models/resources/cronjobs.go index b41d5cbfe..e3e0a2f60 100644 --- a/pkg/models/resources/cronjobs.go +++ b/pkg/models/resources/cronjobs.go @@ -31,6 +31,10 @@ import ( type cronJobSearcher struct { } +func (*cronJobSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Batch().V1beta1().CronJobs().Lister().CronJobs(namespace).Get(name) +} + func cronJobStatus(item *v1beta1.CronJob) string { if item.Spec.Suspend != nil && *item.Spec.Suspend { return paused @@ -42,12 +46,22 @@ func 
cronJobStatus(item *v1beta1.CronJob) string { func (*cronJobSearcher) match(match map[string]string, item *v1beta1.CronJob) bool { for k, v := range match { switch k { + case name: + if item.Name != v && item.Labels[displayName] != v { + return false + } case status: if cronJobStatus(item) != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -74,10 +88,6 @@ func (*cronJobSearcher) fuzzy(fuzzy map[string]string, item *v1beta1.CronJob) bo if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -98,7 +108,7 @@ func (*cronJobSearcher) compare(a, b *v1beta1.CronJob, orderBy string) bool { return false } return a.Status.LastScheduleTime.Before(b.Status.LastScheduleTime) - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) default: fallthrough diff --git a/pkg/models/resources/daemonsets.go b/pkg/models/resources/daemonsets.go index 9abf8756a..df2241390 100644 --- a/pkg/models/resources/daemonsets.go +++ b/pkg/models/resources/daemonsets.go @@ -30,6 +30,10 @@ import ( type daemonSetSearcher struct { } +func (*daemonSetSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Apps().V1().DaemonSets().Lister().DaemonSets(namespace).Get(name) +} + func daemonSetStatus(item *v1.DaemonSet) string { if item.Status.NumberAvailable == 0 { return stopped @@ -48,8 +52,18 @@ func (*daemonSetSearcher) match(match map[string]string, item *v1.DaemonSet) boo if daemonSetStatus(item) != v { 
return false } + case name: + if item.Name != v && item.Labels[displayName] != v { + return false + } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -76,10 +90,6 @@ func (*daemonSetSearcher) fuzzy(fuzzy map[string]string, item *v1.DaemonSet) boo if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -92,7 +102,7 @@ func (*daemonSetSearcher) fuzzy(fuzzy map[string]string, item *v1.DaemonSet) boo func (*daemonSetSearcher) compare(a, b *v1.DaemonSet, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough diff --git a/pkg/models/resources/deployments.go b/pkg/models/resources/deployments.go index 26616c022..3983b067b 100644 --- a/pkg/models/resources/deployments.go +++ b/pkg/models/resources/deployments.go @@ -31,6 +31,10 @@ import ( type deploymentSearcher struct { } +func (*deploymentSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Apps().V1().Deployments().Lister().Deployments(namespace).Get(name) +} + func deploymentStatus(item *v1.Deployment) string { if item.Spec.Replicas != nil { if item.Status.ReadyReplicas == 0 && *item.Spec.Replicas == 0 { @@ -52,8 +56,18 @@ func (*deploymentSearcher) match(match map[string]string, item *v1.Deployment) b if deploymentStatus(item) != v { return false } + case name: + if item.Name != v && item.Labels[displayName] != v { + return false + } + case keyword: + if 
!strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -80,10 +94,6 @@ func (*deploymentSearcher) fuzzy(fuzzy map[string]string, item *v1.Deployment) b if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -96,7 +106,7 @@ func (*deploymentSearcher) fuzzy(fuzzy map[string]string, item *v1.Deployment) b func (*deploymentSearcher) compare(a, b *v1.Deployment, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough diff --git a/pkg/models/resources/ingresses.go b/pkg/models/resources/ingresses.go index 0213b439c..e39aef38f 100644 --- a/pkg/models/resources/ingresses.go +++ b/pkg/models/resources/ingresses.go @@ -31,6 +31,10 @@ import ( type ingressSearcher struct { } +func (*ingressSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Extensions().V1beta1().Ingresses().Lister().Ingresses(namespace).Get(name) +} + // exactly Match func (*ingressSearcher) match(match map[string]string, item *extensions.Ingress) bool { for k, v := range match { @@ -39,8 +43,14 @@ func (*ingressSearcher) match(match map[string]string, item *extensions.Ingress) if item.Name != v && item.Labels[displayName] != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -67,10 
+77,6 @@ func (*ingressSearcher) fuzzy(fuzzy map[string]string, item *extensions.Ingress) if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -82,7 +88,7 @@ func (*ingressSearcher) fuzzy(fuzzy map[string]string, item *extensions.Ingress) func (*ingressSearcher) compare(a, b *extensions.Ingress, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough diff --git a/pkg/models/resources/jobs.go b/pkg/models/resources/jobs.go index 0c690437a..1b47510d7 100644 --- a/pkg/models/resources/jobs.go +++ b/pkg/models/resources/jobs.go @@ -32,6 +32,10 @@ import ( type jobSearcher struct { } +func (*jobSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Batch().V1().Jobs().Lister().Jobs(namespace).Get(name) +} + func jobStatus(item *batchv1.Job) string { status := "" @@ -54,8 +58,18 @@ func (*jobSearcher) match(match map[string]string, item *batchv1.Job) bool { if jobStatus(item) != v { return false } + case name: + if item.Name != v && item.Labels[displayName] != v { + return false + } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -82,10 +96,6 @@ func (*jobSearcher) fuzzy(fuzzy map[string]string, item *batchv1.Job) bool { if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && 
!searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -111,6 +121,8 @@ func jobUpdateTime(item *batchv1.Job) time.Time { func (*jobSearcher) compare(a, b *batchv1.Job, orderBy string) bool { switch orderBy { + case CreateTime: + return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case updateTime: return jobUpdateTime(a).Before(jobUpdateTime(b)) case name: diff --git a/pkg/models/resources/namespaces.go b/pkg/models/resources/namespaces.go index d625e6bfd..76aa6c4e4 100644 --- a/pkg/models/resources/namespaces.go +++ b/pkg/models/resources/namespaces.go @@ -30,6 +30,10 @@ import ( type namespaceSearcher struct { } +func (*namespaceSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Core().V1().Namespaces().Lister().Get(name) +} + // exactly Match func (*namespaceSearcher) match(match map[string]string, item *v1.Namespace) bool { for k, v := range match { @@ -38,8 +42,14 @@ func (*namespaceSearcher) match(match map[string]string, item *v1.Namespace) boo if item.Name != v && item.Labels[displayName] != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -66,10 +76,6 @@ func (*namespaceSearcher) fuzzy(fuzzy map[string]string, item *v1.Namespace) boo if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -81,7 +87,7 @@ func (*namespaceSearcher) fuzzy(fuzzy map[string]string, item *v1.Namespace) boo 
func (*namespaceSearcher) compare(a, b *v1.Namespace, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough @@ -90,7 +96,7 @@ func (*namespaceSearcher) compare(a, b *v1.Namespace, orderBy string) bool { } } -func (s *namespaceSearcher) search(conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { +func (s *namespaceSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { namespaces, err := informers.SharedInformerFactory().Core().V1().Namespaces().Lister().List(labels.Everything()) if err != nil { diff --git a/pkg/models/resources/nodes.go b/pkg/models/resources/nodes.go index 32d2230a0..af8366c9c 100644 --- a/pkg/models/resources/nodes.go +++ b/pkg/models/resources/nodes.go @@ -30,6 +30,10 @@ import ( type nodeSearcher struct { } +func (*nodeSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Core().V1().Nodes().Lister().Get(name) +} + // exactly Match func (*nodeSearcher) match(match map[string]string, item *v1.Node) bool { for k, v := range match { @@ -38,8 +42,14 @@ func (*nodeSearcher) match(match map[string]string, item *v1.Node) bool { if item.Name != v && item.Labels[displayName] != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -66,10 +76,6 @@ func (*nodeSearcher) fuzzy(fuzzy map[string]string, item *v1.Node) bool { if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if 
!searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -81,7 +87,7 @@ func (*nodeSearcher) fuzzy(fuzzy map[string]string, item *v1.Node) bool { func (*nodeSearcher) compare(a, b *v1.Node, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough @@ -90,7 +96,7 @@ func (*nodeSearcher) compare(a, b *v1.Node, orderBy string) bool { } } -func (s *nodeSearcher) search(conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { +func (s *nodeSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { nodes, err := informers.SharedInformerFactory().Core().V1().Nodes().Lister().List(labels.Everything()) if err != nil { diff --git a/pkg/models/resources/persistentvolumeclaims.go b/pkg/models/resources/persistentvolumeclaims.go index 7932b581a..2d6790312 100644 --- a/pkg/models/resources/persistentvolumeclaims.go +++ b/pkg/models/resources/persistentvolumeclaims.go @@ -30,6 +30,10 @@ import ( type persistentVolumeClaimSearcher struct { } +func (*persistentVolumeClaimSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Core().V1().PersistentVolumeClaims().Lister().PersistentVolumeClaims(namespace).Get(name) +} + // exactly Match func (*persistentVolumeClaimSearcher) match(match map[string]string, item *v1.PersistentVolumeClaim) bool { for k, v := range match { @@ -38,8 +42,14 @@ func (*persistentVolumeClaimSearcher) match(match map[string]string, item *v1.Pe if item.Name != v && item.Labels[displayName] != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -66,10 +76,6 @@ func 
(*persistentVolumeClaimSearcher) fuzzy(fuzzy map[string]string, item *v1.Pe if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -81,7 +87,7 @@ func (*persistentVolumeClaimSearcher) fuzzy(fuzzy map[string]string, item *v1.Pe func (*persistentVolumeClaimSearcher) compare(a, b *v1.PersistentVolumeClaim, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough diff --git a/pkg/models/resources/pods.go b/pkg/models/resources/pods.go index 2cf9ecf26..df95d97bb 100644 --- a/pkg/models/resources/pods.go +++ b/pkg/models/resources/pods.go @@ -31,25 +31,29 @@ import ( type podSearcher struct { } -func podBelongTo(item *v1.Pod, kind string, name string) bool { +func (*podSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Core().V1().Pods().Lister().Pods(namespace).Get(name) +} - if strings.EqualFold(kind, "Deployment") { +func podBelongTo(item *v1.Pod, kind string, name string) bool { + switch kind { + case "Deployment": if podBelongToDeployment(item, name) { return true } - } else if strings.EqualFold(kind, "ReplicaSet") { + case "ReplicaSet": if podBelongToReplicaSet(item, name) { return true } - } else if strings.EqualFold(kind, "DaemonSet") { + case "DaemonSet": if podBelongToDaemonSet(item, name) { return true } - } else if strings.EqualFold(kind, "StatefulSet") { + case "StatefulSet": if podBelongToStatefulSet(item, name) { return true } - } else if strings.EqualFold(kind, "Job") { + case "Job": if podBelongToJob(item, name) { return true } @@ -57,9 +61,9 @@ func podBelongTo(item *v1.Pod, kind string, name 
string) bool { return false } -func replicaSetBelongToDeployment(replicaSet *v12.ReplicaSet, name string) bool { +func replicaSetBelongToDeployment(replicaSet *v12.ReplicaSet, deploymentName string) bool { for _, owner := range replicaSet.OwnerReferences { - if owner.Kind == "Deployment" && owner.Name == name { + if owner.Kind == "Deployment" && owner.Name == deploymentName { return true } } @@ -84,38 +88,36 @@ func podBelongToJob(item *v1.Pod, name string) bool { return false } -func podBelongToReplicaSet(item *v1.Pod, name string) bool { +func podBelongToReplicaSet(item *v1.Pod, replicaSetName string) bool { for _, owner := range item.OwnerReferences { - if owner.Kind == "ReplicaSet" && owner.Name == name { + if owner.Kind == "ReplicaSet" && owner.Name == replicaSetName { return true } } return false } -func podBelongToStatefulSet(item *v1.Pod, name string) bool { - replicas, err := informers.SharedInformerFactory().Apps().V1().ReplicaSets().Lister().ReplicaSets(item.Namespace).List(labels.Everything()) - if err != nil { - return false - } - for _, r := range replicas { - if replicaSetBelongToDeployment(r, name) { - return podBelongToReplicaSet(item, r.Name) +func podBelongToStatefulSet(item *v1.Pod, statefulSetName string) bool { + for _, owner := range item.OwnerReferences { + if owner.Kind == "StatefulSet" && owner.Name == statefulSetName { + return true } } return false } -func podBelongToDeployment(item *v1.Pod, name string) bool { +func podBelongToDeployment(item *v1.Pod, deploymentName string) bool { replicas, err := informers.SharedInformerFactory().Apps().V1().ReplicaSets().Lister().ReplicaSets(item.Namespace).List(labels.Everything()) if err != nil { return false } + for _, r := range replicas { - if replicaSetBelongToDeployment(r, name) { - return podBelongToReplicaSet(item, r.Name) + if replicaSetBelongToDeployment(r, deploymentName) && podBelongToReplicaSet(item, r.Name) { + return true } } + return false } @@ -134,10 +136,10 @@ func 
podBelongToService(item *v1.Pod, serviceName string) bool { if err != nil { return false } - for k, v := range service.Spec.Selector { - if item.Labels[k] != v { - return false - } + + selector := labels.Set(service.Spec.Selector).AsSelectorPreValidated() + if !selector.Matches(labels.Set(item.Labels)) { + return false } return true } @@ -146,11 +148,11 @@ func podBelongToService(item *v1.Pod, serviceName string) bool { func (*podSearcher) match(match map[string]string, item *v1.Pod) bool { for k, v := range match { switch k { - case "ownerKind": + case ownerKind: fallthrough - case "ownerName": - kind := match["ownerKind"] - name := match["ownerName"] + case ownerName: + kind := match[ownerKind] + name := match[ownerName] if !podBelongTo(item, kind, name) { return false } @@ -170,6 +172,10 @@ func (*podSearcher) match(match map[string]string, item *v1.Pod) bool { if item.Name != v && item.Labels[displayName] != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: if item.Labels[k] != v { return false @@ -200,10 +206,6 @@ func (*podSearcher) fuzzy(fuzzy map[string]string, item *v1.Pod) bool { if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -215,7 +217,7 @@ func (*podSearcher) fuzzy(fuzzy map[string]string, item *v1.Pod) bool { func (*podSearcher) compare(a, b *v1.Pod, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough diff --git a/pkg/models/resources/resources.go b/pkg/models/resources/resources.go index 
1ffcf8a67..59575220b 100644 --- a/pkg/models/resources/resources.go +++ b/pkg/models/resources/resources.go @@ -21,39 +21,45 @@ import ( "fmt" "kubesphere.io/kubesphere/pkg/models" "kubesphere.io/kubesphere/pkg/params" + "kubesphere.io/kubesphere/pkg/utils/sliceutil" "strings" ) func init() { - namespacedResources[ConfigMaps] = &configMapSearcher{} - namespacedResources[CronJobs] = &cronJobSearcher{} - namespacedResources[DaemonSets] = &daemonSetSearcher{} - namespacedResources[Deployments] = &deploymentSearcher{} - namespacedResources[Ingresses] = &ingressSearcher{} - namespacedResources[Jobs] = &jobSearcher{} - namespacedResources[PersistentVolumeClaims] = &persistentVolumeClaimSearcher{} - namespacedResources[Secrets] = &secretSearcher{} - namespacedResources[Services] = &serviceSearcher{} - namespacedResources[StatefulSets] = &statefulSetSearcher{} - namespacedResources[Pods] = &podSearcher{} - namespacedResources[Roles] = &roleSearcher{} - namespacedResources[S2iBuilders] = &s2iBuilderSearcher{} - namespacedResources[S2iRuns] = &s2iRunSearcher{} + resources[ConfigMaps] = &configMapSearcher{} + resources[CronJobs] = &cronJobSearcher{} + resources[DaemonSets] = &daemonSetSearcher{} + resources[Deployments] = &deploymentSearcher{} + resources[Ingresses] = &ingressSearcher{} + resources[Jobs] = &jobSearcher{} + resources[PersistentVolumeClaims] = &persistentVolumeClaimSearcher{} + resources[Secrets] = &secretSearcher{} + resources[Services] = &serviceSearcher{} + resources[StatefulSets] = &statefulSetSearcher{} + resources[Pods] = &podSearcher{} + resources[Roles] = &roleSearcher{} + resources[S2iBuilders] = &s2iBuilderSearcher{} + resources[S2iRuns] = &s2iRunSearcher{} - clusterResources[Nodes] = &nodeSearcher{} - clusterResources[Namespaces] = &namespaceSearcher{} - clusterResources[ClusterRoles] = &clusterRoleSearcher{} - clusterResources[StorageClasses] = &storageClassesSearcher{} - clusterResources[S2iBuilderTemplates] = &s2iBuilderTemplateSearcher{} + 
resources[Nodes] = &nodeSearcher{} + resources[Namespaces] = &namespaceSearcher{} + resources[ClusterRoles] = &clusterRoleSearcher{} + resources[StorageClasses] = &storageClassesSearcher{} + resources[S2iBuilderTemplates] = &s2iBuilderTemplateSearcher{} + resources[Workspaces] = &workspaceSearcher{} } -var namespacedResources = make(map[string]namespacedSearcherInterface) -var clusterResources = make(map[string]clusterSearcherInterface) +var ( + resources = make(map[string]resourceSearchInterface) + clusterResources = []string{Nodes, Workspaces, Namespaces, ClusterRoles, StorageClasses, S2iBuilderTemplates} +) const ( name = "name" label = "label" - createTime = "createTime" + ownerKind = "ownerKind" + ownerName = "ownerName" + CreateTime = "CreateTime" updateTime = "updateTime" lastScheduleTime = "lastScheduleTime" displayName = "displayName" @@ -72,6 +78,8 @@ const ( Deployments = "deployments" DaemonSets = "daemonsets" Roles = "roles" + Workspaces = "workspaces" + WorkspaceRoles = "workspaceroles" CronJobs = "cronjobs" ConfigMaps = "configmaps" Ingresses = "ingresses" @@ -90,72 +98,58 @@ const ( S2iRuns = "s2iruns" ) -type namespacedSearcherInterface interface { +type resourceSearchInterface interface { + get(namespace, name string) (interface{}, error) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) } -type clusterSearcherInterface interface { - search(conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) + +func ListResourcesByName(namespace, resource string, names []string) (*models.PageableResponse, error) { + items := make([]interface{}, 0) + if searcher, ok := resources[resource]; ok { + for _, name := range names { + item, err := searcher.get(namespace, name) + + if err != nil { + return nil, err + } + + items = append(items, item) + } + + } else { + return nil, fmt.Errorf("not found") + } + + return &models.PageableResponse{TotalCount: len(items), Items: items}, 
nil } -func ListNamespaceResource(namespace, resource string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) { +func ListResources(namespace, resource string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) { items := make([]interface{}, 0) - total := 0 var err error var result []interface{} - if searcher, ok := namespacedResources[resource]; ok { + // none namespace resource + if namespace != "" && sliceutil.HasString(clusterResources, resource) { + return nil, fmt.Errorf("not found") + } + + if searcher, ok := resources[resource]; ok { result, err = searcher.search(namespace, conditions, orderBy, reverse) } else { - return nil, fmt.Errorf("not support") + return nil, fmt.Errorf("not found") } if err != nil { return nil, err } - total = len(result) - for i, d := range result { if i >= offset && (limit == -1 || len(items) < limit) { items = append(items, d) } } - return &models.PageableResponse{TotalCount: total, Items: items}, nil -} - -func ListClusterResource(resource string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) { - items := make([]interface{}, 0) - total := 0 - var err error - - if err != nil { - return nil, err - } - - var result []interface{} - - if searcher, ok := clusterResources[resource]; ok { - result, err = searcher.search(conditions, orderBy, reverse) - } else if searcher, ok := namespacedResources[resource]; ok { - result, err = searcher.search("", conditions, orderBy, reverse) - } else { - return nil, fmt.Errorf("not support") - } - - if err != nil { - return nil, err - } - - total = len(result) - - for i, d := range result { - if i >= offset && len(items) < limit { - items = append(items, d) - } - } - - return &models.PageableResponse{TotalCount: total, Items: items}, nil + return &models.PageableResponse{TotalCount: len(result), Items: 
items}, nil } func searchFuzzy(m map[string]string, key, value string) bool { diff --git a/pkg/models/resources/roles.go b/pkg/models/resources/roles.go index 7ee3bed37..221d42a20 100644 --- a/pkg/models/resources/roles.go +++ b/pkg/models/resources/roles.go @@ -30,6 +30,10 @@ import ( type roleSearcher struct { } +func (*roleSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Rbac().V1().Roles().Lister().Roles(namespace).Get(name) +} + // exactly Match func (*roleSearcher) match(match map[string]string, item *rbac.Role) bool { for k, v := range match { @@ -38,8 +42,14 @@ func (*roleSearcher) match(match map[string]string, item *rbac.Role) bool { if item.Name != v && item.Labels[displayName] != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -62,10 +72,6 @@ func (*roleSearcher) fuzzy(fuzzy map[string]string, item *rbac.Role) bool { return false } return false - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -77,7 +83,7 @@ func (*roleSearcher) fuzzy(fuzzy map[string]string, item *rbac.Role) bool { func (*roleSearcher) compare(a, b *rbac.Role, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough diff --git a/pkg/models/resources/s2ibuilder.go b/pkg/models/resources/s2ibuilder.go index b5cfa137d..0a9f11a85 100644 --- a/pkg/models/resources/s2ibuilder.go +++ b/pkg/models/resources/s2ibuilder.go @@ -30,6 +30,10 @@ import ( type s2iBuilderSearcher struct { } +func (*s2iBuilderSearcher) get(namespace, 
name string) (interface{}, error) { + return informers.S2iSharedInformerFactory().Devops().V1alpha1().S2iBuilders().Lister().S2iBuilders(namespace).Get(name) +} + // exactly Match func (*s2iBuilderSearcher) match(match map[string]string, item *v1alpha1.S2iBuilder) bool { for k, v := range match { @@ -38,8 +42,14 @@ func (*s2iBuilderSearcher) match(match map[string]string, item *v1alpha1.S2iBuil if item.Name != v && item.Labels[displayName] != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -66,10 +76,6 @@ func (*s2iBuilderSearcher) fuzzy(fuzzy map[string]string, item *v1alpha1.S2iBuil if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -81,7 +87,7 @@ func (*s2iBuilderSearcher) fuzzy(fuzzy map[string]string, item *v1alpha1.S2iBuil func (*s2iBuilderSearcher) compare(a, b *v1alpha1.S2iBuilder, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough diff --git a/pkg/models/resources/s2ibuildertemplate.go b/pkg/models/resources/s2ibuildertemplate.go index 9557a64fb..7140323b4 100644 --- a/pkg/models/resources/s2ibuildertemplate.go +++ b/pkg/models/resources/s2ibuildertemplate.go @@ -30,6 +30,10 @@ import ( type s2iBuilderTemplateSearcher struct { } +func (*s2iBuilderTemplateSearcher) get(namespace, name string) (interface{}, error) { + return informers.S2iSharedInformerFactory().Devops().V1alpha1().S2iBuilderTemplates().Lister().Get(name) +} + // 
exactly Match func (*s2iBuilderTemplateSearcher) match(match map[string]string, item *v1alpha1.S2iBuilderTemplate) bool { for k, v := range match { @@ -38,8 +42,14 @@ func (*s2iBuilderTemplateSearcher) match(match map[string]string, item *v1alpha1 if item.Name != v && item.Labels[displayName] != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -62,10 +72,6 @@ func (*s2iBuilderTemplateSearcher) fuzzy(fuzzy map[string]string, item *v1alpha1 return false } return false - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -77,7 +83,7 @@ func (*s2iBuilderTemplateSearcher) fuzzy(fuzzy map[string]string, item *v1alpha1 func (*s2iBuilderTemplateSearcher) compare(a, b *v1alpha1.S2iBuilderTemplate, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough @@ -86,7 +92,7 @@ func (*s2iBuilderTemplateSearcher) compare(a, b *v1alpha1.S2iBuilderTemplate, or } } -func (s *s2iBuilderTemplateSearcher) search(conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { +func (s *s2iBuilderTemplateSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { builderTemplates, err := informers.S2iSharedInformerFactory().Devops().V1alpha1().S2iBuilderTemplates().Lister().List(labels.Everything()) if err != nil { diff --git a/pkg/models/resources/s2irun.go b/pkg/models/resources/s2irun.go index 8f36084ad..72f23ce93 100644 --- a/pkg/models/resources/s2irun.go +++ b/pkg/models/resources/s2irun.go @@ 
-33,6 +33,10 @@ import ( type s2iRunSearcher struct { } +func (*s2iRunSearcher) get(namespace, name string) (interface{}, error) { + return informers.S2iSharedInformerFactory().Devops().V1alpha1().S2iRuns().Lister().S2iRuns(namespace).Get(name) +} + // exactly Match func (*s2iRunSearcher) match(match map[string]string, item *v1alpha1.S2iRun) bool { for k, v := range match { @@ -42,11 +46,17 @@ func (*s2iRunSearcher) match(match map[string]string, item *v1alpha1.S2iRun) boo return false } case status: - if string(item.Status.RunState) != v{ + if string(item.Status.RunState) != v { + return false + } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { return false } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -73,10 +83,6 @@ func (*s2iRunSearcher) fuzzy(fuzzy map[string]string, item *v1alpha1.S2iRun) boo if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -88,7 +94,7 @@ func (*s2iRunSearcher) fuzzy(fuzzy map[string]string, item *v1alpha1.S2iRun) boo func (*s2iRunSearcher) compare(a, b *v1alpha1.S2iRun, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough diff --git a/pkg/models/resources/secrets.go b/pkg/models/resources/secrets.go index 7654774a4..7eefd2f93 100644 --- a/pkg/models/resources/secrets.go +++ b/pkg/models/resources/secrets.go @@ -30,6 +30,10 @@ import ( type secretSearcher struct { } +func (*secretSearcher) get(namespace, name string) (interface{}, error) { + return 
informers.SharedInformerFactory().Core().V1().Secrets().Lister().Secrets(namespace).Get(name) +} + // exactly Match func (*secretSearcher) match(match map[string]string, item *v1.Secret) bool { for k, v := range match { @@ -42,8 +46,14 @@ func (*secretSearcher) match(match map[string]string, item *v1.Secret) bool { if string(item.Type) != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -70,10 +80,6 @@ func (*secretSearcher) fuzzy(fuzzy map[string]string, item *v1.Secret) bool { if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -85,7 +91,7 @@ func (*secretSearcher) fuzzy(fuzzy map[string]string, item *v1.Secret) bool { func (*secretSearcher) compare(a, b *v1.Secret, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough diff --git a/pkg/models/resources/services.go b/pkg/models/resources/services.go index 05f99c591..b5dc7535f 100644 --- a/pkg/models/resources/services.go +++ b/pkg/models/resources/services.go @@ -30,6 +30,10 @@ import ( type serviceSearcher struct { } +func (*serviceSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Core().V1().Services().Lister().Services(namespace).Get(name) +} + // exactly Match func (*serviceSearcher) match(match map[string]string, item *v1.Service) bool { for k, v := range match { @@ -38,8 +42,14 @@ func (*serviceSearcher) match(match map[string]string, 
item *v1.Service) bool { if item.Name != v && item.Labels[displayName] != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -66,10 +76,6 @@ func (*serviceSearcher) fuzzy(fuzzy map[string]string, item *v1.Service) bool { if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { return false } - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -81,7 +87,7 @@ func (*serviceSearcher) fuzzy(fuzzy map[string]string, item *v1.Service) bool { func (*serviceSearcher) compare(a, b *v1.Service, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough diff --git a/pkg/models/resources/statefulsets.go b/pkg/models/resources/statefulsets.go index 45f67f4ff..e3f9789d0 100644 --- a/pkg/models/resources/statefulsets.go +++ b/pkg/models/resources/statefulsets.go @@ -30,6 +30,10 @@ import ( type statefulSetSearcher struct { } +func (*statefulSetSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Apps().V1().StatefulSets().Lister().StatefulSets(namespace).Get(name) +} + func statefulSetStatus(item *v1.StatefulSet) string { if item.Spec.Replicas != nil { if item.Status.ReadyReplicas == 0 && *item.Spec.Replicas == 0 { @@ -52,7 +56,9 @@ func (*statefulSetSearcher) match(match map[string]string, item *v1.StatefulSet) return false } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -95,7 +101,7 @@ func (*statefulSetSearcher) fuzzy(fuzzy map[string]string, 
item *v1.StatefulSet) func (*statefulSetSearcher) compare(a, b *v1.StatefulSet, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough diff --git a/pkg/models/resources/storageclasses.go b/pkg/models/resources/storageclasses.go index c5466e40f..ba21aaef0 100644 --- a/pkg/models/resources/storageclasses.go +++ b/pkg/models/resources/storageclasses.go @@ -30,6 +30,10 @@ import ( type storageClassesSearcher struct { } +func (*storageClassesSearcher) get(namespace, name string) (interface{}, error) { + return informers.SharedInformerFactory().Storage().V1().StorageClasses().Lister().Get(name) +} + // exactly Match func (*storageClassesSearcher) match(match map[string]string, item *v1.StorageClass) bool { for k, v := range match { @@ -38,8 +42,14 @@ func (*storageClassesSearcher) match(match map[string]string, item *v1.StorageCl if item.Name != v && item.Labels[displayName] != v { return false } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } default: - return false + if item.Labels[k] != v { + return false + } } } return true @@ -62,10 +72,6 @@ func (*storageClassesSearcher) fuzzy(fuzzy map[string]string, item *v1.StorageCl return false } return false - case keyword: - if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { - return false - } default: if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { return false @@ -77,7 +83,7 @@ func (*storageClassesSearcher) fuzzy(fuzzy map[string]string, item *v1.StorageCl func (*storageClassesSearcher) compare(a, b *v1.StorageClass, orderBy string) bool { switch orderBy { - case createTime: + case CreateTime: return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) case name: fallthrough @@ -86,7 +92,7 @@ func 
(*storageClassesSearcher) compare(a, b *v1.StorageClass, orderBy string) bo } } -func (s *storageClassesSearcher) search(conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { +func (s *storageClassesSearcher) search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { storageClasses, err := informers.SharedInformerFactory().Storage().V1().StorageClasses().Lister().List(labels.Everything()) if err != nil { diff --git a/pkg/models/resources/workspaces.go b/pkg/models/resources/workspaces.go new file mode 100644 index 000000000..41580b6a0 --- /dev/null +++ b/pkg/models/resources/workspaces.go @@ -0,0 +1,132 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package resources + +import ( + tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/params" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/labels" +) + +type workspaceSearcher struct { +} + +func (*workspaceSearcher) get(namespace, name string) (interface{}, error) { + return informers.KsSharedInformerFactory().Tenant().V1alpha1().Workspaces().Lister().Get(name) +} + +// exactly Match +func (*workspaceSearcher) match(match map[string]string, item *tenantv1alpha1.Workspace) bool { + for k, v := range match { + switch k { + case name: + if item.Name != v && item.Labels[displayName] != v { + return false + } + case keyword: + if !strings.Contains(item.Name, v) && !searchFuzzy(item.Labels, "", v) && !searchFuzzy(item.Annotations, "", v) { + return false + } + default: + if item.Labels[k] != v { + return false + } + } + } + return true +} + +// Fuzzy searchInNamespace +func (*workspaceSearcher) fuzzy(fuzzy map[string]string, item *tenantv1alpha1.Workspace) bool { + for k, v := range fuzzy { + switch k { + case name: + if !strings.Contains(item.Name, v) && !strings.Contains(item.Labels[displayName], v) { + return false + } + case label: + if !searchFuzzy(item.Labels, "", v) { + return false + } + case annotation: + if !searchFuzzy(item.Annotations, "", v) { + return false + } + return false + case app: + if !strings.Contains(item.Labels[chart], v) && !strings.Contains(item.Labels[release], v) { + return false + } + default: + if !searchFuzzy(item.Labels, k, v) && !searchFuzzy(item.Annotations, k, v) { + return false + } + } + } + return true +} + +func (*workspaceSearcher) compare(a, b *tenantv1alpha1.Workspace, orderBy string) bool { + switch orderBy { + case CreateTime: + return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) + case name: + fallthrough + default: + return strings.Compare(a.Name, b.Name) <= 0 + } +} + +func (s *workspaceSearcher) 
search(namespace string, conditions *params.Conditions, orderBy string, reverse bool) ([]interface{}, error) { + + workspaces, err := informers.KsSharedInformerFactory().Tenant().V1alpha1().Workspaces().Lister().List(labels.Everything()) + + if err != nil { + return nil, err + } + + result := make([]*tenantv1alpha1.Workspace, 0) + + if len(conditions.Match) == 0 && len(conditions.Fuzzy) == 0 { + result = workspaces + } else { + for _, item := range workspaces { + if s.match(conditions.Match, item) && s.fuzzy(conditions.Fuzzy, item) { + result = append(result, item) + } + } + } + sort.Slice(result, func(i, j int) bool { + if reverse { + tmp := i + i = j + j = tmp + } + return s.compare(result[i], result[j], orderBy) + }) + + r := make([]interface{}, 0) + for _, i := range result { + r = append(r, i) + } + return r, nil +} diff --git a/pkg/models/routers/routers.go b/pkg/models/routers/routers.go index 34fb4a3e4..9f85c4777 100644 --- a/pkg/models/routers/routers.go +++ b/pkg/models/routers/routers.go @@ -32,12 +32,9 @@ import ( meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/api/rbac/v1" - "strings" "kubesphere.io/kubesphere/pkg/constants" - "kubesphere.io/kubesphere/pkg/models/iam" ) func GetAllRouters() ([]*corev1.Service, error) { @@ -54,39 +51,6 @@ func GetAllRouters() ([]*corev1.Service, error) { return services, nil } -func GetAllRoutersOfUser(username string) ([]*corev1.Service, error) { - allNamespace, namespaces, err := iam.GetUserNamespaces(username, v1.PolicyRule{ - Verbs: []string{"get", "list"}, - APIGroups: []string{""}, - Resources: []string{"services"}, - }) - - // return by cluster role - if err != nil { - glog.Error(err) - return nil, err - } - - if allNamespace { - return GetAllRouters() - } - - routers := make([]*corev1.Service, 0) - - for _, namespace := range namespaces { - router, err := GetRouter(namespace) - if err != nil { - glog.Error(err) - return routers, err - } else if router != nil { - 
routers = append(routers, router) - } - } - - return routers, nil - -} - // Get router from a namespace func GetRouter(namespace string) (*corev1.Service, error) { serviceName := constants.IngressControllerPrefix + namespace diff --git a/pkg/models/status/status.go b/pkg/models/status/status.go index 28c1896c1..c726a408c 100644 --- a/pkg/models/status/status.go +++ b/pkg/models/status/status.go @@ -40,7 +40,7 @@ func GetNamespacesResourceStatus(namespace string) (*workLoadStatus, error) { notReadyStatus = "pending" } - notReadyList, err = resources.ListNamespaceResource(namespace, resource, ¶ms.Conditions{Match: map[string]string{"status": notReadyStatus}}, "", false, -1, 0) + notReadyList, err = resources.ListResources(namespace, resource, ¶ms.Conditions{Match: map[string]string{"status": notReadyStatus}}, "", false, -1, 0) if err != nil { return nil, err diff --git a/pkg/models/tenant/devops.go b/pkg/models/tenant/devops.go new file mode 100644 index 000000000..5c68dc56f --- /dev/null +++ b/pkg/models/tenant/devops.go @@ -0,0 +1,119 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
// ListDevopsProjects returns one page of the DevOps projects that are bound
// to the given workspace and visible to username.
//
// Project data is merged from two sources:
//   - the workspace<->project bindings stored in MySQL, and
//   - the per-user project list fetched from the DevOps API server
//     (the request is issued on the user's behalf via the UserNameHeader
//     header).
//
// The fetched list is filtered by the optional "keyword" match condition,
// sorted by orderBy ("name", otherwise creation time; reverse flips the
// direction), restricted to projects bound to the workspace, and finally
// paginated with limit/offset. TotalCount reflects the full filtered list,
// not just the returned page.
func ListDevopsProjects(workspace, username string, conditions *params.Conditions, orderBy string, reverse bool, limit int, offset int) (*models.PageableResponse, error) {

	db := mysql.Client()

	var workspaceDOPBindings []models.WorkspaceDPBinding

	// Bindings decide which of the user's projects belong to this workspace.
	if err := db.Where("workspace = ?", workspace).Find(&workspaceDOPBindings).Error; err != nil {
		return nil, err
	}

	devOpsProjects := make([]models.DevopsProject, 0)

	// Ask the DevOps API server for the projects this user can see.
	request, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s/api/v1alpha/projects", constants.DevopsAPIServer), nil)
	request.Header.Add(constants.UserNameHeader, username)

	resp, err := http.DefaultClient.Do(request)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)

	if err != nil {
		return nil, err
	}

	// Any status above 200 carries a serialized error payload.
	if resp.StatusCode > 200 {
		return nil, kserr.Parse(data)
	}

	err = json.Unmarshal(data, &devOpsProjects)

	if err != nil {
		return nil, err
	}

	// Drop projects whose name does not contain the keyword. Deletion is done
	// in place, stepping i back after each removal.
	if keyword := conditions.Match["keyword"]; keyword != "" {
		for i := 0; i < len(devOpsProjects); i++ {
			if !strings.Contains(devOpsProjects[i].Name, keyword) {
				devOpsProjects = append(devOpsProjects[:i], devOpsProjects[i+1:]...)
				i--
			}
		}
	}

	sort.Slice(devOpsProjects, func(i, j int) bool {
		switch orderBy {
		case "name":
			// NOTE(review): non-reverse "name" ordering uses '>' (descending)
			// while reverse uses '<' — confirm this inversion is intended.
			if reverse {
				return devOpsProjects[i].Name < devOpsProjects[j].Name
			} else {
				return devOpsProjects[i].Name > devOpsProjects[j].Name
			}
		default:
			// Default ordering is by creation time (same inversion as above).
			if reverse {
				return devOpsProjects[i].CreateTime.After(*devOpsProjects[j].CreateTime)
			} else {
				return devOpsProjects[i].CreateTime.Before(*devOpsProjects[j].CreateTime)
			}
		}
	})

	// Keep only projects bound to this workspace (in-place delete again).
	for i := 0; i < len(devOpsProjects); i++ {
		inWorkspace := false

		for _, binding := range workspaceDOPBindings {
			if binding.DevOpsProject == *devOpsProjects[i].ProjectId {
				inWorkspace = true
			}
		}
		if !inWorkspace {
			devOpsProjects = append(devOpsProjects[:i], devOpsProjects[i+1:]...)
			i--
		}
	}

	// Apply limit/offset pagination to the filtered, sorted list.
	result := make([]interface{}, 0)
	for i, v := range devOpsProjects {
		if len(result) < limit && i >= offset {
			result = append(result, v)
		}
	}

	return &models.PageableResponse{Items: result, TotalCount: len(devOpsProjects)}, nil
}
+ +*/ +package tenant + +import ( + "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/labels" + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/models/iam" + "kubesphere.io/kubesphere/pkg/params" + "sort" + "strings" +) + +type namespaceSearcher struct { +} + +// Exactly Match +func (*namespaceSearcher) match(match map[string]string, item *v1.Namespace) bool { + for k, v := range match { + switch k { + default: + if item.Labels[k] != v { + return false + } + } + } + return true +} + +func (*namespaceSearcher) fuzzy(fuzzy map[string]string, item *v1.Namespace) bool { + + for k, v := range fuzzy { + switch k { + case "name": + if !strings.Contains(item.Name, v) && !strings.Contains(item.Labels["displayName"], v) { + return false + } + default: + return false + } + } + + return true +} + +func (*namespaceSearcher) compare(a, b *v1.Namespace, orderBy string) bool { + switch orderBy { + case "createTime": + return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) + case "name": + fallthrough + default: + return strings.Compare(a.Name, b.Name) <= 0 + } +} + +func (*namespaceSearcher) GetNamespaces(username string) ([]*v1.Namespace, error) { + + roles, err := iam.GetUserRoles("", username) + + if err != nil { + return nil, err + } + + namespaces := make([]*v1.Namespace, 0) + namespaceLister := informers.SharedInformerFactory().Core().V1().Namespaces().Lister() + for _, role := range roles { + namespace, err := namespaceLister.Get(role.Namespace) + if err != nil { + return nil, err + } + namespaces = append(namespaces, namespace) + } + + return namespaces, nil +} + +func (s *namespaceSearcher) search(username string, conditions *params.Conditions, orderBy string, reverse bool) ([]*v1.Namespace, error) { + + rules, err := iam.GetUserClusterRules(username) + + if err != nil { + return nil, err + } + + namespaces := make([]*v1.Namespace, 0) + + if iam.RulesMatchesRequired(rules, rbacv1.PolicyRule{Verbs: 
[]string{"list"}, APIGroups: []string{"tenant.kubesphere.io"}, Resources: []string{"namespaces"}}) { + namespaces, err = informers.SharedInformerFactory().Core().V1().Namespaces().Lister().List(labels.Everything()) + } else { + namespaces, err = s.GetNamespaces(username) + } + + if err != nil { + return nil, err + } + + result := make([]*v1.Namespace, 0) + + for _, namespace := range namespaces { + if s.match(conditions.Match, namespace) && s.fuzzy(conditions.Fuzzy, namespace) { + result = append(result, namespace) + } + } + + // order & reverse + sort.Slice(result, func(i, j int) bool { + if reverse { + tmp := i + i = j + j = tmp + } + return s.compare(result[i], result[j], orderBy) + }) + + return result, nil +} diff --git a/pkg/models/tenant/tenant.go b/pkg/models/tenant/tenant.go new file mode 100644 index 000000000..e7bd311c4 --- /dev/null +++ b/pkg/models/tenant/tenant.go @@ -0,0 +1,103 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package tenant + +import ( + "k8s.io/api/core/v1" + "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/models" + ws "kubesphere.io/kubesphere/pkg/models/workspaces" + "kubesphere.io/kubesphere/pkg/params" + "kubesphere.io/kubesphere/pkg/simple/client/k8s" + "strconv" +) + +var ( + workspaces = workspaceSearcher{} + namespaces = namespaceSearcher{} +) + +func CreateNamespace(workspaceName string, namespace *v1.Namespace, username string) (*v1.Namespace, error) { + if namespace.Labels == nil { + namespace.Labels = make(map[string]string, 0) + } + if username != "" { + namespace.Labels[constants.CreatorLabelKey] = username + } + + namespace.Labels[constants.WorkspaceLabelKey] = workspaceName + + return k8s.Client().CoreV1().Namespaces().Create(namespace) +} + +func ListWorkspaces(username string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) { + + workspaces, err := workspaces.search(username, conditions, orderBy, reverse) + + if err != nil { + return nil, err + } + + // limit offset + result := make([]interface{}, 0) + for i, workspace := range workspaces { + if len(result) < limit && i >= offset { + workspace := workspace.DeepCopy() + ns, err := ListNamespaces(username, ¶ms.Conditions{Match: map[string]string{"kubesphere.io/workspace": workspace.Name}}, "", false, 1, 0) + if err != nil { + return nil, err + } + if workspace.Annotations == nil { + workspace.Annotations = make(map[string]string) + } + workspace.Annotations["kubesphere.io/namespace-count"] = strconv.Itoa(ns.TotalCount) + devops, err := ListDevopsProjects(workspace.Name, username, ¶ms.Conditions{}, "", false, 1, 0) + if err != nil { + return nil, err + } + workspace.Annotations["kubesphere.io/devops-count"] = strconv.Itoa(devops.TotalCount) + userCount, err := ws.WorkspaceUserCount(workspace.Name) + if err != nil { + return nil, err + } + workspace.Annotations["kubesphere.io/member-count"] = 
strconv.Itoa(userCount) + result = append(result, workspace) + } + } + + return &models.PageableResponse{Items: result, TotalCount: len(workspaces)}, nil +} + +func ListNamespaces(username string, conditions *params.Conditions, orderBy string, reverse bool, limit, offset int) (*models.PageableResponse, error) { + + namespaces, err := namespaces.search(username, conditions, orderBy, reverse) + + if err != nil { + return nil, err + } + + // limit offset + result := make([]interface{}, 0) + for i, v := range namespaces { + if len(result) < limit && i >= offset { + result = append(result, v) + } + } + + return &models.PageableResponse{Items: result, TotalCount: len(namespaces)}, nil +} diff --git a/pkg/models/tenant/workspaces.go b/pkg/models/tenant/workspaces.go new file mode 100644 index 000000000..1ed84781a --- /dev/null +++ b/pkg/models/tenant/workspaces.go @@ -0,0 +1,130 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package tenant + +import ( + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/labels" + "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" + "kubesphere.io/kubesphere/pkg/constants" + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/models/iam" + "kubesphere.io/kubesphere/pkg/params" + "sort" + "strings" +) + +type workspaceSearcher struct { +} + +// Exactly Match +func (*workspaceSearcher) match(match map[string]string, item *v1alpha1.Workspace) bool { + for k, v := range match { + switch k { + case "name": + if item.Name != v && item.Labels[constants.DisplayNameLabelKey] != v { + return false + } + default: + if item.Labels[k] != v { + return false + } + } + } + return true +} + +func (*workspaceSearcher) fuzzy(fuzzy map[string]string, item *v1alpha1.Workspace) bool { + + for k, v := range fuzzy { + switch k { + case "name": + if !strings.Contains(item.Name, v) && !strings.Contains(item.Labels["displayName"], v) { + return false + } + default: + return false + } + } + + return true +} + +func (*workspaceSearcher) compare(a, b *v1alpha1.Workspace, orderBy string) bool { + switch orderBy { + case "createTime": + return a.CreationTimestamp.Time.Before(b.CreationTimestamp.Time) + case "name": + fallthrough + default: + return strings.Compare(a.Name, b.Name) <= 0 + } +} + +func (s *workspaceSearcher) search(username string, conditions *params.Conditions, orderBy string, reverse bool) ([]*v1alpha1.Workspace, error) { + rules, err := iam.GetUserClusterRules(username) + + if err != nil { + return nil, err + } + + workspaces := make([]*v1alpha1.Workspace, 0) + + if iam.RulesMatchesRequired(rules, rbacv1.PolicyRule{Verbs: []string{"list"}, APIGroups: []string{"tenant.kubesphere.io"}, Resources: []string{"workspaces"}}) { + workspaces, err = informers.KsSharedInformerFactory().Tenant().V1alpha1().Workspaces().Lister().List(labels.Everything()) + if err != nil { + return nil, err + } + } else { + workspaceRoles, err := 
iam.GetUserWorkspaceRoleMap(username) + if err != nil { + return nil, err + } + for k := range workspaceRoles { + workspace, err := informers.KsSharedInformerFactory().Tenant().V1alpha1().Workspaces().Lister().Get(k) + if err != nil { + return nil, err + } + workspaces = append(workspaces, workspace) + } + } + + result := make([]*v1alpha1.Workspace, 0) + + for _, workspace := range workspaces { + if s.match(conditions.Match, workspace) && s.fuzzy(conditions.Fuzzy, workspace) { + result = append(result, workspace) + } + } + + // order & reverse + sort.Slice(result, func(i, j int) bool { + if reverse { + tmp := i + i = j + j = tmp + } + return s.compare(result[i], result[j], orderBy) + }) + + return result, nil +} + +func GetWorkspace(workspaceName string) (*v1alpha1.Workspace, error) { + return informers.KsSharedInformerFactory().Tenant().V1alpha1().Workspaces().Lister().Get(workspaceName) +} diff --git a/pkg/models/terminal/terminal.go b/pkg/models/terminal/terminal.go new file mode 100644 index 000000000..6a665ddc9 --- /dev/null +++ b/pkg/models/terminal/terminal.go @@ -0,0 +1,301 @@ +// Copyright 2018 The Kubesphere Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
// the code is mainly from:
// https://github.com/kubernetes/dashboard/blob/master/src/app/backend/handler/terminal.go
// thanks to the related developer

package terminal

import (
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"github.com/golang/glog"
	"gopkg.in/igm/sockjs-go.v2/sockjs"
	"io"
	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/remotecommand"
	"kubesphere.io/kubesphere/pkg/simple/client/k8s"
)

// PtyHandler is what remotecommand expects from a pty
type PtyHandler interface {
	io.Reader
	io.Writer
	remotecommand.TerminalSizeQueue
}

// TerminalSession implements PtyHandler (using a SockJS connection)
type TerminalSession struct {
	id            string               // session id handed to the frontend (see genTerminalSessionId)
	bound         chan error           // signalled once the SockJS connection is bound to this session
	sockJSSession sockjs.Session       // underlying SockJS transport; set in HandleTerminalSession
	sizeChan      chan remotecommand.TerminalSize // resize events forwarded to remotecommand
}

// TerminalMessage is the messaging protocol between ShellController and TerminalSession.
//
// OP      DIRECTION  FIELD(S) USED  DESCRIPTION
// ---------------------------------------------------------------------
// bind    fe->be     SessionID      Id sent back from TerminalResponse
// stdin   fe->be     Data           Keystrokes/paste buffer
// resize  fe->be     Rows, Cols     New terminal size
// stdout  be->fe     Data           Output from the process
// toast   be->fe     Data           OOB message to be shown to the user
type TerminalMessage struct {
	Op, Data, SessionID string
	Rows, Cols          uint16
}

// TerminalSize handles pty->process resize events
// Called in a loop from remotecommand as long as the process is running
// Blocks until the next resize event arrives on sizeChan.
func (t TerminalSession) Next() *remotecommand.TerminalSize {
	select {
	case size := <-t.sizeChan:
		return &size
	}
}

// Read handles pty->process messages (stdin, resize)
// Called in a loop from remotecommand as long as the process is running
// "resize" messages are diverted to sizeChan and yield a zero-length read.
func (t TerminalSession) Read(p []byte) (int, error) {
	m, err := t.sockJSSession.Recv()
	if err != nil {
		return 0, err
	}

	var msg TerminalMessage
	if err := json.Unmarshal([]byte(m), &msg); err != nil {
		return 0, err
	}

	switch msg.Op {
	case "stdin":
		return copy(p, msg.Data), nil
	case "resize":
		t.sizeChan <- remotecommand.TerminalSize{Width: msg.Cols, Height: msg.Rows}
		return 0, nil
	default:
		return 0, fmt.Errorf("unknown message type '%s'", msg.Op)
	}
}

// Write handles process->pty stdout
// Called from remotecommand whenever there is any output
// The bytes are wrapped in a "stdout" TerminalMessage and sent over SockJS.
func (t TerminalSession) Write(p []byte) (int, error) {
	msg, err := json.Marshal(TerminalMessage{
		Op:   "stdout",
		Data: string(p),
	})
	if err != nil {
		return 0, err
	}

	if err = t.sockJSSession.Send(string(msg)); err != nil {
		return 0, err
	}
	return len(p), nil
}

// Toast can be used to send the user any OOB messages
// hterm puts these in the center of the terminal
func (t TerminalSession) Toast(p string) error {
	msg, err := json.Marshal(TerminalMessage{
		Op:   "toast",
		Data: p,
	})
	if err != nil {
		return err
	}

	if err = t.sockJSSession.Send(string(msg)); err != nil {
		return err
	}
	return nil
}

// Close shuts down the SockJS connection and sends the status code and reason to the client
// Can happen if the process exits or if there is an error starting up the process
// For now the status code is unused and reason is shown to the user (unless "")
func (t TerminalSession) Close(status uint32, reason string) {
	t.sockJSSession.Close(status, reason)
}

// terminalSessions stores a map of all TerminalSession objects
// FIXME: this structure needs locking
// NOTE(review): concurrent access from HandleTerminalSession, NewSession and
// WaitingForConnection is a data race until the FIXME above is addressed.
var terminalSessions = make(map[string]TerminalSession)

// HandleTerminalSession is Called by net/http for any new /api/sockjs connections.
// It expects a "bind" message carrying a SessionID previously returned by
// NewSession, attaches the SockJS connection to that session, and signals
// WaitingForConnection via the session's bound channel.
func HandleTerminalSession(session sockjs.Session) {
	glog.Infof("handleTerminalSession, ID:%s", session.ID())
	var (
		buf             string
		err             error
		msg             TerminalMessage
		terminalSession TerminalSession
		ok              bool
	)

	if buf, err = session.Recv(); err != nil {
		glog.Errorf("handleTerminalSession: can't Recv: %v", err)
		return
	}

	if err = json.Unmarshal([]byte(buf), &msg); err != nil {
		glog.Errorf("handleTerminalSession: can't UnMarshal (%v): %s", err, buf)
		return
	}

	if msg.Op != "bind" {
		glog.Errorf("handleTerminalSession: expected 'bind' message, got: %s", buf)
		return
	}

	if terminalSession, ok = terminalSessions[msg.SessionID]; !ok {
		glog.Errorf("handleTerminalSession: can't find session '%s'", msg.SessionID)
		return
	}

	terminalSession.sockJSSession = session
	terminalSessions[msg.SessionID] = terminalSession
	terminalSession.bound <- nil
}

// startProcess is called by handleAttach
// Executed cmd in the container specified in request and connects it up with the ptyHandler (a session)
// It blocks until the remote process exits or the stream fails.
func startProcess(namespace, podName, containerName string, cmd []string, ptyHandler PtyHandler) error {

	k8sClient := k8s.Client()
	cfg, err := k8s.Config()
	if err != nil {
		return err
	}

	req := k8sClient.CoreV1().RESTClient().Post().
		Resource("pods").
		Name(podName).
		Namespace(namespace).
		SubResource("exec")
	req.VersionedParams(&v1.PodExecOptions{
		Container: containerName,
		Command:   cmd,
		Stdin:     true,
		Stdout:    true,
		Stderr:    true,
		TTY:       true,
	}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL())
	if err != nil {
		return err
	}

	err = exec.Stream(remotecommand.StreamOptions{
		Stdin:             ptyHandler,
		Stdout:            ptyHandler,
		Stderr:            ptyHandler,
		TerminalSizeQueue: ptyHandler,
		Tty:               true,
	})
	if err != nil {
		return err
	}

	return nil
}
+func genTerminalSessionId() (string, error) { + + bytes := make([]byte, 16) + if _, err := rand.Read(bytes); err != nil { + return "", err + } + id := make([]byte, hex.EncodedLen(len(bytes))) + hex.Encode(id, bytes) + glog.Infof("genTerminalSessionId, id:" + string(id)) + return string(id), nil +} + +// isValidShell checks if the shell is an allowed one +func isValidShell(validShells []string, shell string) bool { + for _, validShell := range validShells { + if validShell == shell { + return true + } + } + return false +} + +// WaitingForConnection is called from apihandler.handleAttach as a goroutine +// Waits for the SockJS connection to be opened by the client the session to be bound in handleTerminalSession +func WaitingForConnection(shell string, namespace, podName, containerName string, sessionId string) { + glog.Infof("WaitingForConnection, ID:%s", sessionId) + session := terminalSessions[sessionId] + select { + case <-session.bound: + close(session.bound) + defer delete(terminalSessions, sessionId) + var err error + validShells := []string{"sh", "bash"} + + if isValidShell(validShells, shell) { + cmd := []string{shell} + err = startProcess(namespace, podName, containerName, cmd, session) + } else { + // No shell given or it was not valid: try some shells until one succeeds or all fail + // FIXME: if the first shell fails then the first keyboard event is lost + for _, testShell := range validShells { + cmd := []string{testShell} + if err = startProcess(namespace, podName, containerName, cmd, session); err == nil { + break + } + } + } + + if err != nil { + session.Close(2, err.Error()) + return + } + + session.Close(1, "Process exited") + } +} + +func NewSession(shell, namespace, podName, containerName string) (string, error) { + sessionId, err := genTerminalSessionId() + if err != nil { + return "", err + } + + terminalSessions[sessionId] = TerminalSession{ + id: sessionId, + bound: make(chan error), + sizeChan: make(chan remotecommand.TerminalSize), + } + 
+ if err != nil { + return "", err + } + + go WaitingForConnection(shell, namespace, podName, containerName, sessionId) + + return sessionId, nil +} diff --git a/pkg/models/types.go b/pkg/models/types.go index 160be8b30..e59897597 100644 --- a/pkg/models/types.go +++ b/pkg/models/types.go @@ -36,15 +36,6 @@ type Workspace struct { DevopsProjects []string `json:"devops_projects"` } -type UserInvite struct { - Username string `json:"username"` - Role string `json:"role"` -} - -func (g Group) GetCreateTime() (time.Time, error) { - return time.Parse("2006-01-02T15:04:05Z", g.CreateTime) -} - type WorkspaceDPBinding struct { Workspace string `gorm:"primary_key"` DevOpsProject string `gorm:"primary_key"` @@ -76,27 +67,23 @@ type SimpleRule struct { } type User struct { - Username string `json:"username"` - //UID string `json:"uid"` - Groups []string `json:"groups,omitempty"` - Password string `json:"password,omitempty"` - CurrentPassword string `json:"current_password,omitempty"` - //Extra map[string]interface{} `json:"extra"` - AvatarUrl string `json:"avatar_url"` - Description string `json:"description"` - Email string `json:"email"` - LastLoginTime string `json:"last_login_time"` - Status int `json:"status"` - ClusterRole string `json:"cluster_role"` - ClusterRules []SimpleRule `json:"cluster_rules"` - Roles map[string]string `json:"roles,omitempty"` - Rules map[string][]SimpleRule `json:"rules,omitempty"` - Role string `json:"role,omitempty"` - RoleBinding string `json:"role_binding,omitempty"` - Lang string `json:"lang,omitempty"` - WorkspaceRoles map[string]string `json:"workspace_roles,omitempty"` - WorkspaceRole string `json:"workspace_role,omitempty"` - WorkspaceRules map[string][]SimpleRule `json:"workspace_rules,omitempty"` + Username string `json:"username"` + Email string `json:"email"` + Lang string `json:"lang,omitempty"` + Description string `json:"description"` + CreateTime time.Time `json:"create_time"` + Groups []string `json:"groups,omitempty"` + 
Password string `json:"password,omitempty"` + CurrentPassword string `json:"current_password,omitempty"` + AvatarUrl string `json:"avatar_url"` + LastLoginTime string `json:"last_login_time"` + Status int `json:"status"` + ClusterRole string `json:"cluster_role"` + Roles map[string]string `json:"roles,omitempty"` + Role string `json:"role,omitempty"` + RoleBinding string `json:"role_binding,omitempty"` + RoleBindTime *time.Time `json:"role_bind_time,omitempty"` + WorkspaceRole string `json:"workspace_role,omitempty"` } type Group struct { @@ -105,8 +92,6 @@ type Group struct { Gid string `json:"gid"` Members []string `json:"members"` Logo string `json:"logo"` - Creator string `json:"creator"` - CreateTime string `json:"create_time"` ChildGroups []string `json:"child_groups"` Description string `json:"description"` } diff --git a/pkg/models/workspaces/workspaces.go b/pkg/models/workspaces/workspaces.go index 2633af192..66e060b35 100644 --- a/pkg/models/workspaces/workspaces.go +++ b/pkg/models/workspaces/workspaces.go @@ -22,9 +22,15 @@ import ( "encoding/json" "fmt" "io/ioutil" + "k8s.io/apimachinery/pkg/runtime/schema" + "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" + "kubesphere.io/kubesphere/pkg/models/resources" + "kubesphere.io/kubesphere/pkg/params" "kubesphere.io/kubesphere/pkg/simple/client/k8s" "kubesphere.io/kubesphere/pkg/simple/client/ldap" "kubesphere.io/kubesphere/pkg/simple/client/mysql" + "kubesphere.io/kubesphere/pkg/utils/k8sutil" + "kubesphere.io/kubesphere/pkg/utils/sliceutil" "net/http" "kubesphere.io/kubesphere/pkg/constants" @@ -32,22 +38,17 @@ import ( "kubesphere.io/kubesphere/pkg/models" "kubesphere.io/kubesphere/pkg/models/iam" - "log" "strings" "github.com/jinzhu/gorm" core "k8s.io/api/core/v1" "errors" - "regexp" - + "github.com/golang/glog" "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/kubernetes/pkg/util/slice" - - 
"github.com/golang/glog" "sort" @@ -128,21 +129,13 @@ func CreateDevopsProject(username string, workspace string, devops models.Devops } func createDefaultDevopsRoleBinding(workspace string, project models.DevopsProject) error { - admins, err := iam.GetWorkspaceUsers(workspace, constants.WorkspaceAdmin) - - if err != nil { - return err - } + admins := []string{""} for _, admin := range admins { createDevopsRoleBinding(workspace, *project.ProjectId, admin, constants.DevopsOwner) } - viewers, err := iam.GetWorkspaceUsers(workspace, constants.WorkspaceViewer) - - if err != nil { - return err - } + viewers := []string{""} for _, viewer := range viewers { createDevopsRoleBinding(workspace, *project.ProjectId, viewer, constants.DevopsReporter) @@ -151,33 +144,6 @@ func createDefaultDevopsRoleBinding(workspace string, project models.DevopsProje return nil } -func deleteDevopsRoleBinding(workspace string, projectId string, user string) { - projects := make([]string, 0) - - if projectId != "" { - projects = append(projects, projectId) - } else { - p, err := GetDevOpsProjects(workspace) - if err != nil { - glog.Warning("delete devops role binding failed", workspace, projectId, user) - return - } - projects = append(projects, p...) 
- } - - for _, project := range projects { - request, _ := http.NewRequest(http.MethodDelete, fmt.Sprintf("http://%s/api/v1alpha/projects/%s/members/%s", constants.DevopsAPIServer, project, user), nil) - request.Header.Add("X-Token-Username", "admin") - resp, err := http.DefaultClient.Do(request) - if err != nil || resp.StatusCode > 200 { - glog.Warning("delete devops role binding failed", workspace, project, user) - } - if resp != nil { - resp.Body.Close() - } - } -} - func createDevopsRoleBinding(workspace string, projectId string, user string, role string) { projects := make([]string, 0) @@ -242,23 +208,17 @@ func ListNamespaceByUser(workspaceName string, username string, keyword string, } }) - clusterRoles, err := iam.GetClusterRoles(username) + rules, err := iam.GetUserClusterRules(username) if err != nil { return 0, nil, err } - rules := make([]v1.PolicyRule, 0) - - for _, clusterRole := range clusterRoles { - rules = append(rules, clusterRole.Rules...) - } - namespacesManager := v1.PolicyRule{APIGroups: []string{"kubesphere.io"}, ResourceNames: []string{workspaceName}, Verbs: []string{"get"}, Resources: []string{"workspaces/namespaces"}} if !iam.RulesMatchesRequired(rules, namespacesManager) { for i := 0; i < len(namespaces); i++ { - roles, err := iam.GetRoles(namespaces[i].Name, username) + roles, err := iam.GetUserRoles(namespaces[i].Name, username) if err != nil { return 0, nil, err } @@ -313,13 +273,12 @@ func DeleteNamespace(workspace string, namespaceName string) error { if err != nil { return err } - if namespace.Labels != nil && namespace.Labels["kubesphere.io/workspace"] == workspace { + if namespace.Labels[constants.WorkspaceLabelKey] == workspace { deletePolicy := meta_v1.DeletePropagationForeground return k8s.Client().CoreV1().Namespaces().Delete(namespaceName, &meta_v1.DeleteOptions{PropagationPolicy: &deletePolicy}) } else { return errors.New("resource not found") } - } func Delete(workspace *models.Workspace) error { @@ -339,6 +298,7 @@ func 
Delete(workspace *models.Workspace) error { return nil } +// TODO func release(workspace *models.Workspace) error { for _, namespace := range workspace.Namespaces { err := DeleteNamespace(workspace.Name, namespace) @@ -348,7 +308,7 @@ func release(workspace *models.Workspace) error { } for _, devops := range workspace.DevopsProjects { - err := DeleteDevopsProject(workspace.Creator, devops) + err := DeleteDevopsProject("admin", devops) if err != nil && !strings.Contains(err.Error(), "not found") { return err } @@ -381,30 +341,6 @@ func workspaceRoleRelease(workspace string) error { return nil } -func Create(workspace *models.Workspace) (*models.Workspace, error) { - - group, err := iam.CreateGroup(workspace.Group) - if err != nil { - return nil, err - } - - created := models.Workspace{ - Group: *group, - } - - created.Members = make([]string, 0) - created.Namespaces = make([]string, 0) - created.DevopsProjects = make([]string, 0) - - err = WorkspaceRoleInit(workspace) - - if err != nil { - return nil, err - } - - return &created, nil -} - func Edit(workspace *models.Workspace) (*models.Workspace, error) { group, err := iam.UpdateGroup(&workspace.Group) @@ -418,24 +354,8 @@ func Edit(workspace *models.Workspace) (*models.Workspace, error) { return workspace, nil } -func Detail(name string) (*models.Workspace, error) { - - conn, err := ldap.Client() - if err != nil { - return nil, err - } - - defer conn.Close() - - group, err := iam.GroupDetail(name, conn) - - if err != nil { - return nil, err - } - - db := mysql.Client() - - workspace, err := convertGroupToWorkspace(db, *group) +func DescribeWorkspace(workspaceName string) (*v1alpha1.Workspace, error) { + workspace, err := informers.KsSharedInformerFactory().Tenant().V1alpha1().Workspaces().Lister().Get(workspaceName) if err != nil { return nil, err @@ -444,48 +364,6 @@ func Detail(name string) (*models.Workspace, error) { return workspace, nil } -// List all workspaces for the current user -func 
ListWorkspaceByUser(username string, keyword string) ([]*models.Workspace, error) { - clusterRoles, err := iam.GetClusterRoles(username) - - if err != nil { - return nil, err - } - - rules := make([]v1.PolicyRule, 0) - - for _, clusterRole := range clusterRoles { - rules = append(rules, clusterRole.Rules...) - } - - workspacesManager := v1.PolicyRule{APIGroups: []string{"kubesphere.io"}, Verbs: []string{"list", "get"}, Resources: []string{"workspaces"}} - - var workspaces []*models.Workspace - if iam.RulesMatchesRequired(rules, workspacesManager) { - workspaces, err = fetch(nil) - } else { - workspaceNames := make([]string, 0) - for _, clusterRole := range clusterRoles { - if groups := regexp.MustCompile(fmt.Sprintf(`^system:(\S+):(%s)$`, strings.Join(constants.WorkSpaceRoles, "|"))).FindStringSubmatch(clusterRole.Name); len(groups) == 3 { - if !slice.ContainsString(workspaceNames, groups[1], nil) { - workspaceNames = append(workspaceNames, groups[1]) - } - } - } - workspaces, err = fetch(workspaceNames) - } - - if keyword != "" { - for i := 0; i < len(workspaces); i++ { - if !strings.Contains(workspaces[i].Name, keyword) { - workspaces = append(workspaces[:i], workspaces[i+1:]...) 
- i-- - } - } - } - return workspaces, err -} - func fetch(names []string) ([]*models.Workspace, error) { if names != nil && len(names) == 0 { @@ -505,7 +383,7 @@ func fetch(names []string) ([]*models.Workspace, error) { } defer conn.Close() for _, name := range names { - group, err := iam.GroupDetail(name, conn) + group, err := iam.DescribeGroup(name) if err != nil { return nil, err } @@ -527,90 +405,6 @@ func fetch(names []string) ([]*models.Workspace, error) { return workspaces, nil } -func ListDevopsProjectsByUser(username string, workspace string, keyword string, orderBy string, reverse bool, limit int, offset int) (int, []models.DevopsProject, error) { - - db := mysql.Client() - - var workspaceDOPBindings []models.WorkspaceDPBinding - - if err := db.Where("workspace = ?", workspace).Find(&workspaceDOPBindings).Error; err != nil { - return 0, nil, err - } - - devOpsProjects := make([]models.DevopsProject, 0) - - request, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s/api/v1alpha/projects", constants.DevopsAPIServer), nil) - request.Header.Add(constants.UserNameHeader, username) - - result, err := http.DefaultClient.Do(request) - if err != nil { - return 0, nil, err - } - defer result.Body.Close() - data, err := ioutil.ReadAll(result.Body) - - if err != nil { - return 0, nil, err - } - - if result.StatusCode > 200 { - return 0, nil, kserr.Parse(data) - } - - err = json.Unmarshal(data, &devOpsProjects) - - if err != nil { - return 0, nil, err - } - - if keyword != "" { - for i := 0; i < len(devOpsProjects); i++ { - if !strings.Contains(devOpsProjects[i].Name, keyword) { - devOpsProjects = append(devOpsProjects[:i], devOpsProjects[i+1:]...) 
- i-- - } - } - } - - sort.Slice(devOpsProjects, func(i, j int) bool { - switch orderBy { - case "name": - if reverse { - return devOpsProjects[i].Name < devOpsProjects[j].Name - } else { - return devOpsProjects[i].Name > devOpsProjects[j].Name - } - default: - if reverse { - return devOpsProjects[i].CreateTime.After(*devOpsProjects[j].CreateTime) - } else { - return devOpsProjects[i].CreateTime.Before(*devOpsProjects[j].CreateTime) - } - } - }) - - for i := 0; i < len(devOpsProjects); i++ { - inWorkspace := false - - for _, binding := range workspaceDOPBindings { - if binding.DevOpsProject == *devOpsProjects[i].ProjectId { - inWorkspace = true - } - } - if !inWorkspace { - devOpsProjects = append(devOpsProjects[:i], devOpsProjects[i+1:]...) - i-- - } - } - - if len(devOpsProjects) < offset { - return len(devOpsProjects), make([]models.DevopsProject, 0), nil - } else if len(devOpsProjects) < limit+offset { - return len(devOpsProjects), devOpsProjects[offset:], nil - } else { - return len(devOpsProjects), devOpsProjects[offset : limit+offset], nil - } -} func convertGroupToWorkspace(db *gorm.DB, group models.Group) (*models.Workspace, error) { namespaces, err := Namespaces(group.Name) @@ -642,449 +436,73 @@ func convertGroupToWorkspace(db *gorm.DB, group models.Group) (*models.Workspace return &workspace, nil } -func CreateNamespace(namespace *core.Namespace) (*core.Namespace, error) { +func InviteUser(workspaceName string, user *models.User) error { - ns, err := k8s.Client().CoreV1().Namespaces().Create(namespace) + workspaceRole, err := iam.GetUserWorkspaceRole(workspaceName, user.Username) - if err != nil { - return nil, err - } - - return ns, nil -} - -func Invite(workspaceName string, users []models.UserInvite) error { - for _, user := range users { - if !slice.ContainsString(constants.WorkSpaceRoles, user.Role, nil) { - return fmt.Errorf("role %s not exist", user.Role) - } - } - - workspace, err := Detail(workspaceName) - - if err != nil { + if err != nil && 
!apierrors.IsNotFound(err) { return err } - for _, user := range users { - if !slice.ContainsString(workspace.Members, user.Username, nil) { - workspace.Members = append(workspace.Members, user.Username) - } - } + workspaceRoleName := fmt.Sprintf("workspace:%s:%s", workspaceName, strings.TrimPrefix(user.WorkspaceRole, "workspace-")) - workspace, err = Edit(workspace) - - if err != nil { - return err - } - - for _, user := range users { - err := CreateWorkspaceRoleBinding(workspace, user.Username, user.Role) + if workspaceRole != nil && workspaceRole.Name != workspaceRoleName { + err := DeleteWorkspaceRoleBinding(workspaceName, user.Username, user.WorkspaceRole) if err != nil { return err } } - return nil + return CreateWorkspaceRoleBinding(workspaceName, user.Username, user.WorkspaceRole) } -func NamespaceExistCheck(namespaceName string) (bool, error) { +func CreateWorkspaceRoleBinding(workspace, username string, role string) error { - _, err := k8s.Client().CoreV1().Namespaces().Get(namespaceName, meta_v1.GetOptions{}) - - if err != nil { - if apierrors.IsNotFound(err) { - return false, nil - } else { - return false, err - } + if !sliceutil.HasString(constants.WorkSpaceRoles, role) { + return apierrors.NewNotFound(schema.GroupResource{Resource: "workspace role"}, role) } - return true, nil + + roleBindingName := fmt.Sprintf("workspace:%s:%s", workspace, strings.TrimPrefix(role, "workspace-")) + + workspaceRoleBinding, err := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister().Get(roleBindingName) + workspaceRoleBinding = workspaceRoleBinding.DeepCopy() + if err != nil { + return err + } + + if !k8sutil.ContainsUser(workspaceRoleBinding.Subjects, username) { + workspaceRoleBinding.Subjects = append(workspaceRoleBinding.Subjects, v1.Subject{APIGroup: "rbac.authorization.k8s.io", Kind: "User", Name: username}) + _, err = k8s.Client().RbacV1().ClusterRoleBindings().Update(workspaceRoleBinding) + } + + return err } -func 
RemoveMembers(workspaceName string, users []string) error { +func DeleteWorkspaceRoleBinding(workspace, username string, role string) error { - workspace, err := Detail(workspaceName) + if !sliceutil.HasString(constants.WorkSpaceRoles, role) { + return apierrors.NewNotFound(schema.GroupResource{Resource: "workspace role"}, role) + } + + roleBindingName := fmt.Sprintf("workspace:%s:%s", workspace, strings.TrimPrefix(role, "workspace-")) + + workspaceRoleBinding, err := informers.SharedInformerFactory().Rbac().V1().ClusterRoleBindings().Lister().Get(roleBindingName) + workspaceRoleBinding = workspaceRoleBinding.DeepCopy() if err != nil { return err } - err = UnbindWorkspace(workspace, users) - - if err != nil { - return err - } - - for i := 0; i < len(workspace.Members); i++ { - if slice.ContainsString(users, workspace.Members[i], nil) { - workspace.Members = append(workspace.Members[:i], workspace.Members[i+1:]...) + for i, v := range workspaceRoleBinding.Subjects { + if v.Kind == v1.UserKind && v.Name == username { + workspaceRoleBinding.Subjects = append(workspaceRoleBinding.Subjects[:i], workspaceRoleBinding.Subjects[i+1:]...) 
i-- } } - workspace, err = Edit(workspace) + workspaceRoleBinding, err = k8s.Client().RbacV1().ClusterRoleBindings().Update(workspaceRoleBinding) - if err != nil { - return err - } - - return nil -} - -func Roles(workspace *models.Workspace) ([]*v1.ClusterRole, error) { - roles := make([]*v1.ClusterRole, 0) - clusterRoleLister := informers.SharedInformerFactory().Rbac().V1().ClusterRoles().Lister() - for _, name := range constants.WorkSpaceRoles { - - clusterRole, err := clusterRoleLister.Get(fmt.Sprintf("system:%s:%s", workspace.Name, name)) - - if err != nil { - if apierrors.IsNotFound(err) { - go WorkspaceRoleInit(workspace) - } - return nil, err - } - - clusterRole = clusterRole.DeepCopy() - - clusterRole.Name = name - roles = append(roles, clusterRole) - } - - return roles, nil -} - -func WorkspaceRoleInit(workspace *models.Workspace) error { - k8sClient := k8s.Client() - - admin := new(v1.ClusterRole) - admin.Name = fmt.Sprintf("system:%s:%s", workspace.Name, constants.WorkspaceAdmin) - admin.Kind = iam.ClusterRoleKind - admin.Rules = []v1.PolicyRule{ - { - Verbs: []string{"*"}, - APIGroups: []string{"kubesphere.io", "account.kubesphere.io"}, - ResourceNames: []string{workspace.Name}, - Resources: []string{"workspaces", "workspaces/*"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"devops.kubesphere.io", "jenkins.kubesphere.io"}, - Resources: []string{"*"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - ResourceNames: []string{"namespaces"}, - Resources: []string{"status/*", "monitoring/*", "quota/*"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"resources"}, - }, - { - Verbs: []string{"list"}, - APIGroups: []string{"account.kubesphere.io"}, - Resources: []string{"users"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - ResourceNames: []string{"workspaces"}, - Resources: []string{"monitoring/" + workspace.Name}, - }, - } - - admin.Labels = 
map[string]string{"creator": "system"} - - regular := new(v1.ClusterRole) - regular.Name = fmt.Sprintf("system:%s:%s", workspace.Name, constants.WorkspaceRegular) - regular.Kind = iam.ClusterRoleKind - regular.Rules = []v1.PolicyRule{ - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces"}, - ResourceNames: []string{workspace.Name}, - }, { - Verbs: []string{"create"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/namespaces", "workspaces/devops"}, - ResourceNames: []string{workspace.Name}, - }, - { - Verbs: []string{"delete"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"workspaces/namespaces", "workspaces/devops"}, - ResourceNames: []string{workspace.Name}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - ResourceNames: []string{"namespaces"}, - Resources: []string{"quota/*", "status/*", "monitoring/*"}, - }, - { - Verbs: []string{"*"}, - APIGroups: []string{"devops.kubesphere.io"}, - Resources: []string{"*"}, - }, { - Verbs: []string{"*"}, - APIGroups: []string{"jenkins.kubesphere.io"}, - Resources: []string{"*"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"resources"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - ResourceNames: []string{workspace.Name}, - Resources: []string{"workspaces/members"}, - }, - } - - regular.Labels = map[string]string{"creator": "system"} - - viewer := new(v1.ClusterRole) - viewer.Name = fmt.Sprintf("system:%s:%s", workspace.Name, constants.WorkspaceViewer) - viewer.Kind = iam.ClusterRoleKind - viewer.Rules = []v1.PolicyRule{ - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io", "account.kubesphere.io"}, - ResourceNames: []string{workspace.Name}, - Resources: []string{"workspaces", "workspaces/*"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - ResourceNames: []string{"namespaces"}, - 
Resources: []string{"quota/*", "status/*", "monitoring/*"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - Resources: []string{"resources"}, - }, - { - Verbs: []string{"get"}, - APIGroups: []string{"kubesphere.io"}, - ResourceNames: []string{"workspaces"}, - Resources: []string{"monitoring/" + workspace.Name}, - }, - { - Verbs: []string{"get", "list"}, - APIGroups: []string{"devops.kubesphere.io"}, - Resources: []string{"*"}, - }, { - Verbs: []string{"get", "list"}, - APIGroups: []string{"jenkins.kubesphere.io"}, - Resources: []string{"*"}, - }, - } - - viewer.Labels = map[string]string{"creator": "system"} - - _, err := k8sClient.RbacV1().ClusterRoles().Create(admin) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - log.Println("cluster role create failed", admin.Name, err) - return err - } - } - - adminRoleBinding := new(v1.ClusterRoleBinding) - adminRoleBinding.Name = admin.Name - adminRoleBinding.RoleRef = v1.RoleRef{Kind: "ClusterRole", Name: admin.Name} - adminRoleBinding.Subjects = []v1.Subject{{Kind: v1.UserKind, Name: workspace.Creator}} - - _, err = k8sClient.RbacV1().ClusterRoleBindings().Create(adminRoleBinding) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - log.Println("cluster rolebinding create failed", adminRoleBinding.Name, err) - return err - } - } - - _, err = k8sClient.RbacV1().ClusterRoles().Create(regular) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - log.Println("cluster role create failed", viewer.Name, err) - return err - } - } - - regularRoleBinding := new(v1.ClusterRoleBinding) - regularRoleBinding.Name = regular.Name - regularRoleBinding.RoleRef = v1.RoleRef{Kind: "ClusterRole", Name: regular.Name} - regularRoleBinding.Subjects = make([]v1.Subject, 0) - _, err = k8sClient.RbacV1().ClusterRoleBindings().Create(regularRoleBinding) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - log.Println("cluster rolebinding create failed", regularRoleBinding.Name, err) - return 
err - } - } - - _, err = k8sClient.RbacV1().ClusterRoles().Create(viewer) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - log.Println("cluster role create failed", viewer.Name, err) - return err - } - } - - viewerRoleBinding := new(v1.ClusterRoleBinding) - viewerRoleBinding.Name = viewer.Name - viewerRoleBinding.RoleRef = v1.RoleRef{Kind: "ClusterRole", Name: viewer.Name} - viewerRoleBinding.Subjects = make([]v1.Subject, 0) - _, err = k8sClient.RbacV1().ClusterRoleBindings().Create(viewerRoleBinding) - if err != nil { - if !apierrors.IsAlreadyExists(err) { - log.Println("cluster rolebinding create failed", viewerRoleBinding.Name, err) - return err - } - } - - return nil -} - -func unbindWorkspaceRole(workspace string, users []string) error { - k8sClient := k8s.Client() - - for _, name := range constants.WorkSpaceRoles { - roleBinding, err := k8sClient.RbacV1().ClusterRoleBindings().Get(fmt.Sprintf("system:%s:%s", workspace, name), meta_v1.GetOptions{}) - - if err != nil { - return err - } - - modify := false - - for i := 0; i < len(roleBinding.Subjects); i++ { - if roleBinding.Subjects[i].Kind == v1.UserKind && slice.ContainsString(users, roleBinding.Subjects[i].Name, nil) { - roleBinding.Subjects = append(roleBinding.Subjects[:i], roleBinding.Subjects[i+1:]...) 
- i-- - modify = true - } - } - - if modify { - roleBinding, err = k8sClient.RbacV1().ClusterRoleBindings().Update(roleBinding) - if err != nil { - return err - } - } - } - - return nil -} - -func unbindNamespacesRole(namespaces []string, users []string) error { - - k8sClient := k8s.Client() - for _, namespace := range namespaces { - - roleBindings, err := k8sClient.RbacV1().RoleBindings(namespace).List(meta_v1.ListOptions{}) - - if err != nil { - return err - } - for _, roleBinding := range roleBindings.Items { - - modify := false - for i := 0; i < len(roleBinding.Subjects); i++ { - if roleBinding.Subjects[i].Kind == v1.UserKind && slice.ContainsString(users, roleBinding.Subjects[i].Name, nil) { - roleBinding.Subjects = append(roleBinding.Subjects[:i], roleBinding.Subjects[i+1:]...) - modify = true - } - } - if modify { - _, err := k8sClient.RbacV1().RoleBindings(namespace).Update(&roleBinding) - if err != nil { - return err - } - } - } - } - - return nil -} - -func UnbindWorkspace(workspace *models.Workspace, users []string) error { - - err := unbindNamespacesRole(workspace.Namespaces, users) - - if err != nil { - return err - } - - err = unbindWorkspaceRole(workspace.Name, users) - - if err != nil { - return err - } - - return nil -} - -func CreateWorkspaceRoleBinding(workspace *models.Workspace, username string, role string) error { - - k8sClient := k8s.Client() - - for _, roleName := range constants.WorkSpaceRoles { - roleBinding, err := k8sClient.RbacV1().ClusterRoleBindings().Get(fmt.Sprintf("system:%s:%s", workspace.Name, roleName), meta_v1.GetOptions{}) - - if err != nil { - if apierrors.IsNotFound(err) { - go WorkspaceRoleInit(workspace) - } - return err - } - - modify := false - - for i, v := range roleBinding.Subjects { - if v.Kind == v1.UserKind && v.Name == username { - if roleName == role { - return nil - } else { - modify = true - roleBinding.Subjects = append(roleBinding.Subjects[:i], roleBinding.Subjects[i+1:]...) 
- if roleName == constants.WorkspaceAdmin || roleName == constants.WorkspaceViewer { - go deleteDevopsRoleBinding(workspace.Name, "", username) - } - break - } - } - } - - if roleName == role { - modify = true - roleBinding.Subjects = append(roleBinding.Subjects, v1.Subject{Kind: v1.UserKind, Name: username}) - if roleName == constants.WorkspaceAdmin { - go createDevopsRoleBinding(workspace.Name, "", username, constants.DevopsOwner) - } else if roleName == constants.WorkspaceViewer { - go createDevopsRoleBinding(workspace.Name, "", username, constants.DevopsReporter) - } - } - - if !modify { - continue - } - - _, err = k8sClient.RbacV1().ClusterRoleBindings().Update(roleBinding) - if err != nil { - return err - } - } - - return nil + return err } func GetDevOpsProjects(workspaceName string) ([]string, error) { @@ -1105,12 +523,12 @@ func GetDevOpsProjects(workspaceName string) ([]string, error) { return devOpsProjects, nil } -func GetOrgMembers(workspace string) ([]string, error) { - ws, err := Detail(workspace) +func WorkspaceUserCount(workspace string) (int, error) { + count, err := iam.WorkspaceUsersTotalCount(workspace) if err != nil { - return nil, err + return 0, err } - return ws.Members, nil + return count, nil } func GetOrgRoles(name string) ([]string, error) { @@ -1135,13 +553,13 @@ func WorkspaceNamespaces(workspaceName string) ([]string, error) { func WorkspaceCount() (int, error) { - workspaces, err := iam.ChildList("") + ws, err := resources.ListResources("", resources.Workspaces, ¶ms.Conditions{}, "", false, 1, 0) if err != nil { return 0, err } - return len(workspaces), nil + return ws.TotalCount, nil } func GetAllProjectNums() (int, error) { @@ -1155,7 +573,6 @@ func GetAllProjectNums() (int, error) { func GetAllDevOpsProjectsNums() (int, error) { db := mysql.Client() - var count int if err := db.Model(&models.WorkspaceDPBinding{}).Count(&count).Error; err != nil { return 0, err @@ -1164,11 +581,9 @@ func GetAllDevOpsProjectsNums() (int, error) { } 
func GetAllAccountNums() (int, error) { - totalCount, _, err := iam.UserList(1, 0) - + users, err := iam.ListUsers(¶ms.Conditions{}, "", false, 1, 0) if err != nil { return 0, err } - - return totalCount, nil + return users.TotalCount, nil } diff --git a/pkg/params/params.go b/pkg/params/params.go index a8a1ecde2..dc679be22 100644 --- a/pkg/params/params.go +++ b/pkg/params/params.go @@ -30,10 +30,11 @@ const ( OrderByParam = "orderBy" ConditionsParam = "conditions" ReverseParam = "reverse" + NameParam = "name" ) -func ParsePaging(req *restful.Request) (limit, offset int) { - paging := req.QueryParameter(PagingParam) +func ParsePaging(paging string) (limit, offset int) { + limit = 10 offset = 0 if groups := regexp.MustCompile(`^limit=(\d+),page=(\d+)$`).FindStringSubmatch(paging); len(groups) == 3 { @@ -47,8 +48,8 @@ func ParsePaging(req *restful.Request) (limit, offset int) { return } -func ParseConditions(req *restful.Request) (*Conditions, error) { - conditionsStr := req.QueryParameter(ConditionsParam) +func ParseConditions(conditionsStr string) (*Conditions, error) { + conditions := &Conditions{Match: make(map[string]string, 0), Fuzzy: make(map[string]string, 0)} if conditionsStr == "" { @@ -81,6 +82,18 @@ func ParseReverse(req *restful.Request) bool { return b } +func ParseArray(str string) []string { + arr := make([]string, 0) + + for _, item := range strings.Split(str, ",") { + if item = strings.TrimSpace(item); item != "" { + arr = append(arr, item) + } + } + + return arr +} + type Conditions struct { Match map[string]string Fuzzy map[string]string diff --git a/pkg/simple/client/k8s/ksclient.go b/pkg/simple/client/k8s/ksclient.go new file mode 100644 index 000000000..064dd7365 --- /dev/null +++ b/pkg/simple/client/k8s/ksclient.go @@ -0,0 +1,46 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package k8s + +import ( + "log" + "sync" + + ks "kubesphere.io/kubesphere/pkg/client/clientset/versioned" +) + +var ( + ksClient *ks.Clientset + ksClientOnce sync.Once +) + +func KsClient() *ks.Clientset { + + ksClientOnce.Do(func() { + + config, err := Config() + + if err != nil { + log.Fatalln(err) + } + + ksClient = ks.NewForConfigOrDie(config) + }) + + return ksClient +} diff --git a/pkg/simple/client/k8s/s2iclient.go b/pkg/simple/client/k8s/s2iclient.go index 4d9929897..910710f79 100644 --- a/pkg/simple/client/k8s/s2iclient.go +++ b/pkg/simple/client/k8s/s2iclient.go @@ -1,3 +1,20 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ package k8s import ( @@ -23,8 +40,6 @@ func S2iClient() *s2i.Clientset { } s2iClient = s2i.NewForConfigOrDie(config) - - KubeConfig = config }) return s2iClient diff --git a/pkg/simple/client/kubesphere/kubesphereclient.go b/pkg/simple/client/kubesphere/kubesphereclient.go new file mode 100644 index 000000000..88ae0f38d --- /dev/null +++ b/pkg/simple/client/kubesphere/kubesphereclient.go @@ -0,0 +1,274 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package kubesphere + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "kubesphere.io/kubesphere/pkg/models" + "log" + "net/http" + "strings" + "sync" +) + +var ( + accountAPIServer string + once sync.Once + c client +) + +type Interface interface { + CreateGroup(group *models.Group) (*models.Group, error) + UpdateGroup(group *models.Group) (*models.Group, error) + DescribeGroup(name string) (*models.Group, error) + DeleteGroup(name string) error +} + +type client struct { + client http.Client +} + +func init() { + flag.StringVar(&accountAPIServer, "ks-account-api-server", "http://ks-account.kubesphere-system.svc", "kubesphere account api server") +} + +func Client() Interface { + once.Do(func() { + c = client{client: http.Client{}} + }) + return c +} + +type Error struct { + status int + message string +} + +func (e Error) Error() string { + return fmt.Sprintf("status: %d,message: %s", e.status, e.message) +} + +func (c client) CreateGroup(group *models.Group) (*models.Group, error) { + data, err := json.Marshal(group) + if err != nil { + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/kapis/iam.kubesphere.io/v1alpha2/groups", accountAPIServer), bytes.NewReader(data)) + + if err != nil { + return nil, err + } + req.Header.Add("Content-Type", "application/json") + + log.Println(req.Method, req.URL, string(data)) + resp, err := c.client.Do(req) + + if err != nil { + return nil, err + } + defer resp.Body.Close() + data, err = ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + if resp.StatusCode > http.StatusOK { + return nil, Error{resp.StatusCode, string(data)} + } + + err = json.Unmarshal(data, group) + + if err != nil { + return nil, err + } + + return group, nil +} + +func (c client) UpdateGroup(group *models.Group) (*models.Group, error) { + data, err := json.Marshal(group) + + if err != nil { + return nil, err + } + + req, err := http.NewRequest(http.MethodPut, 
fmt.Sprintf("%s/kapis/iam.kubesphere.io/v1alpha2/groups/%s", accountAPIServer, group.Name), bytes.NewReader(data)) + + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", "application/json") + if err != nil { + return nil, err + } + log.Println(req.Method, req.URL, string(data)) + resp, err := c.client.Do(req) + + if err != nil { + return nil, err + } + defer resp.Body.Close() + data, err = ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + if resp.StatusCode > http.StatusOK { + return nil, Error{resp.StatusCode, string(data)} + } + + err = json.Unmarshal(data, group) + + if err != nil { + return nil, err + } + + return group, nil +} + +func (c client) DeleteGroup(name string) error { + req, err := http.NewRequest(http.MethodDelete, fmt.Sprintf("%s/kapis/iam.kubesphere.io/v1alpha2/groups/%s", accountAPIServer, name), nil) + + if err != nil { + return err + } + + log.Println(req.Method, req.URL) + resp, err := c.client.Do(req) + + if err != nil { + return err + } + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return err + } + + if resp.StatusCode > http.StatusOK { + return Error{resp.StatusCode, string(data)} + } + + return nil +} + +func (c client) DescribeGroup(name string) (*models.Group, error) { + req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/kapis/iam.kubesphere.io/v1alpha2/groups/%s", accountAPIServer, name), nil) + + if err != nil { + return nil, err + } + log.Println(req.Method, req.URL) + resp, err := c.client.Do(req) + + if err != nil { + return nil, err + } + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + if resp.StatusCode > http.StatusOK { + return nil, Error{resp.StatusCode, string(data)} + } + + var group models.Group + err = json.Unmarshal(data, &group) + + if err != nil { + return nil, err + } + + return &group, nil +} + +func (c client) ListUsers() (*models.PageableResponse, error) { + 
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/kapis/iam.kubesphere.io/v1alpha2/users", accountAPIServer), nil) + + if err != nil { + return nil, err + } + req.Header.Add("Authorization", accountAPIServer) + if err != nil { + return nil, err + } + log.Println(req.Method, req.URL) + resp, err := c.client.Do(req) + + if err != nil { + return nil, err + } + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + + if resp.StatusCode > http.StatusOK { + return nil, Error{resp.StatusCode, string(data)} + } + + var result models.PageableResponse + err = json.Unmarshal(data, &result) + + if err != nil { + return nil, err + } + + return &result, nil +} + +func IsNotFound(err error) bool { + if e, ok := err.(Error); ok { + if e.status == http.StatusNotFound { + return true + } + if strings.Contains(e.message, "not exist") { + return true + } + if strings.Contains(e.message, "not found") { + return true + } + } + return false +} + +func IsExist(err error) bool { + if e, ok := err.(Error); ok { + if e.status == http.StatusConflict { + return true + } + if strings.Contains(e.message, "Already Exists") { + return true + } + } + return false +} diff --git a/pkg/simple/client/ldap/ldapclient.go b/pkg/simple/client/ldap/ldapclient.go index e5afb3345..e2cba094f 100644 --- a/pkg/simple/client/ldap/ldapclient.go +++ b/pkg/simple/client/ldap/ldapclient.go @@ -32,6 +32,7 @@ var ( ManagerPassword string UserSearchBase string GroupSearchBase string + poolSize int ) func init() { @@ -40,13 +41,14 @@ func init() { flag.StringVar(&ManagerPassword, "ldap-manager-password", "admin", "ldap manager password") flag.StringVar(&UserSearchBase, "ldap-user-search-base", "ou=Users,dc=example,dc=org", "ldap user search base") flag.StringVar(&GroupSearchBase, "ldap-group-search-base", "ou=Groups,dc=example,dc=org", "ldap group search base") + flag.IntVar(&poolSize, "ldap-pool-size", 64, "ldap connection pool size") } func 
ldapClientPool() Pool { once.Do(func() { var err error - pool, err = NewChannelPool(8, 96, "kubesphere", func(s string) (ldap.Client, error) { + pool, err = NewChannelPool(8, poolSize, "kubesphere", func(s string) (ldap.Client, error) { conn, err := ldap.Dial("tcp", ldapHost) if err != nil { return nil, err diff --git a/pkg/simple/client/mysql/dbclient.go b/pkg/simple/client/mysql/dbclient.go index 9b23d8be0..f4934469e 100644 --- a/pkg/simple/client/mysql/dbclient.go +++ b/pkg/simple/client/mysql/dbclient.go @@ -44,7 +44,6 @@ func Client() *gorm.DB { dbClientOnce.Do(func() { var err error dbClient, err = gorm.Open("mysql", dsn) - if err != nil { log.Fatalln(err) } diff --git a/pkg/simple/client/openpitrix/applications.go b/pkg/simple/client/openpitrix/applications.go new file mode 100644 index 000000000..9e95e62f5 --- /dev/null +++ b/pkg/simple/client/openpitrix/applications.go @@ -0,0 +1,291 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package openpitrix + +import ( + "encoding/json" + "fmt" + "github.com/golang/glog" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" +) + +const ( + Unknown = "-" + DeploySuffix = "-Deployment" + DaemonSuffix = "-DaemonSet" + StateSuffix = "-StatefulSet" +) + +type Cluster struct { + ClusterID string `json:"cluster_id"` + Name string `json:"name"` + AppID string `json:"app_id"` + VersionID string `json:"version_id"` + Status string `json:"status"` + UpdateTime time.Time `json:"status_time"` + CreateTime time.Time `json:"create_time"` + RunTimeId string `json:"runtime_id"` + Description string `json:"description"` + ClusterRoleSets []ClusterRole `json:"cluster_role_set"` +} + +type ClusterRole struct { + ClusterID string `json:"cluster_id"` + Role string `json:"role"` +} + +type ClusterList struct { + Total int `json:"total_count"` + Clusters []Cluster `json:"cluster_set"` +} + +type VersionList struct { + Total int `json:"total_count"` + Versions []version `json:"app_version_set"` +} + +type version struct { + Name string `json:"name"` + VersionID string `json:"version_id"` +} + +type runtime struct { + RuntimeID string `json:"runtime_id"` + Zone string `json:"zone"` +} + +type runtimeList struct { + Total int `json:"total_count"` + Runtimes []runtime `json:"runtime_set"` +} + +type app struct { + AppId string `json:"app_id"` + Name string `json:"name"` + ChartName string `json:"chart_name"` + RepoId string `json:"repo_id"` +} + +type repo struct { + RepoId string `json:"repo_id"` + Name string `json:"name"` + Url string `json:"url"` +} + +type appList struct { + Total int `json:"total_count"` + Apps []app `json:"app_set"` +} + +type repoList struct { + Total int `json:"total_count"` + Repos []repo `json:"repo_set"` +} + +func GetAppInfo(appId string) (string, string, string, error) { + url := fmt.Sprintf("%s/v1/apps?app_id=%s", openpitrixAPIServer, appId) + resp, err := makeHttpRequest("GET", url, "") + if err != nil { + glog.Error(err) + return 
Unknown, Unknown, Unknown, err + } + + var apps appList + err = json.Unmarshal(resp, &apps) + if err != nil { + glog.Error(err) + return Unknown, Unknown, Unknown, err + } + + if len(apps.Apps) == 0 { + return Unknown, Unknown, Unknown, err + } + + return apps.Apps[0].ChartName, apps.Apps[0].RepoId, apps.Apps[0].AppId, nil +} + +func GetCluster(clusterId string) (*Cluster, error) { + if strings.HasSuffix(openpitrixAPIServer, "/") { + openpitrixAPIServer = strings.TrimSuffix(openpitrixAPIServer, "/") + } + + url := fmt.Sprintf("%s/v1/clusters?cluster_id=%s", openpitrixAPIServer, clusterId) + + resp, err := makeHttpRequest("GET", url, "") + if err != nil { + glog.Error(err) + return nil, err + } + + var clusterList ClusterList + err = json.Unmarshal(resp, &clusterList) + + if err != nil { + glog.Error(err) + return nil, err + } + + if len(clusterList.Clusters) == 0 { + return nil, fmt.Errorf("NotFound, clusterId:%s", clusterId) + } + + return &clusterList.Clusters[0], nil +} + +func ListClusters(runtimeId, searchWord, status string, limit, offset int) (*ClusterList, error) { + if strings.HasSuffix(openpitrixAPIServer, "/") { + openpitrixAPIServer = strings.TrimSuffix(openpitrixAPIServer, "/") + } + + defaultStatus := "status=active&status=stopped&status=pending&status=ceased" + + url := fmt.Sprintf("%s/v1/clusters?limit=%s&offset=%s", openpitrixAPIServer, strconv.Itoa(limit), strconv.Itoa(offset)) + + if searchWord != "" { + url = fmt.Sprintf("%s&search_word=%s", url, searchWord) + } + + if status != "" { + url = fmt.Sprintf("%s&status=%s", url, status) + } else { + url = fmt.Sprintf("%s&%s", url, defaultStatus) + } + + if len(runtimeId) > 0 { + url = fmt.Sprintf("%s&runtime_id=%s", url, runtimeId) + } + + resp, err := makeHttpRequest("GET", url, "") + if err != nil { + glog.Errorf("request %s failed, reason: %s", url, err) + return nil, err + } + + var clusterList ClusterList + err = json.Unmarshal(resp, &clusterList) + + if err != nil { + return nil, err + } + + 
return &clusterList, nil +} + +func GetRepo(repoId string) (string, error) { + url := fmt.Sprintf("%s/v1/repos?repo_id=%s", openpitrixAPIServer, repoId) + resp, err := makeHttpRequest("GET", url, "") + if err != nil { + glog.Error(err) + return Unknown, err + } + + var repos repoList + err = json.Unmarshal(resp, &repos) + if err != nil { + glog.Error(err) + return Unknown, err + } + + if len(repos.Repos) == 0 { + return Unknown, err + } + + return repos.Repos[0].Name, nil +} + +func GetVersion(versionId string) (string, error) { + versionUrl := fmt.Sprintf("%s/v1/app_versions?version_id=%s", openpitrixAPIServer, versionId) + resp, err := makeHttpRequest("GET", versionUrl, "") + if err != nil { + glog.Error(err) + return Unknown, err + } + + var versions VersionList + err = json.Unmarshal(resp, &versions) + if err != nil { + glog.Error(err) + return Unknown, err + } + + if len(versions.Versions) == 0 { + return Unknown, nil + } + return versions.Versions[0].Name, nil +} + +func GetRuntime(runtimeId string) (string, error) { + + versionUrl := fmt.Sprintf("%s/v1/runtimes?runtime_id=%s", openpitrixAPIServer, runtimeId) + resp, err := makeHttpRequest("GET", versionUrl, "") + if err != nil { + glog.Error(err) + return Unknown, err + } + + var runtimes runtimeList + err = json.Unmarshal(resp, &runtimes) + if err != nil { + glog.Error(err) + return Unknown, err + } + + if len(runtimes.Runtimes) == 0 { + return Unknown, nil + } + + return runtimes.Runtimes[0].Zone, nil +} + +func makeHttpRequest(method, url, data string) ([]byte, error) { + var req *http.Request + + var err error + if method == "GET" { + req, err = http.NewRequest(method, url, nil) + } else { + req, err = http.NewRequest(method, url, strings.NewReader(data)) + } + + req.Header.Add("Authorization", openpitrixProxyToken) + + if err != nil { + glog.Error(err) + return nil, err + } + + httpClient := &http.Client{} + resp, err := httpClient.Do(req) + + if err != nil { + err := fmt.Errorf("Request to %s failed, 
method: %s, reason: %s ", url, method, err) + glog.Error(err) + return nil, err + } + + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if resp.StatusCode >= http.StatusBadRequest { + err = fmt.Errorf(string(body)) + } + return body, err +} diff --git a/pkg/simple/client/openpitrix/openpitrixclient.go b/pkg/simple/client/openpitrix/openpitrixclient.go new file mode 100644 index 000000000..6d97b3df0 --- /dev/null +++ b/pkg/simple/client/openpitrix/openpitrixclient.go @@ -0,0 +1,142 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +*/ +package openpitrix + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "net/http" + "sync" +) + +var ( + openpitrixAPIServer string + openpitrixProxyToken string + once sync.Once + c client +) + +type RunTime struct { + RuntimeId string `json:"runtime_id"` + RuntimeUrl string `json:"runtime_url"` + Name string `json:"name"` + Provider string `json:"provider"` + Zone string `json:"zone"` + RuntimeCredential string `json:"runtime_credential"` +} + +type Interface interface { + CreateRuntime(runtime *RunTime) error + DeleteRuntime(runtimeId string) error +} + +type Error struct { + status int + message string +} + +func (e Error) Error() string { + return fmt.Sprintf("status: %d,message: %s", e.status, e.message) +} + +type client struct { + client http.Client +} + +func init() { + flag.StringVar(&openpitrixAPIServer, "openpitrix-api-server", "http://openpitrix-api-gateway.openpitrix-system.svc:9100", "openpitrix api server") + flag.StringVar(&openpitrixProxyToken, "openpitrix-proxy-token", "", "openpitrix proxy token") +} + +func Client() Interface { + once.Do(func() { + c = client{client: http.Client{}} + }) + return c +} + +func (c client) CreateRuntime(runtime *RunTime) error { + + data, err := json.Marshal(runtime) + if err != nil { + return err + } + + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s/v1/runtimes", openpitrixAPIServer), bytes.NewReader(data)) + + if err != nil { + return err + } + req.Header.Add("Content-Type", "application/json") + req.Header.Add("Authorization", openpitrixProxyToken) + + log.Println(req.Method, req.URL, openpitrixProxyToken, string(data)) + resp, err := c.client.Do(req) + + if err != nil { + return err + } + defer resp.Body.Close() + data, err = ioutil.ReadAll(resp.Body) + + if err != nil { + return err + } + + if resp.StatusCode > http.StatusOK { + return Error{resp.StatusCode, string(data)} + } + + return nil +} + +func (c client) DeleteRuntime(runtimeId string) error { + data 
:= []byte(fmt.Sprintf(`{"runtime_id":"%s"}`, runtimeId)) + req, err := http.NewRequest(http.MethodDelete, fmt.Sprintf("%s/v1/runtimes", openpitrixAPIServer), bytes.NewReader(data)) + + if err != nil { + return err + } + + req.Header.Add("Authorization", openpitrixProxyToken) + if err != nil { + return err + } + log.Println(req.Method, req.URL) + resp, err := c.client.Do(req) + + if err != nil { + return err + } + defer resp.Body.Close() + data, err = ioutil.ReadAll(resp.Body) + + if err != nil { + return err + } + + if resp.StatusCode > http.StatusOK { + return Error{resp.StatusCode, string(data)} + } + + return nil +} diff --git a/pkg/simple/client/redis/redis.go b/pkg/simple/client/redis/redis.go index 48111840a..a97cc9c57 100644 --- a/pkg/simple/client/redis/redis.go +++ b/pkg/simple/client/redis/redis.go @@ -20,7 +20,10 @@ package redis import ( "flag" "log" + "os" + "os/signal" "sync" + "syscall" "github.com/go-redis/redis" ) @@ -50,6 +53,12 @@ func Client() *redis.Client { if err := redisClient.Ping().Err(); err != nil { log.Fatalln(err) } + c := make(chan os.Signal, 0) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + redisClient.Close() + }() }) return redisClient diff --git a/pkg/simple/controller/namespace/namespaces.go b/pkg/simple/controller/namespace/namespaces.go deleted file mode 100644 index bf4d3bd0e..000000000 --- a/pkg/simple/controller/namespace/namespaces.go +++ /dev/null @@ -1,196 +0,0 @@ -/* - - Copyright 2019 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - -*/ -package namespace - -import ( - "fmt" - "github.com/golang/glog" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" - "time" - - corev1 "k8s.io/api/core/v1" - rbac "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/api/errors" - metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - coreinformers "k8s.io/client-go/informers/core/v1" - "k8s.io/client-go/informers/rbac/v1" - rbacinformers "k8s.io/client-go/informers/rbac/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" -) - -var log = logf.Log.WithName("namespace-controller") - -const threadiness = 2 - -var ( - defaultRoles = []rbac.Role{ - {ObjectMeta: metaV1.ObjectMeta{Name: "admin", Annotations: map[string]string{"creator": "system"}}, Rules: []rbac.PolicyRule{{Verbs: []string{"*"}, APIGroups: []string{"*"}, Resources: []string{"*"}}}}, - {ObjectMeta: metaV1.ObjectMeta{Name: "operator", Annotations: map[string]string{"creator": "system"}}, Rules: []rbac.PolicyRule{{Verbs: []string{"get", "list", "watch"}, APIGroups: []string{"*"}, Resources: []string{"*"}}, {Verbs: []string{"*"}, APIGroups: []string{"", "apps", "extensions", "batch", "kubesphere.io", "account.kubesphere.io", "autoscaling"}, Resources: []string{"*"}}}}, - {ObjectMeta: metaV1.ObjectMeta{Name: "viewer", Annotations: map[string]string{"creator": "system"}}, Rules: []rbac.PolicyRule{{Verbs: []string{"get", "list", "watch"}, APIGroups: []string{"*"}, Resources: []string{"*"}}}}, - } -) - -type NamespaceController struct { - clientset kubernetes.Interface - namespaceInformer coreinformers.NamespaceInformer - roleInformer v1.RoleInformer - workqueue workqueue.RateLimitingInterface -} - -func NewNamespaceController( - clientset kubernetes.Interface, - namespaceInformer coreinformers.NamespaceInformer, - 
roleInformer rbacinformers.RoleInformer) *NamespaceController { - - controller := &NamespaceController{ - clientset: clientset, - namespaceInformer: namespaceInformer, - roleInformer: roleInformer, - workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespaces"), - } - - log.V(3).Info("setting up event handlers") - - namespaceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.handleObject, - UpdateFunc: func(old, new interface{}) { - newNamespace := new.(*corev1.Namespace) - oldNamespace := old.(*corev1.Namespace) - if newNamespace.ResourceVersion == oldNamespace.ResourceVersion { - return - } - controller.handleObject(new) - }, - DeleteFunc: controller.handleObject, - }) - - roleInformer.Lister() - - return controller -} - -func (c *NamespaceController) Start(stopCh <-chan struct{}) error { - defer utilruntime.HandleCrash() - defer c.workqueue.ShutDown() - - log.V(3).Info("starting namespace controller") - defer glog.Info("shutting down namespace controller") - - // Wait for the caches to be synced before starting workers - log.Info("waiting for informer caches to sync") - if ok := cache.WaitForCacheSync(stopCh, c.namespaceInformer.Informer().HasSynced, c.roleInformer.Informer().HasSynced); !ok { - glog.Fatalf("controller exit with error: failed to wait for caches to sync") - } - - log.V(3).Info("starting workers") - for i := 0; i < threadiness; i++ { - go wait.Until(c.runWorker, time.Second, stopCh) - } - - <-stopCh - - return nil -} - -func (c *NamespaceController) runWorker() { - for c.processNextWorkItem() { - } -} - -func (c *NamespaceController) processNextWorkItem() bool { - obj, shutdown := c.workqueue.Get() - - if shutdown { - return false - } - - err := func(obj interface{}) error { - defer c.workqueue.Done(obj) - var namespace string - var ok bool - - if namespace, ok = obj.(string); !ok { - c.workqueue.Forget(obj) - utilruntime.HandleError(fmt.Errorf("expected string in 
workqueue but got %#v", obj)) - return nil - } - - if err := c.reconcile(namespace); err != nil { - c.workqueue.AddRateLimited(namespace) - return fmt.Errorf("error syncing '%s': %s, requeuing", namespace, err.Error()) - } - - c.workqueue.Forget(obj) - log.V(4).Info("successfully namespace synced ", "namespace", namespace) - return nil - }(obj) - - if err != nil { - utilruntime.HandleError(err) - return true - } - - return true -} - -func (c *NamespaceController) reconcile(name string) error { - - _, err := c.namespaceInformer.Lister().Get(name) - - // Handler delete event - if errors.IsNotFound(err) { - return nil - } - - // Handler update or create event - if err := c.checkAndCreateRoles(name); err != nil { - return err - } - - return nil -} - -func (c *NamespaceController) handleObject(obj interface{}) { - if namespace, ok := obj.(*corev1.Namespace); ok { - c.workqueue.AddRateLimited(namespace.Name) - } -} - -// Create default roles -func (c *NamespaceController) checkAndCreateRoles(namespace string) error { - for _, role := range defaultRoles { - _, err := c.roleInformer.Lister().Roles(namespace).Get(role.Name) - if err != nil { - if errors.IsNotFound(err) { - r := role.DeepCopy() - r.Namespace = namespace - _, err = c.clientset.RbacV1().Roles(namespace).Create(r) - if err != nil && !errors.IsAlreadyExists(err) { - return err - } - } else { - return err - } - } - } - return nil -} diff --git a/pkg/utils/iputils.go b/pkg/utils/iputil/iputils.go similarity index 98% rename from pkg/utils/iputils.go rename to pkg/utils/iputil/iputils.go index 207c761ee..201b531be 100644 --- a/pkg/utils/iputils.go +++ b/pkg/utils/iputil/iputils.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ -package utils +package iputil import ( "net" diff --git a/pkg/utils/jsonutils.go b/pkg/utils/jsonutil/jsonutils.go similarity index 98% rename from pkg/utils/jsonutils.go rename to pkg/utils/jsonutil/jsonutils.go index ffeaa4439..59cfd5aef 100644 --- a/pkg/utils/jsonutils.go +++ b/pkg/utils/jsonutil/jsonutils.go @@ -15,7 +15,7 @@ limitations under the License. */ -package utils +package jsonutil import ( "encoding/json" diff --git a/pkg/utils/jwt/jwt.go b/pkg/utils/jwtutil/jwt.go similarity index 79% rename from pkg/utils/jwt/jwt.go rename to pkg/utils/jwtutil/jwt.go index be7bc9820..fe95334b0 100644 --- a/pkg/utils/jwt/jwt.go +++ b/pkg/utils/jwtutil/jwt.go @@ -15,29 +15,33 @@ limitations under the License. */ -package jwt +package jwtutil import ( "fmt" - "os" - "github.com/dgrijalva/jwt-go" ) const secretEnv = "JWT_SECRET" -var Secret []byte +var secret []byte -func init() { - if env := os.Getenv(secretEnv); env != "" { - Secret = []byte(env) - } else { - fmt.Printf("Environment variable %s not set\n", secretEnv) - } +func Setup(key string) { + secret = []byte(key) } + +func MustSigned(claims jwt.MapClaims) string { + uToken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + token, err := uToken.SignedString(secret) + if err != nil { + panic(err) + } + return token +} + func provideKey(token *jwt.Token) (interface{}, error) { if _, ok := token.Method.(*jwt.SigningMethodHMAC); ok { - return Secret, nil + return secret, nil } else { return nil, fmt.Errorf("expect token signed with HMAC but got %v", token.Header["alg"]) } diff --git a/pkg/utils/k8sutil/k8sutil.go b/pkg/utils/k8sutil/k8sutil.go new file mode 100644 index 000000000..7d01fa424 --- /dev/null +++ b/pkg/utils/k8sutil/k8sutil.go @@ -0,0 +1,73 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ +package k8sutil + +import ( + "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "kubesphere.io/kubesphere/pkg/models" +) + +func IsControlledBy(reference []metav1.OwnerReference, kind string, name string) bool { + for _, ref := range reference { + if ref.Kind == kind && (name == "" || ref.Name == name) { + return true + } + } + return false +} + +func GetControlledWorkspace(reference []metav1.OwnerReference) string { + for _, ref := range reference { + if ref.Kind == "Workspace" { + return ref.Name + } + } + return "" +} + +func ContainsUser(subjects interface{}, username string) bool { + switch subjects.(type) { + case []*v1.Subject: + for _, subject := range subjects.([]*v1.Subject) { + if subject.Kind == v1.UserKind && subject.Name == username { + return true + } + } + case []v1.Subject: + for _, subject := range subjects.([]v1.Subject) { + if subject.Kind == v1.UserKind && subject.Name == username { + return true + } + } + case []models.User: + for _, u := range subjects.([]models.User) { + if u.Username == username { + return true + } + } + + case []*models.User: + for _, u := range subjects.([]*models.User) { + if u.Username == username { + return true + } + } + } + return false +} diff --git a/pkg/utils/sliceutils.go b/pkg/utils/sliceutil/sliceutils.go similarity index 98% rename from pkg/utils/sliceutils.go rename to pkg/utils/sliceutil/sliceutils.go index e93a81440..0f1270dbc 100644 --- a/pkg/utils/sliceutils.go +++ b/pkg/utils/sliceutil/sliceutils.go @@ -15,7 +15,7 @@ limitations under the License. 
*/ -package utils +package sliceutil func RemoveString(slice []string, remove func(item string) bool) []string { for i := 0; i < len(slice); i++ { diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go deleted file mode 100644 index 3fe9c0865..000000000 --- a/pkg/utils/utils.go +++ /dev/null @@ -1,7 +0,0 @@ -package utils - -func CheckError(err error) { - if err != nil { - panic(err) - } -} diff --git a/tools/cmd/doc-gen/main.go b/tools/cmd/doc-gen/main.go index 2c3be77ac..29161dd13 100644 --- a/tools/cmd/doc-gen/main.go +++ b/tools/cmd/doc-gen/main.go @@ -27,6 +27,7 @@ import ( "github.com/go-openapi/spec" "io/ioutil" _ "kubesphere.io/kubesphere/pkg/apis/iam/install" + _ "kubesphere.io/kubesphere/pkg/apis/logging/install" "kubesphere.io/kubesphere/pkg/apiserver/runtime" "log" // Install apis @@ -35,6 +36,7 @@ import ( _ "kubesphere.io/kubesphere/pkg/apis/operations/install" _ "kubesphere.io/kubesphere/pkg/apis/resources/install" _ "kubesphere.io/kubesphere/pkg/apis/servicemesh/metrics/install" + _ "kubesphere.io/kubesphere/pkg/apis/tenant/install" ) var output string diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go b/vendor/github.com/docker/spdystream/LICENSE similarity index 84% rename from vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go rename to vendor/github.com/docker/spdystream/LICENSE index 38393d541..9e4bd4dbe 100644 --- a/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go +++ b/vendor/github.com/docker/spdystream/LICENSE @@ -1,37 +1,4 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -func initApache2() { - Licenses["apache"] = License{ - Name: "Apache 2.0", - PossibleMatches: []string{"apache", "apache20", "apache 2.0", "apache2.0", "apache-2.0"}, - Header: ` -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.`, - Text: ` Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -209,18 +176,7 @@ limitations under the License.`, END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] + Copyright 2014-2015 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -233,6 +189,3 @@ limitations under the License.`, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -`, - } -} diff --git a/vendor/github.com/docker/spdystream/LICENSE.docs b/vendor/github.com/docker/spdystream/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/docker/spdystream/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. 
More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. 
You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. 
However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. 
No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. 
diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go new file mode 100644 index 000000000..2023ecf84 --- /dev/null +++ b/vendor/github.com/docker/spdystream/connection.go @@ -0,0 +1,959 @@ +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "github.com/docker/spdystream/spdy" +) + +var ( + ErrInvalidStreamId = errors.New("Invalid stream id") + ErrTimeout = errors.New("Timeout occurred") + ErrReset = errors.New("Stream reset") + ErrWriteClosedStream = errors.New("Write on closed stream") +) + +const ( + FRAME_WORKERS = 5 + QUEUE_SIZE = 50 +) + +type StreamHandler func(stream *Stream) + +type AuthHandler func(header http.Header, slot uint8, parent uint32) bool + +type idleAwareFramer struct { + f *spdy.Framer + conn *Connection + writeLock sync.Mutex + resetChan chan struct{} + setTimeoutLock sync.Mutex + setTimeoutChan chan time.Duration + timeout time.Duration +} + +func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer { + iaf := &idleAwareFramer{ + f: framer, + resetChan: make(chan struct{}, 2), + // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about + // the same time the connection is being closed + setTimeoutChan: make(chan time.Duration, 1), + } + return iaf +} + +func (i *idleAwareFramer) monitor() { + var ( + timer *time.Timer + expired <-chan time.Time + resetChan = i.resetChan + setTimeoutChan = i.setTimeoutChan + ) +Loop: + for { + select { + case timeout := <-i.setTimeoutChan: + i.timeout = timeout + if timeout == 0 { + if timer != nil { + timer.Stop() + } + } else { + if timer == nil { + timer = time.NewTimer(timeout) + expired = timer.C + } else { + timer.Reset(timeout) + } + } + case <-resetChan: + if timer != nil && i.timeout > 0 { + timer.Reset(i.timeout) + } + case <-expired: + i.conn.streamCond.L.Lock() + streams := i.conn.streams + i.conn.streams = make(map[spdy.StreamId]*Stream) + 
i.conn.streamCond.Broadcast() + i.conn.streamCond.L.Unlock() + go func() { + for _, stream := range streams { + stream.resetStream() + } + i.conn.Close() + }() + case <-i.conn.closeChan: + if timer != nil { + timer.Stop() + } + + // Start a goroutine to drain resetChan. This is needed because we've seen + // some unit tests with large numbers of goroutines get into a situation + // where resetChan fills up, at least 1 call to Write() is still trying to + // send to resetChan, the connection gets closed, and this case statement + // attempts to grab the write lock that Write() already has, causing a + // deadlock. + // + // See https://github.com/docker/spdystream/issues/49 for more details. + go func() { + for _ = range resetChan { + } + }() + + go func() { + for _ = range setTimeoutChan { + } + }() + + i.writeLock.Lock() + close(resetChan) + i.resetChan = nil + i.writeLock.Unlock() + + i.setTimeoutLock.Lock() + close(i.setTimeoutChan) + i.setTimeoutChan = nil + i.setTimeoutLock.Unlock() + + break Loop + } + } + + // Drain resetChan + for _ = range resetChan { + } +} + +func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error { + i.writeLock.Lock() + defer i.writeLock.Unlock() + if i.resetChan == nil { + return io.EOF + } + err := i.f.WriteFrame(frame) + if err != nil { + return err + } + + i.resetChan <- struct{}{} + + return nil +} + +func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) { + frame, err := i.f.ReadFrame() + if err != nil { + return nil, err + } + + // resetChan should never be closed since it is only closed + // when the connection has closed its closeChan. 
This closure + // only occurs after all Reads have finished + // TODO (dmcgowan): refactor relationship into connection + i.resetChan <- struct{}{} + + return frame, nil +} + +func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) { + i.setTimeoutLock.Lock() + defer i.setTimeoutLock.Unlock() + + if i.setTimeoutChan == nil { + return + } + + i.setTimeoutChan <- timeout +} + +type Connection struct { + conn net.Conn + framer *idleAwareFramer + + closeChan chan bool + goneAway bool + lastStreamChan chan<- *Stream + goAwayTimeout time.Duration + closeTimeout time.Duration + + streamLock *sync.RWMutex + streamCond *sync.Cond + streams map[spdy.StreamId]*Stream + + nextIdLock sync.Mutex + receiveIdLock sync.Mutex + nextStreamId spdy.StreamId + receivedStreamId spdy.StreamId + + pingIdLock sync.Mutex + pingId uint32 + pingChans map[uint32]chan error + + shutdownLock sync.Mutex + shutdownChan chan error + hasShutdown bool + + // for testing https://github.com/docker/spdystream/pull/56 + dataFrameHandler func(*spdy.DataFrame) error +} + +// NewConnection creates a new spdy connection from an existing +// network connection. 
+func NewConnection(conn net.Conn, server bool) (*Connection, error) { + framer, framerErr := spdy.NewFramer(conn, conn) + if framerErr != nil { + return nil, framerErr + } + idleAwareFramer := newIdleAwareFramer(framer) + var sid spdy.StreamId + var rid spdy.StreamId + var pid uint32 + if server { + sid = 2 + rid = 1 + pid = 2 + } else { + sid = 1 + rid = 2 + pid = 1 + } + + streamLock := new(sync.RWMutex) + streamCond := sync.NewCond(streamLock) + + session := &Connection{ + conn: conn, + framer: idleAwareFramer, + + closeChan: make(chan bool), + goAwayTimeout: time.Duration(0), + closeTimeout: time.Duration(0), + + streamLock: streamLock, + streamCond: streamCond, + streams: make(map[spdy.StreamId]*Stream), + nextStreamId: sid, + receivedStreamId: rid, + + pingId: pid, + pingChans: make(map[uint32]chan error), + + shutdownChan: make(chan error), + } + session.dataFrameHandler = session.handleDataFrame + idleAwareFramer.conn = session + go idleAwareFramer.monitor() + + return session, nil +} + +// Ping sends a ping frame across the connection and +// returns the response time +func (s *Connection) Ping() (time.Duration, error) { + pid := s.pingId + s.pingIdLock.Lock() + if s.pingId > 0x7ffffffe { + s.pingId = s.pingId - 0x7ffffffe + } else { + s.pingId = s.pingId + 2 + } + s.pingIdLock.Unlock() + pingChan := make(chan error) + s.pingChans[pid] = pingChan + defer delete(s.pingChans, pid) + + frame := &spdy.PingFrame{Id: pid} + startTime := time.Now() + writeErr := s.framer.WriteFrame(frame) + if writeErr != nil { + return time.Duration(0), writeErr + } + select { + case <-s.closeChan: + return time.Duration(0), errors.New("connection closed") + case err, ok := <-pingChan: + if ok && err != nil { + return time.Duration(0), err + } + break + } + return time.Now().Sub(startTime), nil +} + +// Serve handles frames sent from the server, including reply frames +// which are needed to fully initiate connections. 
Both clients and servers +// should call Serve in a separate goroutine before creating streams. +func (s *Connection) Serve(newHandler StreamHandler) { + // use a WaitGroup to wait for all frames to be drained after receiving + // go-away. + var wg sync.WaitGroup + + // Parition queues to ensure stream frames are handled + // by the same worker, ensuring order is maintained + frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS) + for i := 0; i < FRAME_WORKERS; i++ { + frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE) + + // Ensure frame queue is drained when connection is closed + go func(frameQueue *PriorityFrameQueue) { + <-s.closeChan + frameQueue.Drain() + }(frameQueues[i]) + + wg.Add(1) + go func(frameQueue *PriorityFrameQueue) { + // let the WaitGroup know this worker is done + defer wg.Done() + + s.frameHandler(frameQueue, newHandler) + }(frameQueues[i]) + } + + var ( + partitionRoundRobin int + goAwayFrame *spdy.GoAwayFrame + ) +Loop: + for { + readFrame, err := s.framer.ReadFrame() + if err != nil { + if err != io.EOF { + debugMessage("frame read error: %s", err) + } else { + debugMessage("(%p) EOF received", s) + } + break + } + var priority uint8 + var partition int + switch frame := readFrame.(type) { + case *spdy.SynStreamFrame: + if s.checkStreamFrame(frame) { + priority = frame.Priority + partition = int(frame.StreamId % FRAME_WORKERS) + debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId) + s.addStreamFrame(frame) + } else { + debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId) + continue + } + case *spdy.SynReplyFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.DataFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.RstStreamFrame: + priority = s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.HeadersFrame: + priority = 
s.getStreamPriority(frame.StreamId) + partition = int(frame.StreamId % FRAME_WORKERS) + case *spdy.PingFrame: + priority = 0 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + case *spdy.GoAwayFrame: + // hold on to the go away frame and exit the loop + goAwayFrame = frame + break Loop + default: + priority = 7 + partition = partitionRoundRobin + partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS + } + frameQueues[partition].Push(readFrame, priority) + } + close(s.closeChan) + + // wait for all frame handler workers to indicate they've drained their queues + // before handling the go away frame + wg.Wait() + + if goAwayFrame != nil { + s.handleGoAwayFrame(goAwayFrame) + } + + // now it's safe to close remote channels and empty s.streams + s.streamCond.L.Lock() + // notify streams that they're now closed, which will + // unblock any stream Read() calls + for _, stream := range s.streams { + stream.closeRemoteChannels() + } + s.streams = make(map[spdy.StreamId]*Stream) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) { + for { + popFrame := frameQueue.Pop() + if popFrame == nil { + return + } + + var frameErr error + switch frame := popFrame.(type) { + case *spdy.SynStreamFrame: + frameErr = s.handleStreamFrame(frame, newHandler) + case *spdy.SynReplyFrame: + frameErr = s.handleReplyFrame(frame) + case *spdy.DataFrame: + frameErr = s.dataFrameHandler(frame) + case *spdy.RstStreamFrame: + frameErr = s.handleResetFrame(frame) + case *spdy.HeadersFrame: + frameErr = s.handleHeaderFrame(frame) + case *spdy.PingFrame: + frameErr = s.handlePingFrame(frame) + case *spdy.GoAwayFrame: + frameErr = s.handleGoAwayFrame(frame) + default: + frameErr = fmt.Errorf("unhandled frame type: %T", frame) + } + + if frameErr != nil { + debugMessage("frame handling error: %s", frameErr) + } + } +} + +func (s *Connection) 
getStreamPriority(streamId spdy.StreamId) uint8 { + stream, streamOk := s.getStream(streamId) + if !streamOk { + return 7 + } + return stream.priority +} + +func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) { + var parent *Stream + if frame.AssociatedToStreamId != spdy.StreamId(0) { + parent, _ = s.getStream(frame.AssociatedToStreamId) + } + + stream := &Stream{ + streamId: frame.StreamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: frame.Headers, + finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00, + replyCond: sync.NewCond(new(sync.Mutex)), + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + priority: frame.Priority, + } + if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 { + stream.closeRemoteChannels() + } + + s.addStream(stream) +} + +// checkStreamFrame checks to see if a stream frame is allowed. +// If the stream is invalid, then a reset frame with protocol error +// will be returned. 
+func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool { + s.receiveIdLock.Lock() + defer s.receiveIdLock.Unlock() + if s.goneAway { + return false + } + validationErr := s.validateStreamId(frame.StreamId) + if validationErr != nil { + go func() { + resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId) + if resetErr != nil { + debugMessage("reset error: %s", resetErr) + } + }() + return false + } + return true +} + +func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error { + stream, ok := s.getStream(frame.StreamId) + if !ok { + return fmt.Errorf("Missing stream: %d", frame.StreamId) + } + + newHandler(stream) + + return nil +} + +func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error { + debugMessage("(%p) Reply frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("Reply frame gone away for %d", frame.StreamId) + // Stream has already gone away + return nil + } + if stream.replied { + // Stream has already received reply + return nil + } + stream.replied = true + + // TODO Check for error + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + close(stream.startChan) + + return nil +} + +func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already been removed + return nil + } + s.removeStream(stream) + stream.closeRemoteChannels() + + if !stream.replied { + stream.replied = true + stream.startChan <- ErrReset + close(stream.startChan) + } + + stream.finishLock.Lock() + stream.finished = true + stream.finishLock.Unlock() + + return nil +} + +func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already gone away + return nil + } + if !stream.replied { + // No 
reply received...Protocol error? + return nil + } + + // TODO limit headers while not blocking (use buffered chan or goroutine?) + select { + case <-stream.closeChan: + return nil + case stream.headerChan <- frame.Headers: + } + + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + return nil +} + +func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error { + debugMessage("(%p) Data frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId) + // Stream has already gone away + return nil + } + if !stream.replied { + debugMessage("(%p) Data frame not replied %d", s, frame.StreamId) + // No reply received...Protocol error? + return nil + } + + debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId) + if len(frame.Data) > 0 { + stream.dataLock.RLock() + select { + case <-stream.closeChan: + debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId) + case stream.dataChan <- frame.Data: + debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId) + } + stream.dataLock.RUnlock() + } + if (frame.Flags & spdy.DataFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + return nil +} + +func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error { + if s.pingId&0x01 != frame.Id&0x01 { + return s.framer.WriteFrame(frame) + } + pingChan, pingOk := s.pingChans[frame.Id] + if pingOk { + close(pingChan) + } + return nil +} + +func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error { + debugMessage("(%p) Go away received", s) + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + if s.lastStreamChan != nil { + stream, _ := s.getStream(frame.LastGoodStreamId) + go func() { + s.lastStreamChan <- stream + }() + } + + // Do not block frame handler waiting for 
closure + go s.shutdown(s.goAwayTimeout) + + return nil +} + +func (s *Connection) remoteStreamFinish(stream *Stream) { + stream.closeRemoteChannels() + + stream.finishLock.Lock() + if stream.finished { + // Stream is fully closed, cleanup + s.removeStream(stream) + } + stream.finishLock.Unlock() +} + +// CreateStream creates a new spdy stream using the parameters for +// creating the stream frame. The stream frame will be sent upon +// calling this function, however this function does not wait for +// the reply frame. If waiting for the reply is desired, use +// the stream Wait or WaitTimeout function on the stream returned +// by this function. +func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) { + // MUST synchronize stream creation (all the way to writing the frame) + // as stream IDs **MUST** increase monotonically. + s.nextIdLock.Lock() + defer s.nextIdLock.Unlock() + + streamId := s.getNextStreamId() + if streamId == 0 { + return nil, fmt.Errorf("Unable to get new stream id") + } + + stream := &Stream{ + streamId: streamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: headers, + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + } + + debugMessage("(%p) (%p) Create stream", s, stream) + + s.addStream(stream) + + return stream, s.sendStream(stream, fin) +} + +func (s *Connection) shutdown(closeTimeout time.Duration) { + // TODO Ensure this isn't called multiple times + s.shutdownLock.Lock() + if s.hasShutdown { + s.shutdownLock.Unlock() + return + } + s.hasShutdown = true + s.shutdownLock.Unlock() + + var timeout <-chan time.Time + if closeTimeout > time.Duration(0) { + timeout = time.After(closeTimeout) + } + streamsClosed := make(chan bool) + + go func() { + s.streamCond.L.Lock() + for len(s.streams) > 0 { + debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams) + s.streamCond.Wait() + } + s.streamCond.L.Unlock() + 
close(streamsClosed) + }() + + var err error + select { + case <-streamsClosed: + // No active streams, close should be safe + err = s.conn.Close() + case <-timeout: + // Force ungraceful close + err = s.conn.Close() + // Wait for cleanup to clear active streams + <-streamsClosed + } + + if err != nil { + duration := 10 * time.Minute + time.AfterFunc(duration, func() { + select { + case err, ok := <-s.shutdownChan: + if ok { + debugMessage("Unhandled close error after %s: %s", duration, err) + } + default: + } + }) + s.shutdownChan <- err + } + close(s.shutdownChan) + + return +} + +// Closes spdy connection by sending GoAway frame and initiating shutdown +func (s *Connection) Close() error { + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + var lastStreamId spdy.StreamId + if s.receivedStreamId > 2 { + lastStreamId = s.receivedStreamId - 2 + } + + goAwayFrame := &spdy.GoAwayFrame{ + LastGoodStreamId: lastStreamId, + Status: spdy.GoAwayOK, + } + + err := s.framer.WriteFrame(goAwayFrame) + if err != nil { + return err + } + + go s.shutdown(s.closeTimeout) + + return nil +} + +// CloseWait closes the connection and waits for shutdown +// to finish. Note the underlying network Connection +// is not closed until the end of shutdown. +func (s *Connection) CloseWait() error { + closeErr := s.Close() + if closeErr != nil { + return closeErr + } + shutdownErr, ok := <-s.shutdownChan + if ok { + return shutdownErr + } + return nil +} + +// Wait waits for the connection to finish shutdown or for +// the wait timeout duration to expire. This needs to be +// called either after Close has been called or the GOAWAYFRAME +// has been received. If the wait timeout is 0, this function +// will block until shutdown finishes. If wait is never called +// and a shutdown error occurs, that error will be logged as an +// unhandled error. 
+func (s *Connection) Wait(waitTimeout time.Duration) error { + var timeout <-chan time.Time + if waitTimeout > time.Duration(0) { + timeout = time.After(waitTimeout) + } + + select { + case err, ok := <-s.shutdownChan: + if ok { + return err + } + case <-timeout: + return ErrTimeout + } + return nil +} + +// NotifyClose registers a channel to be called when the remote +// peer inidicates connection closure. The last stream to be +// received by the remote will be sent on the channel. The notify +// timeout will determine the duration between go away received +// and the connection being closed. +func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) { + s.goAwayTimeout = timeout + s.lastStreamChan = c +} + +// SetCloseTimeout sets the amount of time close will wait for +// streams to finish before terminating the underlying network +// connection. Setting the timeout to 0 will cause close to +// wait forever, which is the default. +func (s *Connection) SetCloseTimeout(timeout time.Duration) { + s.closeTimeout = timeout +} + +// SetIdleTimeout sets the amount of time the connection may sit idle before +// it is forcefully terminated. 
+func (s *Connection) SetIdleTimeout(timeout time.Duration) { + s.framer.setIdleTimeout(timeout) +} + +func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + headerFrame := &spdy.HeadersFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(headerFrame) +} + +func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + replyFrame := &spdy.SynReplyFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(replyFrame) +} + +func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error { + resetFrame := &spdy.RstStreamFrame{ + StreamId: streamId, + Status: status, + } + + return s.framer.WriteFrame(resetFrame) +} + +func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error { + return s.sendResetFrame(status, stream.streamId) +} + +func (s *Connection) sendStream(stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + stream.finished = true + } + + var parentId spdy.StreamId + if stream.parent != nil { + parentId = stream.parent.streamId + } + + streamFrame := &spdy.SynStreamFrame{ + StreamId: spdy.StreamId(stream.streamId), + AssociatedToStreamId: spdy.StreamId(parentId), + Headers: stream.headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(streamFrame) +} + +// getNextStreamId returns the next sequential id +// every call should produce a unique value or an error +func (s *Connection) getNextStreamId() spdy.StreamId { + sid := s.nextStreamId + if sid > 0x7fffffff { + return 0 + } + s.nextStreamId = s.nextStreamId + 2 + return sid +} + +// 
PeekNextStreamId returns the next sequential id and keeps the next id untouched +func (s *Connection) PeekNextStreamId() spdy.StreamId { + sid := s.nextStreamId + return sid +} + +func (s *Connection) validateStreamId(rid spdy.StreamId) error { + if rid > 0x7fffffff || rid < s.receivedStreamId { + return ErrInvalidStreamId + } + s.receivedStreamId = rid + 2 + return nil +} + +func (s *Connection) addStream(stream *Stream) { + s.streamCond.L.Lock() + s.streams[stream.streamId] = stream + debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) removeStream(stream *Stream) { + s.streamCond.L.Lock() + delete(s.streams, stream.streamId) + debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) { + s.streamLock.RLock() + stream, ok = s.streams[streamId] + s.streamLock.RUnlock() + return +} + +// FindStream looks up the given stream id and either waits for the +// stream to be found or returns nil if the stream id is no longer +// valid. +func (s *Connection) FindStream(streamId uint32) *Stream { + var stream *Stream + var ok bool + s.streamCond.L.Lock() + stream, ok = s.streams[spdy.StreamId(streamId)] + debugMessage("(%p) Found stream %d? 
%t", s, spdy.StreamId(streamId), ok) + for !ok && streamId >= uint32(s.receivedStreamId) { + s.streamCond.Wait() + stream, ok = s.streams[spdy.StreamId(streamId)] + } + s.streamCond.L.Unlock() + return stream +} + +func (s *Connection) CloseChan() <-chan bool { + return s.closeChan +} diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go new file mode 100644 index 000000000..d4ee7be81 --- /dev/null +++ b/vendor/github.com/docker/spdystream/handlers.go @@ -0,0 +1,36 @@ +package spdystream + +import ( + "io" + "net/http" +) + +// MirrorStreamHandler mirrors all streams. +func MirrorStreamHandler(stream *Stream) { + replyErr := stream.SendReply(http.Header{}, false) + if replyErr != nil { + return + } + + go func() { + io.Copy(stream, stream) + stream.Close() + }() + go func() { + for { + header, receiveErr := stream.ReceiveHeader() + if receiveErr != nil { + return + } + sendErr := stream.SendHeader(header, false) + if sendErr != nil { + return + } + } + }() +} + +// NoopStreamHandler does nothing when stream connects. 
+func NoOpStreamHandler(stream *Stream) { + stream.SendReply(http.Header{}, false) +} diff --git a/vendor/github.com/docker/spdystream/priority.go b/vendor/github.com/docker/spdystream/priority.go new file mode 100644 index 000000000..fc8582b5c --- /dev/null +++ b/vendor/github.com/docker/spdystream/priority.go @@ -0,0 +1,98 @@ +package spdystream + +import ( + "container/heap" + "sync" + + "github.com/docker/spdystream/spdy" +) + +type prioritizedFrame struct { + frame spdy.Frame + priority uint8 + insertId uint64 +} + +type frameQueue []*prioritizedFrame + +func (fq frameQueue) Len() int { + return len(fq) +} + +func (fq frameQueue) Less(i, j int) bool { + if fq[i].priority == fq[j].priority { + return fq[i].insertId < fq[j].insertId + } + return fq[i].priority < fq[j].priority +} + +func (fq frameQueue) Swap(i, j int) { + fq[i], fq[j] = fq[j], fq[i] +} + +func (fq *frameQueue) Push(x interface{}) { + *fq = append(*fq, x.(*prioritizedFrame)) +} + +func (fq *frameQueue) Pop() interface{} { + old := *fq + n := len(old) + *fq = old[0 : n-1] + return old[n-1] +} + +type PriorityFrameQueue struct { + queue *frameQueue + c *sync.Cond + size int + nextInsertId uint64 + drain bool +} + +func NewPriorityFrameQueue(size int) *PriorityFrameQueue { + queue := make(frameQueue, 0, size) + heap.Init(&queue) + + return &PriorityFrameQueue{ + queue: &queue, + size: size, + c: sync.NewCond(&sync.Mutex{}), + } +} + +func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() >= q.size { + q.c.Wait() + } + pFrame := &prioritizedFrame{ + frame: frame, + priority: priority, + insertId: q.nextInsertId, + } + q.nextInsertId = q.nextInsertId + 1 + heap.Push(q.queue, pFrame) + q.c.Signal() +} + +func (q *PriorityFrameQueue) Pop() spdy.Frame { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() == 0 { + if q.drain { + return nil + } + q.c.Wait() + } + frame := heap.Pop(q.queue).(*prioritizedFrame).frame + 
q.c.Signal() + return frame +} + +func (q *PriorityFrameQueue) Drain() { + q.c.L.Lock() + defer q.c.L.Unlock() + q.drain = true + q.c.Broadcast() +} diff --git a/vendor/github.com/docker/spdystream/spdy/dictionary.go b/vendor/github.com/docker/spdystream/spdy/dictionary.go new file mode 100644 index 000000000..5a5ff0e14 --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/dictionary.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +// headerDictionary is the dictionary sent to the zlib compressor/decompressor. +var headerDictionary = []byte{ + 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, + 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, + 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, + 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 
0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, + 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, + 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, + 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, + 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, + 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, + 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, + 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, + 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, + 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, + 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, + 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, + 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74, + 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, + 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 
0x00, 0x00, + 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, + 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, + 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, + 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, + 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, + 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, + 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, + 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, + 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, + 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, + 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77, + 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, + 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, + 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, + 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, + 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, + 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, + 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 
0x67, 0x69, + 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, + 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, + 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, + 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, + 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, + 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30, + 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, + 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, + 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, + 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, + 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, + 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, + 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, + 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f, + 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, + 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, + 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, + 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, + 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, + 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, + 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, + 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, + 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46, + 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, + 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, + 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, + 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 
0x74, 0x20, + 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, + 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30, + 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, + 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, + 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, + 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, + 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, + 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, + 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, + 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, + 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, + 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, + 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, + 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, + 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, + 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, + 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, + 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, + 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, + 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e, +} diff --git a/vendor/github.com/docker/spdystream/spdy/read.go b/vendor/github.com/docker/spdystream/spdy/read.go new file mode 100644 index 000000000..9359a9501 --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/read.go @@ -0,0 +1,348 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +import ( + "compress/zlib" + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynStreamFrame(h, frame) +} + +func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynReplyFrame(h, frame) +} + +func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + var numSettings uint32 + if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil { + return err + } + frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings) + for i := uint32(0); i < numSettings; i++ { + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil { + return err + } + frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24) + frame.FlagIdValues[i].Id &= 0xffffff + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil { + return err + } + } + return nil +} + +func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil { + return err + } + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, StreamId(frame.Id)} + } + return nil +} + +func (frame *GoAwayFrame) read(h ControlFrameHeader, f 
*Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + return nil +} + +func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readHeadersFrame(h, frame) +} + +func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil { + return err + } + return nil +} + +func newControlFrame(frameType ControlFrameType) (controlFrame, error) { + ctor, ok := cframeCtor[frameType] + if !ok { + return nil, &Error{Err: InvalidControlFrame} + } + return ctor(), nil +} + +var cframeCtor = map[ControlFrameType]func() controlFrame{ + TypeSynStream: func() controlFrame { return new(SynStreamFrame) }, + TypeSynReply: func() controlFrame { return new(SynReplyFrame) }, + TypeRstStream: func() controlFrame { return new(RstStreamFrame) }, + TypeSettings: func() controlFrame { return new(SettingsFrame) }, + TypePing: func() controlFrame { return new(PingFrame) }, + TypeGoAway: func() controlFrame { return new(GoAwayFrame) }, + TypeHeaders: func() controlFrame { return new(HeadersFrame) }, + TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) }, +} + +func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error { + if f.headerDecompressor != nil { + f.headerReader.N = payloadSize + 
return nil + } + f.headerReader = io.LimitedReader{R: f.r, N: payloadSize} + decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary)) + if err != nil { + return err + } + f.headerDecompressor = decompressor + return nil +} + +// ReadFrame reads SPDY encoded data and returns a decompressed Frame. +func (f *Framer) ReadFrame() (Frame, error) { + var firstWord uint32 + if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil { + return nil, err + } + if firstWord&0x80000000 != 0 { + frameType := ControlFrameType(firstWord & 0xffff) + version := uint16(firstWord >> 16 & 0x7fff) + return f.parseControlFrame(version, frameType) + } + return f.parseDataFrame(StreamId(firstWord & 0x7fffffff)) +} + +func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + flags := ControlFlags((length & 0xff000000) >> 24) + length &= 0xffffff + header := ControlFrameHeader{version, frameType, flags, length} + cframe, err := newControlFrame(frameType) + if err != nil { + return nil, err + } + if err = cframe.read(header, f); err != nil { + return nil, err + } + return cframe, nil +} + +func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) { + var numHeaders uint32 + if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil { + return nil, err + } + var e error + h := make(http.Header, int(numHeaders)) + for i := 0; i < int(numHeaders); i++ { + var length uint32 + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + nameBytes := make([]byte, length) + if _, err := io.ReadFull(r, nameBytes); err != nil { + return nil, err + } + name := string(nameBytes) + if name != strings.ToLower(name) { + e = &Error{UnlowercasedHeaderName, streamId} + name = strings.ToLower(name) + } + if h[name] != nil { + e = &Error{DuplicateHeaders, streamId} + 
} + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + value := make([]byte, length) + if _, err := io.ReadFull(r, value); err != nil { + return nil, err + } + valueList := strings.Split(string(value), headerValueSeparator) + for _, v := range valueList { + h.Add(name, v) + } + } + if e != nil { + return h, e + } + return h, nil +} + +func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil { + return err + } + frame.Priority >>= 5 + if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 10)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidReqHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = 
parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidRespHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + var invalidHeaders map[string]bool + if frame.StreamId%2 == 0 { + invalidHeaders = invalidReqHeaders + } else { + invalidHeaders = invalidRespHeaders + } + for h := range frame.Headers { + if invalidHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + var frame DataFrame + frame.StreamId = streamId + frame.Flags = DataFlags(length >> 24) + length &= 0xffffff + frame.Data = make([]byte, length) + if _, err := io.ReadFull(f.r, frame.Data); err != nil { + return nil, err + } + if frame.StreamId == 0 { + return nil, &Error{ZeroStreamId, 0} + } + return &frame, nil +} diff --git 
a/vendor/github.com/docker/spdystream/spdy/types.go b/vendor/github.com/docker/spdystream/spdy/types.go new file mode 100644 index 000000000..7b6ee9c6f --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/types.go @@ -0,0 +1,275 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package spdy implements the SPDY protocol (currently SPDY/3), described in +// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3. +package spdy + +import ( + "bytes" + "compress/zlib" + "io" + "net/http" +) + +// Version is the protocol version number that this package implements. +const Version = 3 + +// ControlFrameType stores the type field in a control frame header. +type ControlFrameType uint16 + +const ( + TypeSynStream ControlFrameType = 0x0001 + TypeSynReply = 0x0002 + TypeRstStream = 0x0003 + TypeSettings = 0x0004 + TypePing = 0x0006 + TypeGoAway = 0x0007 + TypeHeaders = 0x0008 + TypeWindowUpdate = 0x0009 +) + +// ControlFlags are the flags that can be set on a control frame. +type ControlFlags uint8 + +const ( + ControlFlagFin ControlFlags = 0x01 + ControlFlagUnidirectional = 0x02 + ControlFlagSettingsClearSettings = 0x01 +) + +// DataFlags are the flags that can be set on a data frame. +type DataFlags uint8 + +const ( + DataFlagFin DataFlags = 0x01 +) + +// MaxDataLength is the maximum number of bytes that can be stored in one frame. +const MaxDataLength = 1<<24 - 1 + +// headerValueSepator separates multiple header values. +const headerValueSeparator = "\x00" + +// Frame is a single SPDY frame in its unpacked in-memory representation. Use +// Framer to read and write it. +type Frame interface { + write(f *Framer) error +} + +// ControlFrameHeader contains all the fields in a control frame header, +// in its unpacked in-memory representation. +type ControlFrameHeader struct { + // Note, high bit is the "Control" bit. 
+ version uint16 // spdy version number + frameType ControlFrameType + Flags ControlFlags + length uint32 // length of data field +} + +type controlFrame interface { + Frame + read(h ControlFrameHeader, f *Framer) error +} + +// StreamId represents a 31-bit value identifying the stream. +type StreamId uint32 + +// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM +// frame. +type SynStreamFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to + Priority uint8 // priority of this frame (3-bit) + Slot uint8 // index in the server's credential vector of the client certificate + Headers http.Header +} + +// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame. +type SynReplyFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Headers http.Header +} + +// RstStreamStatus represents the status that led to a RST_STREAM. +type RstStreamStatus uint32 + +const ( + ProtocolError RstStreamStatus = iota + 1 + InvalidStream + RefusedStream + UnsupportedVersion + Cancel + InternalError + FlowControlError + StreamInUse + StreamAlreadyClosed + InvalidCredentials + FrameTooLarge +) + +// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM +// frame. +type RstStreamFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Status RstStreamStatus +} + +// SettingsFlag represents a flag in a SETTINGS frame. +type SettingsFlag uint8 + +const ( + FlagSettingsPersistValue SettingsFlag = 0x1 + FlagSettingsPersisted = 0x2 +) + +// SettingsFlag represents the id of an id/value pair in a SETTINGS frame. 
+type SettingsId uint32 + +const ( + SettingsUploadBandwidth SettingsId = iota + 1 + SettingsDownloadBandwidth + SettingsRoundTripTime + SettingsMaxConcurrentStreams + SettingsCurrentCwnd + SettingsDownloadRetransRate + SettingsInitialWindowSize + SettingsClientCretificateVectorSize +) + +// SettingsFlagIdValue is the unpacked, in-memory representation of the +// combined flag/id/value for a setting in a SETTINGS frame. +type SettingsFlagIdValue struct { + Flag SettingsFlag + Id SettingsId + Value uint32 +} + +// SettingsFrame is the unpacked, in-memory representation of a SPDY +// SETTINGS frame. +type SettingsFrame struct { + CFHeader ControlFrameHeader + FlagIdValues []SettingsFlagIdValue +} + +// PingFrame is the unpacked, in-memory representation of a PING frame. +type PingFrame struct { + CFHeader ControlFrameHeader + Id uint32 // unique id for this ping, from server is even, from client is odd. +} + +// GoAwayStatus represents the status in a GoAwayFrame. +type GoAwayStatus uint32 + +const ( + GoAwayOK GoAwayStatus = iota + GoAwayProtocolError + GoAwayInternalError +) + +// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame. +type GoAwayFrame struct { + CFHeader ControlFrameHeader + LastGoodStreamId StreamId // last stream id which was accepted by sender + Status GoAwayStatus +} + +// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame. +type HeadersFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + Headers http.Header +} + +// WindowUpdateFrame is the unpacked, in-memory representation of a +// WINDOW_UPDATE frame. +type WindowUpdateFrame struct { + CFHeader ControlFrameHeader + StreamId StreamId + DeltaWindowSize uint32 // additional number of bytes to existing window size +} + +// TODO: Implement credential frame and related methods. + +// DataFrame is the unpacked, in-memory representation of a DATA frame. +type DataFrame struct { + // Note, high bit is the "Control" bit. 
Should be 0 for data frames. + StreamId StreamId + Flags DataFlags + Data []byte // payload data of this frame +} + +// A SPDY specific error. +type ErrorCode string + +const ( + UnlowercasedHeaderName ErrorCode = "header was not lowercased" + DuplicateHeaders = "multiple headers with same name" + WrongCompressedPayloadSize = "compressed payload size was incorrect" + UnknownFrameType = "unknown frame type" + InvalidControlFrame = "invalid control frame" + InvalidDataFrame = "invalid data frame" + InvalidHeaderPresent = "frame contained invalid header" + ZeroStreamId = "stream id zero is disallowed" +) + +// Error contains both the type of error and additional values. StreamId is 0 +// if Error is not associated with a stream. +type Error struct { + Err ErrorCode + StreamId StreamId +} + +func (e *Error) Error() string { + return string(e.Err) +} + +var invalidReqHeaders = map[string]bool{ + "Connection": true, + "Host": true, + "Keep-Alive": true, + "Proxy-Connection": true, + "Transfer-Encoding": true, +} + +var invalidRespHeaders = map[string]bool{ + "Connection": true, + "Keep-Alive": true, + "Proxy-Connection": true, + "Transfer-Encoding": true, +} + +// Framer handles serializing/deserializing SPDY frames, including compressing/ +// decompressing payloads. +type Framer struct { + headerCompressionDisabled bool + w io.Writer + headerBuf *bytes.Buffer + headerCompressor *zlib.Writer + r io.Reader + headerReader io.LimitedReader + headerDecompressor io.ReadCloser +} + +// NewFramer allocates a new Framer for a given SPDY connection, represented by +// a io.Writer and io.Reader. Note that Framer will read and write individual fields +// from/to the Reader and Writer, so the caller should pass in an appropriately +// buffered implementation to optimize performance. 
+func NewFramer(w io.Writer, r io.Reader) (*Framer, error) { + compressBuf := new(bytes.Buffer) + compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary)) + if err != nil { + return nil, err + } + framer := &Framer{ + w: w, + headerBuf: compressBuf, + headerCompressor: compressor, + r: r, + } + return framer, nil +} diff --git a/vendor/github.com/docker/spdystream/spdy/write.go b/vendor/github.com/docker/spdystream/spdy/write.go new file mode 100644 index 000000000..b212f66a2 --- /dev/null +++ b/vendor/github.com/docker/spdystream/spdy/write.go @@ -0,0 +1,318 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +import ( + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) write(f *Framer) error { + return f.writeSynStreamFrame(frame) +} + +func (frame *SynReplyFrame) write(f *Framer) error { + return f.writeSynReplyFrame(frame) +} + +func (frame *RstStreamFrame) write(f *Framer) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeRstStream + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return +} + +func (frame *SettingsFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSettings + frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4) + + // Serialize frame to Writer. 
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil { + return + } + for _, flagIdValue := range frame.FlagIdValues { + flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id) + if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil { + return + } + } + return +} + +func (frame *PingFrame) write(f *Framer) (err error) { + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypePing + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 4 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil { + return + } + return +} + +func (frame *GoAwayFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeGoAway + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return nil +} + +func (frame *HeadersFrame) write(f *Framer) error { + return f.writeHeadersFrame(frame) +} + +func (frame *WindowUpdateFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeWindowUpdate + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. 
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil { + return + } + return nil +} + +func (frame *DataFrame) write(f *Framer) error { + return f.writeDataFrame(frame) +} + +// WriteFrame writes a frame. +func (f *Framer) WriteFrame(frame Frame) error { + return frame.write(f) +} + +func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error { + if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil { + return err + } + if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil { + return err + } + flagsAndLength := uint32(h.Flags)<<24 | h.length + if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil { + return err + } + return nil +} + +func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) { + n = 0 + if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil { + return + } + n += 2 + for name, values := range h { + if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil { + return + } + n += 2 + name = strings.ToLower(name) + if _, err = io.WriteString(w, name); err != nil { + return + } + n += len(name) + v := strings.Join(values, headerValueSeparator) + if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil { + return + } + n += 2 + if _, err = io.WriteString(w, v); err != nil { + return + } + n += len(v) + } + return +} + +func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. 
+ var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynStream + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil { + return err + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return err + } + f.headerBuf.Reset() + return nil +} + +func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynReply + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. 
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeHeaders + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeDataFrame(frame *DataFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength { + return &Error{InvalidDataFrame, frame.StreamId} + } + + // Serialize frame to Writer. 
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data)) + if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil { + return + } + if _, err = f.w.Write(frame.Data); err != nil { + return + } + return nil +} diff --git a/vendor/github.com/docker/spdystream/stream.go b/vendor/github.com/docker/spdystream/stream.go new file mode 100644 index 000000000..f9e9ee267 --- /dev/null +++ b/vendor/github.com/docker/spdystream/stream.go @@ -0,0 +1,327 @@ +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "github.com/docker/spdystream/spdy" +) + +var ( + ErrUnreadPartialData = errors.New("unread partial data") +) + +type Stream struct { + streamId spdy.StreamId + parent *Stream + conn *Connection + startChan chan error + + dataLock sync.RWMutex + dataChan chan []byte + unread []byte + + priority uint8 + headers http.Header + headerChan chan http.Header + finishLock sync.Mutex + finished bool + replyCond *sync.Cond + replied bool + closeLock sync.Mutex + closeChan chan bool +} + +// WriteData writes data to stream, sending a dataframe per call +func (s *Stream) WriteData(data []byte, fin bool) error { + s.waitWriteReply() + var flags spdy.DataFlags + + if fin { + flags = spdy.DataFlagFin + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return ErrWriteClosedStream + } + s.finished = true + s.finishLock.Unlock() + } + + dataFrame := &spdy.DataFrame{ + StreamId: s.streamId, + Flags: flags, + Data: data, + } + + debugMessage("(%p) (%d) Writing data frame", s, s.streamId) + return s.conn.framer.WriteFrame(dataFrame) +} + +// Write writes bytes to a stream, calling write data for each call. 
+func (s *Stream) Write(data []byte) (n int, err error) { + err = s.WriteData(data, false) + if err == nil { + n = len(data) + } + return +} + +// Read reads bytes from a stream, a single read will never get more +// than what is sent on a single data frame, but a multiple calls to +// read may get data from the same data frame. +func (s *Stream) Read(p []byte) (n int, err error) { + if s.unread == nil { + select { + case <-s.closeChan: + return 0, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return 0, io.EOF + } + s.unread = read + } + } + n = copy(p, s.unread) + if n < len(s.unread) { + s.unread = s.unread[n:] + } else { + s.unread = nil + } + return +} + +// ReadData reads an entire data frame and returns the byte array +// from the data frame. If there is unread data from the result +// of a Read call, this function will return an ErrUnreadPartialData. +func (s *Stream) ReadData() ([]byte, error) { + debugMessage("(%p) Reading data from %d", s, s.streamId) + if s.unread != nil { + return nil, ErrUnreadPartialData + } + select { + case <-s.closeChan: + return nil, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return nil, io.EOF + } + return read, nil + } +} + +func (s *Stream) waitWriteReply() { + if s.replyCond != nil { + s.replyCond.L.Lock() + for !s.replied { + s.replyCond.Wait() + } + s.replyCond.L.Unlock() + } +} + +// Wait waits for the stream to receive a reply. +func (s *Stream) Wait() error { + return s.WaitTimeout(time.Duration(0)) +} + +// WaitTimeout waits for the stream to receive a reply or for timeout. +// When the timeout is reached, ErrTimeout will be returned. 
+func (s *Stream) WaitTimeout(timeout time.Duration) error { + var timeoutChan <-chan time.Time + if timeout > time.Duration(0) { + timeoutChan = time.After(timeout) + } + + select { + case err := <-s.startChan: + if err != nil { + return err + } + break + case <-timeoutChan: + return ErrTimeout + } + return nil +} + +// Close closes the stream by sending an empty data frame with the +// finish flag set, indicating this side is finished with the stream. +func (s *Stream) Close() error { + select { + case <-s.closeChan: + // Stream is now fully closed + s.conn.removeStream(s) + default: + break + } + return s.WriteData([]byte{}, true) +} + +// Reset sends a reset frame, putting the stream into the fully closed state. +func (s *Stream) Reset() error { + s.conn.removeStream(s) + return s.resetStream() +} + +func (s *Stream) resetStream() error { + // Always call closeRemoteChannels, even if s.finished is already true. + // This makes it so that stream.Close() followed by stream.Reset() allows + // stream.Read() to unblock. + s.closeRemoteChannels() + + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return nil + } + s.finished = true + s.finishLock.Unlock() + + resetFrame := &spdy.RstStreamFrame{ + StreamId: s.streamId, + Status: spdy.Cancel, + } + return s.conn.framer.WriteFrame(resetFrame) +} + +// CreateSubStream creates a stream using the current as the parent +func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) { + return s.conn.CreateStream(headers, s, fin) +} + +// SetPriority sets the stream priority, does not affect the +// remote priority of this stream after Open has been called. +// Valid values are 0 through 7, 0 being the highest priority +// and 7 the lowest. 
+func (s *Stream) SetPriority(priority uint8) { + s.priority = priority +} + +// SendHeader sends a header frame across the stream +func (s *Stream) SendHeader(headers http.Header, fin bool) error { + return s.conn.sendHeaders(headers, s, fin) +} + +// SendReply sends a reply on a stream, only valid to be called once +// when handling a new stream +func (s *Stream) SendReply(headers http.Header, fin bool) error { + if s.replyCond == nil { + return errors.New("cannot reply on initiated stream") + } + s.replyCond.L.Lock() + defer s.replyCond.L.Unlock() + if s.replied { + return nil + } + + err := s.conn.sendReply(headers, s, fin) + if err != nil { + return err + } + + s.replied = true + s.replyCond.Broadcast() + return nil +} + +// Refuse sends a reset frame with the status refuse, only +// valid to be called once when handling a new stream. This +// may be used to indicate that a stream is not allowed +// when http status codes are not being used. +func (s *Stream) Refuse() error { + if s.replied { + return nil + } + s.replied = true + return s.conn.sendReset(spdy.RefusedStream, s) +} + +// Cancel sends a reset frame with the status canceled. This +// can be used at any time by the creator of the Stream to +// indicate the stream is no longer needed. +func (s *Stream) Cancel() error { + return s.conn.sendReset(spdy.Cancel, s) +} + +// ReceiveHeader receives a header sent on the other side +// of the stream. This function will block until a header +// is received or stream is closed. 
+func (s *Stream) ReceiveHeader() (http.Header, error) { + select { + case <-s.closeChan: + break + case header, ok := <-s.headerChan: + if !ok { + return nil, fmt.Errorf("header chan closed") + } + return header, nil + } + return nil, fmt.Errorf("stream closed") +} + +// Parent returns the parent stream +func (s *Stream) Parent() *Stream { + return s.parent +} + +// Headers returns the headers used to create the stream +func (s *Stream) Headers() http.Header { + return s.headers +} + +// String returns the string version of stream using the +// streamId to uniquely identify the stream +func (s *Stream) String() string { + return fmt.Sprintf("stream:%d", s.streamId) +} + +// Identifier returns a 32 bit identifier for the stream +func (s *Stream) Identifier() uint32 { + return uint32(s.streamId) +} + +// IsFinished returns whether the stream has finished +// sending data +func (s *Stream) IsFinished() bool { + return s.finished +} + +// Implement net.Conn interface + +func (s *Stream) LocalAddr() net.Addr { + return s.conn.conn.LocalAddr() +} + +func (s *Stream) RemoteAddr() net.Addr { + return s.conn.conn.RemoteAddr() +} + +// TODO set per stream values instead of connection-wide + +func (s *Stream) SetDeadline(t time.Time) error { + return s.conn.conn.SetDeadline(t) +} + +func (s *Stream) SetReadDeadline(t time.Time) error { + return s.conn.conn.SetReadDeadline(t) +} + +func (s *Stream) SetWriteDeadline(t time.Time) error { + return s.conn.conn.SetWriteDeadline(t) +} + +func (s *Stream) closeRemoteChannels() { + s.closeLock.Lock() + defer s.closeLock.Unlock() + select { + case <-s.closeChan: + default: + close(s.closeChan) + } +} diff --git a/vendor/github.com/docker/spdystream/utils.go b/vendor/github.com/docker/spdystream/utils.go new file mode 100644 index 000000000..1b2c199a4 --- /dev/null +++ b/vendor/github.com/docker/spdystream/utils.go @@ -0,0 +1,16 @@ +package spdystream + +import ( + "log" + "os" +) + +var ( + DEBUG = os.Getenv("DEBUG") +) + +func 
debugMessage(fmt string, args ...interface{}) { + if DEBUG != "" { + log.Printf(fmt, args...) + } +} diff --git a/vendor/github.com/kubesphere/s2ioperator/pkg/apis/devops/v1alpha1/s2ibuilder_types.go b/vendor/github.com/kubesphere/s2ioperator/pkg/apis/devops/v1alpha1/s2ibuilder_types.go index 676f3a629..daced0248 100644 --- a/vendor/github.com/kubesphere/s2ioperator/pkg/apis/devops/v1alpha1/s2ibuilder_types.go +++ b/vendor/github.com/kubesphere/s2ioperator/pkg/apis/devops/v1alpha1/s2ibuilder_types.go @@ -36,10 +36,11 @@ const ( Unknown = "Unknown" ) const ( - AutoScaleAnnotations = "devops.kubesphere.io/autoscale" - WorkloadLatestS2iRunTemplateLabel = "devops.kubesphere.io/s2ir" - S2irCompletedScaleAnnotations = "devops.kubesphere.io/completedscale" - WorkLoadCompletedInitAnnotations = "devops.kubesphere.io/inithasbeencomplted" + AutoScaleAnnotations = "devops.kubesphere.io/autoscale" + S2iRunLabel = "devops.kubesphere.io/s2ir" + S2irCompletedScaleAnnotations = "devops.kubesphere.io/completedscale" + WorkLoadCompletedInitAnnotations = "devops.kubesphere.io/inithasbeencomplted" + DescriptionAnnotations = "desc" ) const ( KindDeployment = "Deployment" @@ -48,14 +49,14 @@ const ( // EnvironmentSpec specifies a single environment variable. type EnvironmentSpec struct { - Name string - Value string + Name string `json:"name"` + Value string `json:"value"` } // ProxyConfig holds proxy configuration. type ProxyConfig struct { - HTTPProxy string - HTTPSProxy string + HTTPProxy string `json:"httpProxy,omitempty"` + HTTPSProxy string `json:"httpsProxy,omitempty"` } // CGroupLimits holds limits used to constrain container resources. 
@@ -423,6 +424,8 @@ type S2iBuilderStatus struct { LastRunState RunState `json:"lastRunState,omitempty"` //LastRunState return the name of the newest run of this builder LastRunName *string `json:"lastRunName,omitempty"` + //LastRunStartTime return the startTime of the newest run of this builder + LastRunStartTime *metav1.Time `json:"lastRunStartTime,omitempty"` } // +genclient @@ -434,6 +437,7 @@ type S2iBuilderStatus struct { // +kubebuilder:printcolumn:name="RunCount",type="integer",JSONPath=".status.runCount" // +kubebuilder:printcolumn:name="LastRunState",type="string",JSONPath=".status.lastRunState" // +kubebuilder:printcolumn:name="LastRunName",type="string",JSONPath=".status.lastRunName" +// +kubebuilder:printcolumn:name="LastRunStartTime",type="date",JSONPath=".status.lastRunStartTime" // +kubebuilder:resource:shortName=s2ib type S2iBuilder struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/kubesphere/s2ioperator/pkg/apis/devops/v1alpha1/s2irun_types.go b/vendor/github.com/kubesphere/s2ioperator/pkg/apis/devops/v1alpha1/s2irun_types.go index fcda717b1..0dcf71066 100644 --- a/vendor/github.com/kubesphere/s2ioperator/pkg/apis/devops/v1alpha1/s2irun_types.go +++ b/vendor/github.com/kubesphere/s2ioperator/pkg/apis/devops/v1alpha1/s2irun_types.go @@ -51,6 +51,8 @@ type S2iRunStatus struct { LogURL string `json:"logURL,omitempty"` //KubernetesJobName is the job name in k8s KubernetesJobName string `json:"kubernetesJobName,omitempty"` + //ImageName is the name of artifact + ImageName string `json:"imageName,omitempty"` } // +genclient @@ -64,7 +66,7 @@ type S2iRunStatus struct { // +kubebuilder:printcolumn:name="K8sJobName",type="string",JSONPath=".status.kubernetesJobName" // +kubebuilder:printcolumn:name="StartTime",type="date",JSONPath=".status.startTime" // +kubebuilder:printcolumn:name="CompletionTime",type="date",JSONPath=".status.completionTime" -// +kubebuilder:printcolumn:name="LogURL",type="string",JSONPath=".status.logURL" +// 
+kubebuilder:printcolumn:name="ImageName",type="string",JSONPath=".status.imageName" type S2iRun struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE deleted file mode 100644 index f9c841a51..000000000 --- a/vendor/github.com/mitchellh/go-homedir/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go deleted file mode 100644 index 25378537e..000000000 --- a/vendor/github.com/mitchellh/go-homedir/homedir.go +++ /dev/null @@ -1,167 +0,0 @@ -package homedir - -import ( - "bytes" - "errors" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" -) - -// DisableCache will disable caching of the home directory. 
Caching is enabled -// by default. -var DisableCache bool - -var homedirCache string -var cacheLock sync.RWMutex - -// Dir returns the home directory for the executing user. -// -// This uses an OS-specific method for discovering the home directory. -// An error is returned if a home directory cannot be detected. -func Dir() (string, error) { - if !DisableCache { - cacheLock.RLock() - cached := homedirCache - cacheLock.RUnlock() - if cached != "" { - return cached, nil - } - } - - cacheLock.Lock() - defer cacheLock.Unlock() - - var result string - var err error - if runtime.GOOS == "windows" { - result, err = dirWindows() - } else { - // Unix-like system, so just assume Unix - result, err = dirUnix() - } - - if err != nil { - return "", err - } - homedirCache = result - return result, nil -} - -// Expand expands the path to include the home directory if the path -// is prefixed with `~`. If it isn't prefixed with `~`, the path is -// returned as-is. -func Expand(path string) (string, error) { - if len(path) == 0 { - return path, nil - } - - if path[0] != '~' { - return path, nil - } - - if len(path) > 1 && path[1] != '/' && path[1] != '\\' { - return "", errors.New("cannot expand user-specific home dir") - } - - dir, err := Dir() - if err != nil { - return "", err - } - - return filepath.Join(dir, path[1:]), nil -} - -// Reset clears the cache, forcing the next call to Dir to re-detect -// the home directory. This generally never has to be called, but can be -// useful in tests if you're modifying the home directory via the HOME -// env var or something. -func Reset() { - cacheLock.Lock() - defer cacheLock.Unlock() - homedirCache = "" -} - -func dirUnix() (string, error) { - homeEnv := "HOME" - if runtime.GOOS == "plan9" { - // On plan9, env vars are lowercase. 
- homeEnv = "home" - } - - // First prefer the HOME environmental variable - if home := os.Getenv(homeEnv); home != "" { - return home, nil - } - - var stdout bytes.Buffer - - // If that fails, try OS specific commands - if runtime.GOOS == "darwin" { - cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) - cmd.Stdout = &stdout - if err := cmd.Run(); err == nil { - result := strings.TrimSpace(stdout.String()) - if result != "" { - return result, nil - } - } - } else { - cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - // If the error is ErrNotFound, we ignore it. Otherwise, return it. - if err != exec.ErrNotFound { - return "", err - } - } else { - if passwd := strings.TrimSpace(stdout.String()); passwd != "" { - // username:password:uid:gid:gecos:home:shell - passwdParts := strings.SplitN(passwd, ":", 7) - if len(passwdParts) > 5 { - return passwdParts[5], nil - } - } - } - } - - // If all else fails, try the shell - stdout.Reset() - cmd := exec.Command("sh", "-c", "cd && pwd") - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return "", err - } - - result := strings.TrimSpace(stdout.String()) - if result == "" { - return "", errors.New("blank output when reading home directory") - } - - return result, nil -} - -func dirWindows() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - // Prefer standard environment variable USERPROFILE - if home := os.Getenv("USERPROFILE"); home != "" { - return home, nil - } - - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home := drive + path - if drive == "" || path == "" { - return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") - } - - return home, nil -} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go 
b/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go deleted file mode 100644 index bc22e9732..000000000 --- a/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go +++ /dev/null @@ -1,683 +0,0 @@ -package cmd - -func initAgpl() { - Licenses["agpl"] = License{ - Name: "GNU Affero General Public License", - PossibleMatches: []string{"agpl", "affero gpl", "gnu agpl"}, - Header: ` -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Affero General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Affero General Public License for more details. - -You should have received a copy of the GNU Affero General Public License -along with this program. If not, see .`, - Text: ` GNU AFFERO GENERAL PUBLIC LICENSE - Version 3, 19 November 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU Affero General Public License is a free, copyleft license for -software and other kinds of works, specifically designed to ensure -cooperation with the community in the case of network server software. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -our General Public Licenses are intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - Developers that use our General Public Licenses protect your rights -with two steps: (1) assert copyright on the software, and (2) offer -you this License which gives you legal permission to copy, distribute -and/or modify the software. - - A secondary benefit of defending all users' freedom is that -improvements made in alternate versions of the program, if they -receive widespread use, become available for other developers to -incorporate. Many developers of free software are heartened and -encouraged by the resulting cooperation. However, in the case of -software used on network servers, this result may fail to come about. -The GNU General Public License permits making a modified version and -letting the public access it on a server without ever releasing its -source code to the public. - - The GNU Affero General Public License is designed specifically to -ensure that, in such cases, the modified source code becomes available -to the community. It requires the operator of a network server to -provide the source code of the modified version running there to the -users of that server. Therefore, public use of a modified version, on -a publicly accessible server, gives the public access to the source -code of the modified version. - - An older license, called the Affero General Public License and -published by Affero, was designed to accomplish similar goals. This is -a different license, not a version of the Affero GPL, but Affero has -released a new version of the Affero GPL which permits relicensing under -this license. - - The precise terms and conditions for copying, distribution and -modification follow. 
- - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU Affero General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. 
- - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. 
- - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. 
- - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. 
This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. 
- - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. 
If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. 
If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. 
- - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. 
For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. 
- - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Remote Network Interaction; Use with the GNU General Public License. 
- - Notwithstanding any other provision of this License, if you modify the -Program, your modified version must prominently offer all users -interacting with it remotely through a computer network (if your version -supports such interaction) an opportunity to receive the Corresponding -Source of your version by providing access to the Corresponding Source -from a network server at no charge, through some standard or customary -means of facilitating copying of software. This Corresponding Source -shall include the Corresponding Source for any work covered by version 3 -of the GNU General Public License that is incorporated pursuant to the -following paragraph. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the work with which it is combined will remain governed by version -3 of the GNU General Public License. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU Affero General Public License from time to time. Such new versions -will be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU Affero General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU Affero General Public License, you may choose any version ever published -by the Free Software Foundation. 
- - If the Program specifies that a proxy can decide which future -versions of the GNU Affero General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. 
- - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If your software can interact with users remotely through a computer -network, you should also make sure that it provides a way for users to -get its source. For example, if your program is a web application, its -interface could display a "Source" link that leads users to an archive -of the code. 
There are many ways you could offer source, and different -solutions will be better for different programs; see section 13 for the -specific requirements. - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU AGPL, see -. -`, - } -} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go deleted file mode 100644 index 4a847e04a..000000000 --- a/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -func initBsdClause2() { - Licenses["freebsd"] = License{ - Name: "Simplified BSD License", - PossibleMatches: []string{"freebsd", "simpbsd", "simple bsd", "2-clause bsd", - "2 clause bsd", "simplified bsd license"}, - Header: `All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. 
Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE.`, - Text: `{{ .copyright }} -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -`, - } -} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go deleted file mode 100644 index c7476b31f..000000000 --- a/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -func initBsdClause3() { - Licenses["bsd"] = License{ - Name: "NewBSD", - PossibleMatches: []string{"bsd", "newbsd", "3 clause bsd", "3-clause bsd"}, - Header: `All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. 
Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE.`, - Text: `{{ .copyright }} -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -`, - } -} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go deleted file mode 100644 index 03e05b3a7..000000000 --- a/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -func initGpl2() { - Licenses["gpl2"] = License{ - Name: "GNU General Public License 2.0", - PossibleMatches: []string{"gpl2", "gnu gpl2", "gplv2"}, - Header: ` -This program is free software; you can redistribute it and/or -modify it under the terms of the GNU General Public License -as published by the Free Software Foundation; either version 2 -of the License, or (at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see .`, - Text: ` GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. 
- - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. 
You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. 
- -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. 
However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. 
-You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type 'show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type 'show c' for details. - -The hypothetical commands 'show w' and 'show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than 'show w' and 'show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - 'Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. 
-`, - } -} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go deleted file mode 100644 index ce07679c7..000000000 --- a/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go +++ /dev/null @@ -1,711 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -func initGpl3() { - Licenses["gpl3"] = License{ - Name: "GNU General Public License 3.0", - PossibleMatches: []string{"gpl3", "gplv3", "gpl", "gnu gpl3", "gnu gpl"}, - Header: ` -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program. If not, see .`, - Text: ` GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. 
- - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. 
For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. 
The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. 
- - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. 
The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. 
- - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. 
- - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. 
This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. 
For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. 
Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. 
- - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. 
Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. 
- - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. 
- - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type 'show c' for details. - -The hypothetical commands 'show w' and 'show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
-`, - } -} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go deleted file mode 100644 index 0f8b96cad..000000000 --- a/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go +++ /dev/null @@ -1,186 +0,0 @@ -package cmd - -func initLgpl() { - Licenses["lgpl"] = License{ - Name: "GNU Lesser General Public License", - PossibleMatches: []string{"lgpl", "lesser gpl", "gnu lgpl"}, - Header: ` -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see .`, - Text: ` GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. 
- - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. 
- - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. 
A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. 
- - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library.`, - } -} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go deleted file mode 100644 index bd2d0c4fa..000000000 --- a/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -func initMit() { - Licenses["mit"] = License{ - Name: "MIT License", - PossibleMatches: []string{"mit"}, - Header: ` -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE.`, - Text: `The MIT License (MIT) - -{{ .copyright }} - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -`, - } -} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go b/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go deleted file mode 100644 index a070134dd..000000000 --- a/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Parts inspired by https://github.com/ryanuber/go-license - -package cmd - -import ( - "strings" - "time" - - "github.com/spf13/viper" -) - -// Licenses contains all possible licenses a user can choose from. 
-var Licenses = make(map[string]License) - -// License represents a software license agreement, containing the Name of -// the license, its possible matches (on the command line as given to cobra), -// the header to be used with each file on the file's creating, and the text -// of the license -type License struct { - Name string // The type of license in use - PossibleMatches []string // Similar names to guess - Text string // License text data - Header string // License header for source files -} - -func init() { - // Allows a user to not use a license. - Licenses["none"] = License{"None", []string{"none", "false"}, "", ""} - - initApache2() - initMit() - initBsdClause3() - initBsdClause2() - initGpl2() - initGpl3() - initLgpl() - initAgpl() -} - -// getLicense returns license specified by user in flag or in config. -// If user didn't specify the license, it returns Apache License 2.0. -// -// TODO: Inspect project for existing license -func getLicense() License { - // If explicitly flagged, use that. - if userLicense != "" { - return findLicense(userLicense) - } - - // If user wants to have custom license, use that. - if viper.IsSet("license.header") || viper.IsSet("license.text") { - return License{Header: viper.GetString("license.header"), - Text: viper.GetString("license.text")} - } - - // If user wants to have built-in license, use that. - if viper.IsSet("license") { - return findLicense(viper.GetString("license")) - } - - // If user didn't set any license, use Apache 2.0 by default. - return Licenses["apache"] -} - -func copyrightLine() string { - author := viper.GetString("author") - - year := viper.GetString("year") // For tests. - if year == "" { - year = time.Now().Format("2006") - } - - return "Copyright © " + year + " " + author -} - -// findLicense looks for License object of built-in licenses. -// If it didn't find license, then the app will be terminated and -// error will be printed. 
-func findLicense(name string) License { - found := matchLicense(name) - if found == "" { - er("unknown license: " + name) - } - return Licenses[found] -} - -// matchLicense compares the given a license name -// to PossibleMatches of all built-in licenses. -// It returns blank string, if name is blank string or it didn't find -// then appropriate match to name. -func matchLicense(name string) string { - if name == "" { - return "" - } - - for key, lic := range Licenses { - for _, match := range lic.PossibleMatches { - if strings.EqualFold(name, match) { - return key - } - } - } - - return "" -} diff --git a/vendor/golang.org/x/tools/container/intsets/popcnt_amd64.go b/vendor/golang.org/x/tools/container/intsets/popcnt_amd64.go deleted file mode 100644 index 99ea813d2..000000000 --- a/vendor/golang.org/x/tools/container/intsets/popcnt_amd64.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build amd64,!appengine,!gccgo - -package intsets - -func popcnt(x word) int -func havePOPCNT() bool - -var hasPOPCNT = havePOPCNT() - -// popcount returns the population count (number of set bits) of x. -func popcount(x word) int { - if hasPOPCNT { - return popcnt(x) - } - return popcountTable(x) // faster than Hacker's Delight -} diff --git a/vendor/golang.org/x/tools/container/intsets/popcnt_amd64.s b/vendor/golang.org/x/tools/container/intsets/popcnt_amd64.s deleted file mode 100644 index 05c3d6fb5..000000000 --- a/vendor/golang.org/x/tools/container/intsets/popcnt_amd64.s +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build amd64,!appengine,!gccgo - -#include "textflag.h" - -// func havePOPCNT() bool -TEXT ·havePOPCNT(SB),4,$0 - MOVQ $1, AX - CPUID - SHRQ $23, CX - ANDQ $1, CX - MOVB CX, ret+0(FP) - RET - -// func popcnt(word) int -TEXT ·popcnt(SB),NOSPLIT,$0-8 - XORQ AX, AX - MOVQ x+0(FP), SI - // POPCNT (SI), AX is not recognized by Go assembler, - // so we assemble it ourselves. - BYTE $0xf3 - BYTE $0x48 - BYTE $0x0f - BYTE $0xb8 - BYTE $0xc6 - MOVQ AX, ret+8(FP) - RET diff --git a/vendor/golang.org/x/tools/container/intsets/popcnt_gccgo.go b/vendor/golang.org/x/tools/container/intsets/popcnt_gccgo.go deleted file mode 100644 index 82a8875c8..000000000 --- a/vendor/golang.org/x/tools/container/intsets/popcnt_gccgo.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gccgo - -package intsets - -func popcount(x word) int diff --git a/vendor/golang.org/x/tools/container/intsets/popcnt_gccgo_c.c b/vendor/golang.org/x/tools/container/intsets/popcnt_gccgo_c.c deleted file mode 100644 index 08abb32ec..000000000 --- a/vendor/golang.org/x/tools/container/intsets/popcnt_gccgo_c.c +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build gccgo - -#include -#include -#include - -#define _STRINGIFY2_(x) #x -#define _STRINGIFY_(x) _STRINGIFY2_(x) -#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__) - -extern intptr_t popcount(uintptr_t x) __asm__(GOSYM_PREFIX GOPKGPATH ".popcount"); - -intptr_t popcount(uintptr_t x) { - return __builtin_popcountl((unsigned long)(x)); -} diff --git a/vendor/golang.org/x/tools/container/intsets/popcnt_generic.go b/vendor/golang.org/x/tools/container/intsets/popcnt_generic.go deleted file mode 100644 index 3985a1da1..000000000 --- a/vendor/golang.org/x/tools/container/intsets/popcnt_generic.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine -// +build !gccgo - -package intsets - -import "runtime" - -// We compared three algorithms---Hacker's Delight, table lookup, -// and AMD64's SSE4.1 hardware POPCNT---on a 2.67GHz Xeon X5550. -// -// % GOARCH=amd64 go test -run=NONE -bench=Popcount -// POPCNT 5.12 ns/op -// Table 8.53 ns/op -// HackersDelight 9.96 ns/op -// -// % GOARCH=386 go test -run=NONE -bench=Popcount -// Table 10.4 ns/op -// HackersDelight 5.23 ns/op -// -// (AMD64's ABM1 hardware supports ntz and nlz too, -// but they aren't critical.) - -// popcount returns the population count (number of set bits) of x. -func popcount(x word) int { - if runtime.GOARCH == "386" { - return popcountHD(uint32(x)) - } - return popcountTable(x) -} diff --git a/vendor/golang.org/x/tools/container/intsets/sparse.go b/vendor/golang.org/x/tools/container/intsets/sparse.go deleted file mode 100644 index 5db01c1a4..000000000 --- a/vendor/golang.org/x/tools/container/intsets/sparse.go +++ /dev/null @@ -1,1091 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package intsets provides Sparse, a compact and fast representation -// for sparse sets of int values. -// -// The time complexity of the operations Len, Insert, Remove and Has -// is in O(n) but in practice those methods are faster and more -// space-efficient than equivalent operations on sets based on the Go -// map type. The IsEmpty, Min, Max, Clear and TakeMin operations -// require constant time. -// -package intsets // import "golang.org/x/tools/container/intsets" - -// TODO(adonovan): -// - Add InsertAll(...int), RemoveAll(...int) -// - Add 'bool changed' results for {Intersection,Difference}With too. -// -// TODO(adonovan): implement Dense, a dense bit vector with a similar API. -// The space usage would be proportional to Max(), not Len(), and the -// implementation would be based upon big.Int. -// -// TODO(adonovan): opt: make UnionWith and Difference faster. -// These are the hot-spots for go/pointer. - -import ( - "bytes" - "fmt" -) - -// A Sparse is a set of int values. -// Sparse operations (even queries) are not concurrency-safe. -// -// The zero value for Sparse is a valid empty set. -// -// Sparse sets must be copied using the Copy method, not by assigning -// a Sparse value. -// -type Sparse struct { - // An uninitialized Sparse represents an empty set. - // An empty set may also be represented by - // root.next == root.prev == &root. - // - // The root is always the block with the smallest offset. - // It can be empty, but only if it is the only block; in that case, offset is - // MaxInt (which is not a valid offset). - root block -} - -type word uintptr - -const ( - _m = ^word(0) - bitsPerWord = 8 << (_m>>8&1 + _m>>16&1 + _m>>32&1) - bitsPerBlock = 256 // optimal value for go/pointer solver performance - wordsPerBlock = bitsPerBlock / bitsPerWord -) - -// Limit values of implementation-specific int type. 
-const ( - MaxInt = int(^uint(0) >> 1) - MinInt = -MaxInt - 1 -) - -// -- block ------------------------------------------------------------ - -// A set is represented as a circular doubly-linked list of blocks, -// each containing an offset and a bit array of fixed size -// bitsPerBlock; the blocks are ordered by increasing offset. -// -// The set contains an element x iff the block whose offset is x - (x -// mod bitsPerBlock) has the bit (x mod bitsPerBlock) set, where mod -// is the Euclidean remainder. -// -// A block may only be empty transiently. -// -type block struct { - offset int // offset mod bitsPerBlock == 0 - bits [wordsPerBlock]word // contains at least one set bit - next, prev *block // doubly-linked list of blocks -} - -// wordMask returns the word index (in block.bits) -// and single-bit mask for the block's ith bit. -func wordMask(i uint) (w uint, mask word) { - w = i / bitsPerWord - mask = 1 << (i % bitsPerWord) - return -} - -// insert sets the block b's ith bit and -// returns true if it was not already set. -// -func (b *block) insert(i uint) bool { - w, mask := wordMask(i) - if b.bits[w]&mask == 0 { - b.bits[w] |= mask - return true - } - return false -} - -// remove clears the block's ith bit and -// returns true if the bit was previously set. -// NB: may leave the block empty. -// -func (b *block) remove(i uint) bool { - w, mask := wordMask(i) - if b.bits[w]&mask != 0 { - b.bits[w] &^= mask - return true - } - return false -} - -// has reports whether the block's ith bit is set. -func (b *block) has(i uint) bool { - w, mask := wordMask(i) - return b.bits[w]&mask != 0 -} - -// empty reports whether b.len()==0, but more efficiently. -func (b *block) empty() bool { - for _, w := range b.bits { - if w != 0 { - return false - } - } - return true -} - -// len returns the number of set bits in block b. 
-func (b *block) len() int { - var l int - for _, w := range b.bits { - l += popcount(w) - } - return l -} - -// max returns the maximum element of the block. -// The block must not be empty. -func (b *block) max() int { - bi := b.offset + bitsPerBlock - // Decrement bi by number of high zeros in last.bits. - for i := len(b.bits) - 1; i >= 0; i-- { - if w := b.bits[i]; w != 0 { - return bi - nlz(w) - 1 - } - bi -= bitsPerWord - } - panic("BUG: empty block") -} - -// min returns the minimum element of the block, -// and also removes it if take is set. -// The block must not be initially empty. -// NB: may leave the block empty. -func (b *block) min(take bool) int { - for i, w := range b.bits { - if w != 0 { - tz := ntz(w) - if take { - b.bits[i] = w &^ (1 << uint(tz)) - } - return b.offset + int(i*bitsPerWord) + tz - } - } - panic("BUG: empty block") -} - -// lowerBound returns the smallest element of the block that is greater than or -// equal to the element corresponding to the ith bit. If there is no such -// element, the second return value is false. -func (b *block) lowerBound(i uint) (int, bool) { - w := i / bitsPerWord - bit := i % bitsPerWord - - if val := b.bits[w] >> bit; val != 0 { - return b.offset + int(i) + ntz(val), true - } - - for w++; w < wordsPerBlock; w++ { - if val := b.bits[w]; val != 0 { - return b.offset + int(w*bitsPerWord) + ntz(val), true - } - } - - return 0, false -} - -// forEach calls f for each element of block b. -// f must not mutate b's enclosing Sparse. -func (b *block) forEach(f func(int)) { - for i, w := range b.bits { - offset := b.offset + i*bitsPerWord - for bi := 0; w != 0 && bi < bitsPerWord; bi++ { - if w&1 != 0 { - f(offset) - } - offset++ - w >>= 1 - } - } -} - -// offsetAndBitIndex returns the offset of the block that would -// contain x and the bit index of x within that block. 
-// -func offsetAndBitIndex(x int) (int, uint) { - mod := x % bitsPerBlock - if mod < 0 { - // Euclidean (non-negative) remainder - mod += bitsPerBlock - } - return x - mod, uint(mod) -} - -// -- Sparse -------------------------------------------------------------- - -// none is a shared, empty, sentinel block that indicates the end of a block -// list. -var none block - -// Dummy type used to generate an implicit panic. This must be defined at the -// package level; if it is defined inside a function, it prevents the inlining -// of that function. -type to_copy_a_sparse_you_must_call_its_Copy_method struct{} - -// init ensures s is properly initialized. -func (s *Sparse) init() { - root := &s.root - if root.next == nil { - root.offset = MaxInt - root.next = root - root.prev = root - } else if root.next.prev != root { - // Copying a Sparse x leads to pernicious corruption: the - // new Sparse y shares the old linked list, but iteration - // on y will never encounter &y.root so it goes into a - // loop. Fail fast before this occurs. - // We don't want to call panic here because it prevents the - // inlining of this function. - _ = (interface{}(nil)).(to_copy_a_sparse_you_must_call_its_Copy_method) - } -} - -func (s *Sparse) first() *block { - s.init() - if s.root.offset == MaxInt { - return &none - } - return &s.root -} - -// next returns the next block in the list, or end if b is the last block. -func (s *Sparse) next(b *block) *block { - if b.next == &s.root { - return &none - } - return b.next -} - -// prev returns the previous block in the list, or end if b is the first block. -func (s *Sparse) prev(b *block) *block { - if b.prev == &s.root { - return &none - } - return b.prev -} - -// IsEmpty reports whether the set s is empty. -func (s *Sparse) IsEmpty() bool { - return s.root.next == nil || s.root.offset == MaxInt -} - -// Len returns the number of elements in the set s. 
-func (s *Sparse) Len() int { - var l int - for b := s.first(); b != &none; b = s.next(b) { - l += b.len() - } - return l -} - -// Max returns the maximum element of the set s, or MinInt if s is empty. -func (s *Sparse) Max() int { - if s.IsEmpty() { - return MinInt - } - return s.root.prev.max() -} - -// Min returns the minimum element of the set s, or MaxInt if s is empty. -func (s *Sparse) Min() int { - if s.IsEmpty() { - return MaxInt - } - return s.root.min(false) -} - -// LowerBound returns the smallest element >= x, or MaxInt if there is no such -// element. -func (s *Sparse) LowerBound(x int) int { - offset, i := offsetAndBitIndex(x) - for b := s.first(); b != &none; b = s.next(b) { - if b.offset > offset { - return b.min(false) - } - if b.offset == offset { - if y, ok := b.lowerBound(i); ok { - return y - } - } - } - return MaxInt -} - -// block returns the block that would contain offset, -// or nil if s contains no such block. -// Precondition: offset is a multiple of bitsPerBlock. -func (s *Sparse) block(offset int) *block { - for b := s.first(); b != &none && b.offset <= offset; b = s.next(b) { - if b.offset == offset { - return b - } - } - return nil -} - -// Insert adds x to the set s, and reports whether the set grew. -func (s *Sparse) Insert(x int) bool { - offset, i := offsetAndBitIndex(x) - - b := s.first() - for ; b != &none && b.offset <= offset; b = s.next(b) { - if b.offset == offset { - return b.insert(i) - } - } - - // Insert new block before b. - new := s.insertBlockBefore(b) - new.offset = offset - return new.insert(i) -} - -// removeBlock removes a block and returns the block that followed it (or end if -// it was the last block). -func (s *Sparse) removeBlock(b *block) *block { - if b != &s.root { - b.prev.next = b.next - b.next.prev = b.prev - if b.next == &s.root { - return &none - } - return b.next - } - - first := s.root.next - if first == &s.root { - // This was the only block. 
- s.Clear() - return &none - } - s.root.offset = first.offset - s.root.bits = first.bits - if first.next == &s.root { - // Single block remaining. - s.root.next = &s.root - s.root.prev = &s.root - } else { - s.root.next = first.next - first.next.prev = &s.root - } - return &s.root -} - -// Remove removes x from the set s, and reports whether the set shrank. -func (s *Sparse) Remove(x int) bool { - offset, i := offsetAndBitIndex(x) - if b := s.block(offset); b != nil { - if !b.remove(i) { - return false - } - if b.empty() { - s.removeBlock(b) - } - return true - } - return false -} - -// Clear removes all elements from the set s. -func (s *Sparse) Clear() { - s.root = block{ - offset: MaxInt, - next: &s.root, - prev: &s.root, - } -} - -// If set s is non-empty, TakeMin sets *p to the minimum element of -// the set s, removes that element from the set and returns true. -// Otherwise, it returns false and *p is undefined. -// -// This method may be used for iteration over a worklist like so: -// -// var x int -// for worklist.TakeMin(&x) { use(x) } -// -func (s *Sparse) TakeMin(p *int) bool { - if s.IsEmpty() { - return false - } - *p = s.root.min(true) - if s.root.empty() { - s.removeBlock(&s.root) - } - return true -} - -// Has reports whether x is an element of the set s. -func (s *Sparse) Has(x int) bool { - offset, i := offsetAndBitIndex(x) - if b := s.block(offset); b != nil { - return b.has(i) - } - return false -} - -// forEach applies function f to each element of the set s in order. -// -// f must not mutate s. Consequently, forEach is not safe to expose -// to clients. In any case, using "range s.AppendTo()" allows more -// natural control flow with continue/break/return. -// -func (s *Sparse) forEach(f func(int)) { - for b := s.first(); b != &none; b = s.next(b) { - b.forEach(f) - } -} - -// Copy sets s to the value of x. 
-func (s *Sparse) Copy(x *Sparse) { - if s == x { - return - } - - xb := x.first() - sb := s.first() - for xb != &none { - if sb == &none { - sb = s.insertBlockBefore(sb) - } - sb.offset = xb.offset - sb.bits = xb.bits - xb = x.next(xb) - sb = s.next(sb) - } - s.discardTail(sb) -} - -// insertBlockBefore returns a new block, inserting it before next. -// If next is the root, the root is replaced. If next is end, the block is -// inserted at the end. -func (s *Sparse) insertBlockBefore(next *block) *block { - if s.IsEmpty() { - if next != &none { - panic("BUG: passed block with empty set") - } - return &s.root - } - - if next == &s.root { - // Special case: we need to create a new block that will become the root - // block.The old root block becomes the second block. - second := s.root - s.root = block{ - next: &second, - } - if second.next == &s.root { - s.root.prev = &second - } else { - s.root.prev = second.prev - second.next.prev = &second - second.prev = &s.root - } - return &s.root - } - if next == &none { - // Insert before root. - next = &s.root - } - b := new(block) - b.next = next - b.prev = next.prev - b.prev.next = b - next.prev = b - return b -} - -// discardTail removes block b and all its successors from s. -func (s *Sparse) discardTail(b *block) { - if b != &none { - if b == &s.root { - s.Clear() - } else { - b.prev.next = &s.root - s.root.prev = b.prev - } - } -} - -// IntersectionWith sets s to the intersection s ∩ x. 
-func (s *Sparse) IntersectionWith(x *Sparse) { - if s == x { - return - } - - xb := x.first() - sb := s.first() - for xb != &none && sb != &none { - switch { - case xb.offset < sb.offset: - xb = x.next(xb) - - case xb.offset > sb.offset: - sb = s.removeBlock(sb) - - default: - var sum word - for i := range sb.bits { - r := xb.bits[i] & sb.bits[i] - sb.bits[i] = r - sum |= r - } - if sum != 0 { - sb = s.next(sb) - } else { - // sb will be overwritten or removed - } - - xb = x.next(xb) - } - } - - s.discardTail(sb) -} - -// Intersection sets s to the intersection x ∩ y. -func (s *Sparse) Intersection(x, y *Sparse) { - switch { - case s == x: - s.IntersectionWith(y) - return - case s == y: - s.IntersectionWith(x) - return - case x == y: - s.Copy(x) - return - } - - xb := x.first() - yb := y.first() - sb := s.first() - for xb != &none && yb != &none { - switch { - case xb.offset < yb.offset: - xb = x.next(xb) - continue - case xb.offset > yb.offset: - yb = y.next(yb) - continue - } - - if sb == &none { - sb = s.insertBlockBefore(sb) - } - sb.offset = xb.offset - - var sum word - for i := range sb.bits { - r := xb.bits[i] & yb.bits[i] - sb.bits[i] = r - sum |= r - } - if sum != 0 { - sb = s.next(sb) - } else { - // sb will be overwritten or removed - } - - xb = x.next(xb) - yb = y.next(yb) - } - - s.discardTail(sb) -} - -// Intersects reports whether s ∩ x ≠ ∅. -func (s *Sparse) Intersects(x *Sparse) bool { - sb := s.first() - xb := x.first() - for sb != &none && xb != &none { - switch { - case xb.offset < sb.offset: - xb = x.next(xb) - case xb.offset > sb.offset: - sb = s.next(sb) - default: - for i := range sb.bits { - if sb.bits[i]&xb.bits[i] != 0 { - return true - } - } - sb = s.next(sb) - xb = x.next(xb) - } - } - return false -} - -// UnionWith sets s to the union s ∪ x, and reports whether s grew. 
-func (s *Sparse) UnionWith(x *Sparse) bool { - if s == x { - return false - } - - var changed bool - xb := x.first() - sb := s.first() - for xb != &none { - if sb != &none && sb.offset == xb.offset { - for i := range xb.bits { - if sb.bits[i] != xb.bits[i] { - sb.bits[i] |= xb.bits[i] - changed = true - } - } - xb = x.next(xb) - } else if sb == &none || sb.offset > xb.offset { - sb = s.insertBlockBefore(sb) - sb.offset = xb.offset - sb.bits = xb.bits - changed = true - - xb = x.next(xb) - } - sb = s.next(sb) - } - return changed -} - -// Union sets s to the union x ∪ y. -func (s *Sparse) Union(x, y *Sparse) { - switch { - case x == y: - s.Copy(x) - return - case s == x: - s.UnionWith(y) - return - case s == y: - s.UnionWith(x) - return - } - - xb := x.first() - yb := y.first() - sb := s.first() - for xb != &none || yb != &none { - if sb == &none { - sb = s.insertBlockBefore(sb) - } - switch { - case yb == &none || (xb != &none && xb.offset < yb.offset): - sb.offset = xb.offset - sb.bits = xb.bits - xb = x.next(xb) - - case xb == &none || (yb != &none && yb.offset < xb.offset): - sb.offset = yb.offset - sb.bits = yb.bits - yb = y.next(yb) - - default: - sb.offset = xb.offset - for i := range xb.bits { - sb.bits[i] = xb.bits[i] | yb.bits[i] - } - xb = x.next(xb) - yb = y.next(yb) - } - sb = s.next(sb) - } - - s.discardTail(sb) -} - -// DifferenceWith sets s to the difference s ∖ x. -func (s *Sparse) DifferenceWith(x *Sparse) { - if s == x { - s.Clear() - return - } - - xb := x.first() - sb := s.first() - for xb != &none && sb != &none { - switch { - case xb.offset > sb.offset: - sb = s.next(sb) - - case xb.offset < sb.offset: - xb = x.next(xb) - - default: - var sum word - for i := range sb.bits { - r := sb.bits[i] & ^xb.bits[i] - sb.bits[i] = r - sum |= r - } - if sum == 0 { - sb = s.removeBlock(sb) - } else { - sb = s.next(sb) - } - xb = x.next(xb) - } - } -} - -// Difference sets s to the difference x ∖ y. 
-func (s *Sparse) Difference(x, y *Sparse) { - switch { - case x == y: - s.Clear() - return - case s == x: - s.DifferenceWith(y) - return - case s == y: - var y2 Sparse - y2.Copy(y) - s.Difference(x, &y2) - return - } - - xb := x.first() - yb := y.first() - sb := s.first() - for xb != &none && yb != &none { - if xb.offset > yb.offset { - // y has block, x has &none - yb = y.next(yb) - continue - } - - if sb == &none { - sb = s.insertBlockBefore(sb) - } - sb.offset = xb.offset - - switch { - case xb.offset < yb.offset: - // x has block, y has &none - sb.bits = xb.bits - - sb = s.next(sb) - - default: - // x and y have corresponding blocks - var sum word - for i := range sb.bits { - r := xb.bits[i] & ^yb.bits[i] - sb.bits[i] = r - sum |= r - } - if sum != 0 { - sb = s.next(sb) - } else { - // sb will be overwritten or removed - } - - yb = y.next(yb) - } - xb = x.next(xb) - } - - for xb != &none { - if sb == &none { - sb = s.insertBlockBefore(sb) - } - sb.offset = xb.offset - sb.bits = xb.bits - sb = s.next(sb) - - xb = x.next(xb) - } - - s.discardTail(sb) -} - -// SymmetricDifferenceWith sets s to the symmetric difference s ∆ x. -func (s *Sparse) SymmetricDifferenceWith(x *Sparse) { - if s == x { - s.Clear() - return - } - - sb := s.first() - xb := x.first() - for xb != &none && sb != &none { - switch { - case sb.offset < xb.offset: - sb = s.next(sb) - case xb.offset < sb.offset: - nb := s.insertBlockBefore(sb) - nb.offset = xb.offset - nb.bits = xb.bits - xb = x.next(xb) - default: - var sum word - for i := range sb.bits { - r := sb.bits[i] ^ xb.bits[i] - sb.bits[i] = r - sum |= r - } - if sum == 0 { - sb = s.removeBlock(sb) - } else { - sb = s.next(sb) - } - xb = x.next(xb) - } - } - - for xb != &none { // append the tail of x to s - sb = s.insertBlockBefore(sb) - sb.offset = xb.offset - sb.bits = xb.bits - sb = s.next(sb) - xb = x.next(xb) - } -} - -// SymmetricDifference sets s to the symmetric difference x ∆ y. 
-func (s *Sparse) SymmetricDifference(x, y *Sparse) { - switch { - case x == y: - s.Clear() - return - case s == x: - s.SymmetricDifferenceWith(y) - return - case s == y: - s.SymmetricDifferenceWith(x) - return - } - - sb := s.first() - xb := x.first() - yb := y.first() - for xb != &none && yb != &none { - if sb == &none { - sb = s.insertBlockBefore(sb) - } - switch { - case yb.offset < xb.offset: - sb.offset = yb.offset - sb.bits = yb.bits - sb = s.next(sb) - yb = y.next(yb) - case xb.offset < yb.offset: - sb.offset = xb.offset - sb.bits = xb.bits - sb = s.next(sb) - xb = x.next(xb) - default: - var sum word - for i := range sb.bits { - r := xb.bits[i] ^ yb.bits[i] - sb.bits[i] = r - sum |= r - } - if sum != 0 { - sb.offset = xb.offset - sb = s.next(sb) - } - xb = x.next(xb) - yb = y.next(yb) - } - } - - for xb != &none { // append the tail of x to s - if sb == &none { - sb = s.insertBlockBefore(sb) - } - sb.offset = xb.offset - sb.bits = xb.bits - sb = s.next(sb) - xb = x.next(xb) - } - - for yb != &none { // append the tail of y to s - if sb == &none { - sb = s.insertBlockBefore(sb) - } - sb.offset = yb.offset - sb.bits = yb.bits - sb = s.next(sb) - yb = y.next(yb) - } - - s.discardTail(sb) -} - -// SubsetOf reports whether s ∖ x = ∅. -func (s *Sparse) SubsetOf(x *Sparse) bool { - if s == x { - return true - } - - sb := s.first() - xb := x.first() - for sb != &none { - switch { - case xb == &none || xb.offset > sb.offset: - return false - case xb.offset < sb.offset: - xb = x.next(xb) - default: - for i := range sb.bits { - if sb.bits[i]&^xb.bits[i] != 0 { - return false - } - } - sb = s.next(sb) - xb = x.next(xb) - } - } - return true -} - -// Equals reports whether the sets s and t have the same elements. 
-func (s *Sparse) Equals(t *Sparse) bool { - if s == t { - return true - } - sb := s.first() - tb := t.first() - for { - switch { - case sb == &none && tb == &none: - return true - case sb == &none || tb == &none: - return false - case sb.offset != tb.offset: - return false - case sb.bits != tb.bits: - return false - } - - sb = s.next(sb) - tb = t.next(tb) - } -} - -// String returns a human-readable description of the set s. -func (s *Sparse) String() string { - var buf bytes.Buffer - buf.WriteByte('{') - s.forEach(func(x int) { - if buf.Len() > 1 { - buf.WriteByte(' ') - } - fmt.Fprintf(&buf, "%d", x) - }) - buf.WriteByte('}') - return buf.String() -} - -// BitString returns the set as a string of 1s and 0s denoting the sum -// of the i'th powers of 2, for each i in s. A radix point, always -// preceded by a digit, appears if the sum is non-integral. -// -// Examples: -// {}.BitString() = "0" -// {4,5}.BitString() = "110000" -// {-3}.BitString() = "0.001" -// {-3,0,4,5}.BitString() = "110001.001" -// -func (s *Sparse) BitString() string { - if s.IsEmpty() { - return "0" - } - - min, max := s.Min(), s.Max() - var nbytes int - if max > 0 { - nbytes = max - } - nbytes++ // zero bit - radix := nbytes - if min < 0 { - nbytes += len(".") - min - } - - b := make([]byte, nbytes) - for i := range b { - b[i] = '0' - } - if radix < nbytes { - b[radix] = '.' - } - s.forEach(func(x int) { - if x >= 0 { - x += len(".") - } - b[radix-x] = '1' - }) - return string(b) -} - -// GoString returns a string showing the internal representation of -// the set s. -// -func (s *Sparse) GoString() string { - var buf bytes.Buffer - for b := s.first(); b != &none; b = s.next(b) { - fmt.Fprintf(&buf, "block %p {offset=%d next=%p prev=%p", - b, b.offset, b.next, b.prev) - for _, w := range b.bits { - fmt.Fprintf(&buf, " 0%016x", w) - } - fmt.Fprintf(&buf, "}\n") - } - return buf.String() -} - -// AppendTo returns the result of appending the elements of s to slice -// in order. 
-func (s *Sparse) AppendTo(slice []int) []int { - s.forEach(func(x int) { - slice = append(slice, x) - }) - return slice -} - -// -- Testing/debugging ------------------------------------------------ - -// check returns an error if the representation invariants of s are violated. -func (s *Sparse) check() error { - s.init() - if s.root.empty() { - // An empty set must have only the root block with offset MaxInt. - if s.root.next != &s.root { - return fmt.Errorf("multiple blocks with empty root block") - } - if s.root.offset != MaxInt { - return fmt.Errorf("empty set has offset %d, should be MaxInt", s.root.offset) - } - return nil - } - for b := s.first(); ; b = s.next(b) { - if b.offset%bitsPerBlock != 0 { - return fmt.Errorf("bad offset modulo: %d", b.offset) - } - if b.empty() { - return fmt.Errorf("empty block") - } - if b.prev.next != b { - return fmt.Errorf("bad prev.next link") - } - if b.next.prev != b { - return fmt.Errorf("bad next.prev link") - } - if b.next == &s.root { - break - } - if b.offset >= b.next.offset { - return fmt.Errorf("bad offset order: b.offset=%d, b.next.offset=%d", - b.offset, b.next.offset) - } - } - return nil -} diff --git a/vendor/golang.org/x/tools/container/intsets/util.go b/vendor/golang.org/x/tools/container/intsets/util.go deleted file mode 100644 index dd1db86b1..000000000 --- a/vendor/golang.org/x/tools/container/intsets/util.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package intsets - -// From Hacker's Delight, fig 5.2. 
-func popcountHD(x uint32) int { - x -= (x >> 1) & 0x55555555 - x = (x & 0x33333333) + ((x >> 2) & 0x33333333) - x = (x + (x >> 4)) & 0x0f0f0f0f - x = x + (x >> 8) - x = x + (x >> 16) - return int(x & 0x0000003f) -} - -var a [1 << 8]byte - -func init() { - for i := range a { - var n byte - for x := i; x != 0; x >>= 1 { - if x&1 != 0 { - n++ - } - } - a[i] = n - } -} - -func popcountTable(x word) int { - return int(a[byte(x>>(0*8))] + - a[byte(x>>(1*8))] + - a[byte(x>>(2*8))] + - a[byte(x>>(3*8))] + - a[byte(x>>(4*8))] + - a[byte(x>>(5*8))] + - a[byte(x>>(6*8))] + - a[byte(x>>(7*8))]) -} - -// nlz returns the number of leading zeros of x. -// From Hacker's Delight, fig 5.11. -func nlz(x word) int { - x |= (x >> 1) - x |= (x >> 2) - x |= (x >> 4) - x |= (x >> 8) - x |= (x >> 16) - x |= (x >> 32) - return popcount(^x) -} - -// ntz returns the number of trailing zeros of x. -// From Hacker's Delight, fig 5.13. -func ntz(x word) int { - if x == 0 { - return bitsPerWord - } - n := 1 - if bitsPerWord == 64 { - if (x & 0xffffffff) == 0 { - n = n + 32 - x = x >> 32 - } - } - if (x & 0x0000ffff) == 0 { - n = n + 16 - x = x >> 16 - } - if (x & 0x000000ff) == 0 { - n = n + 8 - x = x >> 8 - } - if (x & 0x0000000f) == 0 { - n = n + 4 - x = x >> 4 - } - if (x & 0x00000003) == 0 { - n = n + 2 - x = x >> 2 - } - return n - int(x&1) -} diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/LICENSE b/vendor/gopkg.in/igm/sockjs-go.v2/LICENSE new file mode 100644 index 000000000..9bdc05fa3 --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2012-2014, sockjs-go authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/doc.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/doc.go new file mode 100644 index 000000000..a1a46e7dd --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/doc.go @@ -0,0 +1,5 @@ +/* +Package sockjs is a server side implementation of sockjs protocol. 
+*/ + +package sockjs diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/eventsource.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/eventsource.go new file mode 100644 index 000000000..41c460fa8 --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/eventsource.go @@ -0,0 +1,32 @@ +package sockjs + +import ( + "fmt" + "io" + "net/http" +) + +func (h *handler) eventSource(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("content-type", "text/event-stream; charset=UTF-8") + fmt.Fprintf(rw, "\r\n") + rw.(http.Flusher).Flush() + + recv := newHTTPReceiver(rw, h.options.ResponseLimit, new(eventSourceFrameWriter)) + sess, _ := h.sessionByRequest(req) + if err := sess.attachReceiver(recv); err != nil { + recv.sendFrame(cFrame) + recv.close() + return + } + + select { + case <-recv.doneNotify(): + case <-recv.interruptedNotify(): + } +} + +type eventSourceFrameWriter struct{} + +func (*eventSourceFrameWriter) write(w io.Writer, frame string) (int, error) { + return fmt.Fprintf(w, "data: %s\r\n\r\n", frame) +} diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/frame.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/frame.go new file mode 100644 index 000000000..fb32e3238 --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/frame.go @@ -0,0 +1,11 @@ +package sockjs + +import ( + "encoding/json" + "fmt" +) + +func closeFrame(status uint32, reason string) string { + bytes, _ := json.Marshal([]interface{}{status, reason}) + return fmt.Sprintf("c%s", string(bytes)) +} diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/handler.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/handler.go new file mode 100644 index 000000000..89da5d216 --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/handler.go @@ -0,0 +1,133 @@ +package sockjs + +import ( + "errors" + "net/http" + "net/url" + "regexp" + "strings" + "sync" +) + +var ( + prefixRegexp = make(map[string]*regexp.Regexp) + prefixRegexpMu sync.Mutex // protects prefixRegexp +) + +type handler struct { + 
prefix string + options Options + handlerFunc func(Session) + mappings []*mapping + + sessionsMux sync.Mutex + sessions map[string]*session +} + +// NewHandler creates new HTTP handler that conforms to the basic net/http.Handler interface. +// It takes path prefix, options and sockjs handler function as parameters +func NewHandler(prefix string, opts Options, handleFunc func(Session)) http.Handler { + return newHandler(prefix, opts, handleFunc) +} + +func newHandler(prefix string, opts Options, handlerFunc func(Session)) *handler { + h := &handler{ + prefix: prefix, + options: opts, + handlerFunc: handlerFunc, + sessions: make(map[string]*session), + } + + sessionPrefix := prefix + "/[^/.]+/[^/.]+" + h.mappings = []*mapping{ + newMapping("GET", prefix+"[/]?$", welcomeHandler), + newMapping("OPTIONS", prefix+"/info$", opts.cookie, xhrCors, cacheFor, opts.info), + newMapping("GET", prefix+"/info$", xhrCors, noCache, opts.info), + // XHR + newMapping("POST", sessionPrefix+"/xhr_send$", opts.cookie, xhrCors, noCache, h.xhrSend), + newMapping("OPTIONS", sessionPrefix+"/xhr_send$", opts.cookie, xhrCors, cacheFor, xhrOptions), + newMapping("POST", sessionPrefix+"/xhr$", opts.cookie, xhrCors, noCache, h.xhrPoll), + newMapping("OPTIONS", sessionPrefix+"/xhr$", opts.cookie, xhrCors, cacheFor, xhrOptions), + newMapping("POST", sessionPrefix+"/xhr_streaming$", opts.cookie, xhrCors, noCache, h.xhrStreaming), + newMapping("OPTIONS", sessionPrefix+"/xhr_streaming$", opts.cookie, xhrCors, cacheFor, xhrOptions), + // EventStream + newMapping("GET", sessionPrefix+"/eventsource$", opts.cookie, xhrCors, noCache, h.eventSource), + // Htmlfile + newMapping("GET", sessionPrefix+"/htmlfile$", opts.cookie, xhrCors, noCache, h.htmlFile), + // JsonP + newMapping("GET", sessionPrefix+"/jsonp$", opts.cookie, xhrCors, noCache, h.jsonp), + newMapping("OPTIONS", sessionPrefix+"/jsonp$", opts.cookie, xhrCors, cacheFor, xhrOptions), + newMapping("POST", sessionPrefix+"/jsonp_send$", opts.cookie, 
xhrCors, noCache, h.jsonpSend), + // IFrame + newMapping("GET", prefix+"/iframe[0-9-.a-z_]*.html$", cacheFor, h.iframe), + } + if opts.Websocket { + h.mappings = append(h.mappings, newMapping("GET", sessionPrefix+"/websocket$", h.sockjsWebsocket)) + } + return h +} + +func (h *handler) Prefix() string { return h.prefix } + +func (h *handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // iterate over mappings + allowedMethods := []string{} + for _, mapping := range h.mappings { + if match, method := mapping.matches(req); match == fullMatch { + for _, hf := range mapping.chain { + hf(rw, req) + } + return + } else if match == pathMatch { + allowedMethods = append(allowedMethods, method) + } + } + if len(allowedMethods) > 0 { + rw.Header().Set("allow", strings.Join(allowedMethods, ", ")) + rw.Header().Set("Content-Type", "") + rw.WriteHeader(http.StatusMethodNotAllowed) + return + } + http.NotFound(rw, req) +} + +func (h *handler) parseSessionID(url *url.URL) (string, error) { + // cache compiled regexp objects for most used prefixes + prefixRegexpMu.Lock() + session, ok := prefixRegexp[h.prefix] + if !ok { + session = regexp.MustCompile(h.prefix + "/(?P[^/.]+)/(?P[^/.]+)/.*") + prefixRegexp[h.prefix] = session + } + prefixRegexpMu.Unlock() + + matches := session.FindStringSubmatch(url.Path) + if len(matches) == 3 { + return matches[2], nil + } + return "", errors.New("unable to parse URL for session") +} + +func (h *handler) sessionByRequest(req *http.Request) (*session, error) { + h.sessionsMux.Lock() + defer h.sessionsMux.Unlock() + sessionID, err := h.parseSessionID(req.URL) + if err != nil { + return nil, err + } + sess, exists := h.sessions[sessionID] + if !exists { + sess = newSession(sessionID, h.options.DisconnectDelay, h.options.HeartbeatDelay) + h.sessions[sessionID] = sess + if h.handlerFunc != nil { + go h.handlerFunc(sess) + } + go func() { + <-sess.closedNotify() + h.sessionsMux.Lock() + delete(h.sessions, sessionID) + 
h.sessionsMux.Unlock() + }() + } + return sess, nil +} diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/htmlfile.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/htmlfile.go new file mode 100644 index 000000000..c7510ee76 --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/htmlfile.go @@ -0,0 +1,58 @@ +package sockjs + +import ( + "fmt" + "io" + "net/http" + "strings" +) + +var iframeTemplate = ` + + + +

Don't panic!

+ +` + +func init() { + iframeTemplate += strings.Repeat(" ", 1024-len(iframeTemplate)+14) + iframeTemplate += "\r\n\r\n" +} + +func (h *handler) htmlFile(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("content-type", "text/html; charset=UTF-8") + + req.ParseForm() + callback := req.Form.Get("c") + if callback == "" { + http.Error(rw, `"callback" parameter required`, http.StatusInternalServerError) + return + } + rw.WriteHeader(http.StatusOK) + fmt.Fprintf(rw, iframeTemplate, callback) + rw.(http.Flusher).Flush() + sess, _ := h.sessionByRequest(req) + recv := newHTTPReceiver(rw, h.options.ResponseLimit, new(htmlfileFrameWriter)) + if err := sess.attachReceiver(recv); err != nil { + recv.sendFrame(cFrame) + recv.close() + return + } + select { + case <-recv.doneNotify(): + case <-recv.interruptedNotify(): + } +} + +type htmlfileFrameWriter struct{} + +func (*htmlfileFrameWriter) write(w io.Writer, frame string) (int, error) { + return fmt.Fprintf(w, "\r\n", quote(frame)) +} diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/httpreceiver.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/httpreceiver.go new file mode 100644 index 000000000..290288319 --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/httpreceiver.go @@ -0,0 +1,105 @@ +package sockjs + +import ( + "fmt" + "io" + "net/http" + "strings" + "sync" +) + +type frameWriter interface { + write(writer io.Writer, frame string) (int, error) +} + +type httpReceiverState int + +const ( + stateHTTPReceiverActive httpReceiverState = iota + stateHTTPReceiverClosed +) + +type httpReceiver struct { + sync.Mutex + state httpReceiverState + + frameWriter frameWriter + rw http.ResponseWriter + maxResponseSize uint32 + currentResponseSize uint32 + doneCh chan struct{} + interruptCh chan struct{} +} + +func newHTTPReceiver(rw http.ResponseWriter, maxResponse uint32, frameWriter frameWriter) *httpReceiver { + recv := &httpReceiver{ + rw: rw, + frameWriter: frameWriter, + maxResponseSize: maxResponse, + 
doneCh: make(chan struct{}), + interruptCh: make(chan struct{}), + } + if closeNotifier, ok := rw.(http.CloseNotifier); ok { + // if supported check for close notifications from http.RW + closeNotifyCh := closeNotifier.CloseNotify() + go func() { + select { + case <-closeNotifyCh: + recv.Lock() + defer recv.Unlock() + if recv.state < stateHTTPReceiverClosed { + recv.state = stateHTTPReceiverClosed + close(recv.interruptCh) + } + case <-recv.doneCh: + // ok, no action needed here, receiver closed in correct way + // just finish the routine + } + }() + } + return recv +} + +func (recv *httpReceiver) sendBulk(messages ...string) { + if len(messages) > 0 { + recv.sendFrame(fmt.Sprintf("a[%s]", + strings.Join( + transform(messages, quote), + ",", + ), + )) + } +} + +func (recv *httpReceiver) sendFrame(value string) { + recv.Lock() + defer recv.Unlock() + + if recv.state == stateHTTPReceiverActive { + // TODO(igm) check err, possibly act as if interrupted + n, _ := recv.frameWriter.write(recv.rw, value) + recv.currentResponseSize += uint32(n) + if recv.currentResponseSize >= recv.maxResponseSize { + recv.state = stateHTTPReceiverClosed + close(recv.doneCh) + } else { + recv.rw.(http.Flusher).Flush() + } + } +} + +func (recv *httpReceiver) doneNotify() <-chan struct{} { return recv.doneCh } +func (recv *httpReceiver) interruptedNotify() <-chan struct{} { return recv.interruptCh } +func (recv *httpReceiver) close() { + recv.Lock() + defer recv.Unlock() + if recv.state < stateHTTPReceiverClosed { + recv.state = stateHTTPReceiverClosed + close(recv.doneCh) + } +} +func (recv *httpReceiver) canSend() bool { + recv.Lock() + defer recv.Unlock() + return recv.state != stateHTTPReceiverClosed +} diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/iframe.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/iframe.go new file mode 100644 index 000000000..0975334be --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/iframe.go @@ -0,0 +1,42 @@ +package sockjs + +import ( + 
"crypto/md5" + "fmt" + "net/http" + "text/template" +) + +var tmpl = template.Must(template.New("iframe").Parse(iframeBody)) + +func (h *handler) iframe(rw http.ResponseWriter, req *http.Request) { + etagReq := req.Header.Get("If-None-Match") + hash := md5.New() + hash.Write([]byte(iframeBody)) + etag := fmt.Sprintf("%x", hash.Sum(nil)) + if etag == etagReq { + rw.WriteHeader(http.StatusNotModified) + return + } + + rw.Header().Set("Content-Type", "text/html; charset=UTF-8") + rw.Header().Add("ETag", etag) + tmpl.Execute(rw, h.options.SockJSURL) +} + +var iframeBody = ` + + + + + + + + +

Don't panic!

+

This is a SockJS hidden iframe. It's used for cross domain magic.

+ +` diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/jsonp.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/jsonp.go new file mode 100644 index 000000000..c02803cf3 --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/jsonp.go @@ -0,0 +1,77 @@ +package sockjs + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" +) + +func (h *handler) jsonp(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("content-type", "application/javascript; charset=UTF-8") + + req.ParseForm() + callback := req.Form.Get("c") + if callback == "" { + http.Error(rw, `"callback" parameter required`, http.StatusInternalServerError) + return + } + rw.WriteHeader(http.StatusOK) + rw.(http.Flusher).Flush() + + sess, _ := h.sessionByRequest(req) + recv := newHTTPReceiver(rw, 1, &jsonpFrameWriter{callback}) + if err := sess.attachReceiver(recv); err != nil { + recv.sendFrame(cFrame) + recv.close() + return + } + select { + case <-recv.doneNotify(): + case <-recv.interruptedNotify(): + } +} + +func (h *handler) jsonpSend(rw http.ResponseWriter, req *http.Request) { + req.ParseForm() + var data io.Reader + data = req.Body + + formReader := strings.NewReader(req.PostFormValue("d")) + if formReader.Len() != 0 { + data = formReader + } + if data == nil { + http.Error(rw, "Payload expected.", http.StatusInternalServerError) + return + } + var messages []string + err := json.NewDecoder(data).Decode(&messages) + if err == io.EOF { + http.Error(rw, "Payload expected.", http.StatusInternalServerError) + return + } + if err != nil { + http.Error(rw, "Broken JSON encoding.", http.StatusInternalServerError) + return + } + sessionID, _ := h.parseSessionID(req.URL) + h.sessionsMux.Lock() + defer h.sessionsMux.Unlock() + if sess, ok := h.sessions[sessionID]; !ok { + http.NotFound(rw, req) + } else { + _ = sess.accept(messages...) // TODO(igm) reponse with http.StatusInternalServerError in case of err? 
+ rw.Header().Set("content-type", "text/plain; charset=UTF-8") + rw.Write([]byte("ok")) + } +} + +type jsonpFrameWriter struct { + callback string +} + +func (j *jsonpFrameWriter) write(w io.Writer, frame string) (int, error) { + return fmt.Fprintf(w, "%s(%s);\r\n", j.callback, quote(frame)) +} diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/mapping.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/mapping.go new file mode 100644 index 000000000..9b1cbdf76 --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/mapping.go @@ -0,0 +1,36 @@ +package sockjs + +import ( + "net/http" + "regexp" +) + +type mapping struct { + method string + path *regexp.Regexp + chain []http.HandlerFunc +} + +func newMapping(method string, re string, handlers ...http.HandlerFunc) *mapping { + return &mapping{method, regexp.MustCompile(re), handlers} +} + +type matchType uint32 + +const ( + fullMatch matchType = iota + pathMatch + noMatch +) + +// matches checks if given req.URL is a match with a mapping. Match can be either full, partial (http method mismatch) or no match. 
+func (m *mapping) matches(req *http.Request) (match matchType, method string) { + if !m.path.MatchString(req.URL.Path) { + match, method = noMatch, "" + } else if m.method != req.Method { + match, method = pathMatch, m.method + } else { + match, method = fullMatch, m.method + } + return +} diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/options.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/options.go new file mode 100644 index 000000000..a41c4565b --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/options.go @@ -0,0 +1,114 @@ +package sockjs + +import ( + "encoding/json" + "fmt" + "math/rand" + "net/http" + "sync" + "time" +) + +var ( + entropy *rand.Rand + entropyMutex sync.Mutex +) + +func init() { + entropy = rand.New(rand.NewSource(time.Now().UnixNano())) +} + +// Options type is used for defining various sockjs options +type Options struct { + // Transports which don't support cross-domain communication natively ('eventsource' to name one) use an iframe trick. + // A simple page is served from the SockJS server (using its foreign domain) and is placed in an invisible iframe. + // Code run from this iframe doesn't need to worry about cross-domain issues, as it's being run from domain local to the SockJS server. + // This iframe also does need to load SockJS javascript client library, and this option lets you specify its url (if you're unsure, + // point it to the latest minified SockJS client release, this is the default). You must explicitly specify this url on the server + // side for security reasons - we don't want the possibility of running any foreign javascript within the SockJS domain (aka cross site scripting attack). + // Also, sockjs javascript library is probably already cached by the browser - it makes sense to reuse the sockjs url you're using in normally. + SockJSURL string + // Most streaming transports save responses on the client side and don't free memory used by delivered messages. 
+ // Such transports need to be garbage-collected once in a while. `response_limit` sets a minimum number of bytes that can be send + // over a single http streaming request before it will be closed. After that client needs to open new request. + // Setting this value to one effectively disables streaming and will make streaming transports to behave like polling transports. + // The default value is 128K. + ResponseLimit uint32 + // Some load balancers don't support websockets. This option can be used to disable websockets support by the server. By default websockets are enabled. + Websocket bool + // In order to keep proxies and load balancers from closing long running http requests we need to pretend that the connection is active + // and send a heartbeat packet once in a while. This setting controls how often this is done. + // By default a heartbeat packet is sent every 25 seconds. + HeartbeatDelay time.Duration + // The server closes a session when a client receiving connection have not been seen for a while. + // This delay is configured by this setting. + // By default the session is closed when a receiving connection wasn't seen for 5 seconds. + DisconnectDelay time.Duration + // Some hosting providers enable sticky sessions only to requests that have JSessionID cookie set. + // This setting controls if the server should set this cookie to a dummy value. + // By default setting JSessionID cookie is disabled. More sophisticated behaviour can be achieved by supplying a function. 
+ JSessionID func(http.ResponseWriter, *http.Request) +} + +// DefaultOptions is a convenient set of options to be used for sockjs +var DefaultOptions = Options{ + Websocket: true, + JSessionID: nil, + SockJSURL: "http://cdn.sockjs.org/sockjs-0.3.min.js", + HeartbeatDelay: 25 * time.Second, + DisconnectDelay: 5 * time.Second, + ResponseLimit: 128 * 1024, +} + +type info struct { + Websocket bool `json:"websocket"` + CookieNeeded bool `json:"cookie_needed"` + Origins []string `json:"origins"` + Entropy int32 `json:"entropy"` +} + +func (options *Options) info(rw http.ResponseWriter, req *http.Request) { + switch req.Method { + case "GET": + rw.Header().Set("Content-Type", "application/json; charset=UTF-8") + json.NewEncoder(rw).Encode(info{ + Websocket: options.Websocket, + CookieNeeded: options.JSessionID != nil, + Origins: []string{"*:*"}, + Entropy: generateEntropy(), + }) + case "OPTIONS": + rw.Header().Set("Access-Control-Allow-Methods", "OPTIONS, GET") + rw.Header().Set("Access-Control-Max-Age", fmt.Sprintf("%d", 365*24*60*60)) + rw.WriteHeader(http.StatusNoContent) // 204 + default: + http.NotFound(rw, req) + } +} + +// DefaultJSessionID is a default behaviour function to be used in options for JSessionID if JSESSIONID is needed +func DefaultJSessionID(rw http.ResponseWriter, req *http.Request) { + cookie, err := req.Cookie("JSESSIONID") + if err == http.ErrNoCookie { + cookie = &http.Cookie{ + Name: "JSESSIONID", + Value: "dummy", + } + } + cookie.Path = "/" + header := rw.Header() + header.Add("Set-Cookie", cookie.String()) +} + +func (options *Options) cookie(rw http.ResponseWriter, req *http.Request) { + if options.JSessionID != nil { // cookie is needed + options.JSessionID(rw, req) + } +} + +func generateEntropy() int32 { + entropyMutex.Lock() + entropy := entropy.Int31() + entropyMutex.Unlock() + return entropy +} diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/session.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/session.go new file mode 100644 
index 000000000..0957cfb28 --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/session.go @@ -0,0 +1,219 @@ +package sockjs + +import ( + "encoding/gob" + "errors" + "io" + "sync" + "time" +) + +type sessionState uint32 + +const ( + // brand new session, need to send "h" to receiver + sessionOpening sessionState = iota + // active session + sessionActive + // session being closed, sending "closeFrame" to receivers + sessionClosing + // closed session, no activity at all, should be removed from handler completely and not reused + sessionClosed +) + +var ( + // ErrSessionNotOpen error is used to denote session not in open state. + // Recv() and Send() operations are not suppored if session is closed. + ErrSessionNotOpen = errors.New("sockjs: session not in open state") + errSessionReceiverAttached = errors.New("sockjs: another receiver already attached") +) + +type session struct { + sync.Mutex + id string + state sessionState + // protocol dependent receiver (xhr, eventsource, ...) + recv receiver + // messages to be sent to client + sendBuffer []string + // messages received from client to be consumed by application + // receivedBuffer chan string + msgReader *io.PipeReader + msgWriter *io.PipeWriter + msgEncoder *gob.Encoder + msgDecoder *gob.Decoder + + // closeFrame to send after session is closed + closeFrame string + + // internal timer used to handle session expiration if no receiver is attached, or heartbeats if recevier is attached + sessionTimeoutInterval time.Duration + heartbeatInterval time.Duration + timer *time.Timer + // once the session timeouts this channel also closes + closeCh chan struct{} +} + +type receiver interface { + // sendBulk send multiple data messages in frame frame in format: a["msg 1", "msg 2", ....] 
+ sendBulk(...string) + // sendFrame sends given frame over the wire (with possible chunking depending on receiver) + sendFrame(string) + // close closes the receiver in a "done" way (idempotent) + close() + canSend() bool + // done notification channel gets closed whenever receiver ends + doneNotify() <-chan struct{} + // interrupted channel gets closed whenever receiver is interrupted (i.e. http connection drops,...) + interruptedNotify() <-chan struct{} +} + +// Session is a central component that handles receiving and sending frames. It maintains internal state +func newSession(sessionID string, sessionTimeoutInterval, heartbeatInterval time.Duration) *session { + r, w := io.Pipe() + s := &session{ + id: sessionID, + msgReader: r, + msgWriter: w, + msgEncoder: gob.NewEncoder(w), + msgDecoder: gob.NewDecoder(r), + sessionTimeoutInterval: sessionTimeoutInterval, + heartbeatInterval: heartbeatInterval, + closeCh: make(chan struct{})} + s.Lock() // "go test -race" complains if ommited, not sure why as no race can happen here + s.timer = time.AfterFunc(sessionTimeoutInterval, s.close) + s.Unlock() + return s +} + +func (s *session) sendMessage(msg string) error { + s.Lock() + defer s.Unlock() + if s.state > sessionActive { + return ErrSessionNotOpen + } + s.sendBuffer = append(s.sendBuffer, msg) + if s.recv != nil && s.recv.canSend() { + s.recv.sendBulk(s.sendBuffer...) + s.sendBuffer = nil + } + return nil +} + +func (s *session) attachReceiver(recv receiver) error { + s.Lock() + defer s.Unlock() + if s.recv != nil { + return errSessionReceiverAttached + } + s.recv = recv + go func(r receiver) { + select { + case <-r.doneNotify(): + s.detachReceiver() + case <-r.interruptedNotify(): + s.detachReceiver() + s.close() + } + }(recv) + + if s.state == sessionClosing { + s.recv.sendFrame(s.closeFrame) + s.recv.close() + return nil + } + if s.state == sessionOpening { + s.recv.sendFrame("o") + s.state = sessionActive + } + s.recv.sendBulk(s.sendBuffer...) 
+ s.sendBuffer = nil + s.timer.Stop() + if s.heartbeatInterval > 0 { + s.timer = time.AfterFunc(s.heartbeatInterval, s.heartbeat) + } + return nil +} + +func (s *session) detachReceiver() { + s.Lock() + defer s.Unlock() + s.timer.Stop() + s.timer = time.AfterFunc(s.sessionTimeoutInterval, s.close) + s.recv = nil +} + +func (s *session) heartbeat() { + s.Lock() + defer s.Unlock() + if s.recv != nil { // timer could have fired between Lock and timer.Stop in detachReceiver + s.recv.sendFrame("h") + s.timer = time.AfterFunc(s.heartbeatInterval, s.heartbeat) + } +} + +func (s *session) accept(messages ...string) error { + for _, msg := range messages { + if err := s.msgEncoder.Encode(msg); err != nil { + return err + } + } + return nil +} + +// idempotent operation +func (s *session) closing() { + s.Lock() + defer s.Unlock() + if s.state < sessionClosing { + s.msgReader.Close() + s.msgWriter.Close() + s.state = sessionClosing + if s.recv != nil { + s.recv.sendFrame(s.closeFrame) + s.recv.close() + } + } +} + +// idempotent operation +func (s *session) close() { + s.closing() + s.Lock() + defer s.Unlock() + if s.state < sessionClosed { + s.state = sessionClosed + s.timer.Stop() + close(s.closeCh) + } +} + +func (s *session) closedNotify() <-chan struct{} { return s.closeCh } + +// Conn interface implementation +func (s *session) Close(status uint32, reason string) error { + s.Lock() + if s.state < sessionClosing { + s.closeFrame = closeFrame(status, reason) + s.Unlock() + s.closing() + return nil + } + s.Unlock() + return ErrSessionNotOpen +} + +func (s *session) Recv() (string, error) { + var msg string + err := s.msgDecoder.Decode(&msg) + if err == io.ErrClosedPipe { + err = ErrSessionNotOpen + } + return msg, err +} + +func (s *session) Send(msg string) error { + return s.sendMessage(msg) +} + +func (s *session) ID() string { return s.id } diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/sockjs.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/sockjs.go new file mode 
100644 index 000000000..b5523b067 --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/sockjs.go @@ -0,0 +1,13 @@ +package sockjs + +// Session represents a connection between server and client. +type Session interface { + // Id returns a session id + ID() string + // Recv reads one text frame from session + Recv() (string, error) + // Send sends one text frame to session + Send(string) error + // Close closes the session with provided code and reason. + Close(status uint32, reason string) error +} diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/utils.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/utils.go new file mode 100644 index 000000000..4f61139c9 --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/utils.go @@ -0,0 +1,16 @@ +package sockjs + +import "encoding/json" + +func quote(in string) string { + quoted, _ := json.Marshal(in) + return string(quoted) +} + +func transform(values []string, transformFn func(string) string) []string { + ret := make([]string, len(values)) + for i, msg := range values { + ret[i] = transformFn(msg) + } + return ret +} diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/web.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/web.go new file mode 100644 index 000000000..2fc20edee --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/web.go @@ -0,0 +1,47 @@ +package sockjs + +import ( + "fmt" + "net/http" + "time" +) + +func xhrCors(rw http.ResponseWriter, req *http.Request) { + header := rw.Header() + origin := req.Header.Get("origin") + if origin == "" || origin == "null" { + origin = "*" + } + header.Set("Access-Control-Allow-Origin", origin) + + if allowHeaders := req.Header.Get("Access-Control-Request-Headers"); allowHeaders != "" && allowHeaders != "null" { + header.Add("Access-Control-Allow-Headers", allowHeaders) + } + header.Add("Access-Control-Allow-Credentials", "true") +} + +func xhrOptions(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("Access-Control-Allow-Methods", "OPTIONS, POST") + 
rw.WriteHeader(http.StatusNoContent) // 204 +} + +func cacheFor(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d", 365*24*60*60)) + rw.Header().Set("Expires", time.Now().AddDate(1, 0, 0).Format(time.RFC1123)) + rw.Header().Set("Access-Control-Max-Age", fmt.Sprintf("%d", 365*24*60*60)) +} + +func noCache(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("Cache-Control", "no-store, no-cache, must-revalidate, max-age=0") +} + +func welcomeHandler(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("content-type", "text/plain;charset=UTF-8") + fmt.Fprintf(rw, "Welcome to SockJS!\n") +} + +func httpError(w http.ResponseWriter, error string, code int) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(code) + fmt.Fprintf(w, error) +} diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/websocket.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/websocket.go new file mode 100644 index 000000000..0321a48b6 --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/websocket.go @@ -0,0 +1,97 @@ +package sockjs + +import ( + "fmt" + "net/http" + "strings" + + "github.com/gorilla/websocket" +) + +// WebSocketReadBufSize is a parameter that is used for WebSocket Upgrader. 
+// https://github.com/gorilla/websocket/blob/master/server.go#L230 +var WebSocketReadBufSize = 4096 + +// WebSocketWriteBufSize is a parameter that is used for WebSocket Upgrader +// https://github.com/gorilla/websocket/blob/master/server.go#L230 +var WebSocketWriteBufSize = 4096 + +func (h *handler) sockjsWebsocket(rw http.ResponseWriter, req *http.Request) { + conn, err := websocket.Upgrade(rw, req, nil, WebSocketReadBufSize, WebSocketWriteBufSize) + if _, ok := err.(websocket.HandshakeError); ok { + http.Error(rw, `Can "Upgrade" only to "WebSocket".`, http.StatusBadRequest) + return + } else if err != nil { + rw.WriteHeader(http.StatusInternalServerError) + return + } + sessID, _ := h.parseSessionID(req.URL) + sess := newSession(sessID, h.options.DisconnectDelay, h.options.HeartbeatDelay) + if h.handlerFunc != nil { + go h.handlerFunc(sess) + } + + receiver := newWsReceiver(conn) + sess.attachReceiver(receiver) + readCloseCh := make(chan struct{}) + go func() { + var d []string + for { + err := conn.ReadJSON(&d) + if err != nil { + close(readCloseCh) + return + } + sess.accept(d...) 
+ } + }() + + select { + case <-readCloseCh: + case <-receiver.doneNotify(): + } + sess.close() + conn.Close() +} + +type wsReceiver struct { + conn *websocket.Conn + closeCh chan struct{} +} + +func newWsReceiver(conn *websocket.Conn) *wsReceiver { + return &wsReceiver{ + conn: conn, + closeCh: make(chan struct{}), + } +} + +func (w *wsReceiver) sendBulk(messages ...string) { + if len(messages) > 0 { + w.sendFrame(fmt.Sprintf("a[%s]", strings.Join(transform(messages, quote), ","))) + } +} + +func (w *wsReceiver) sendFrame(frame string) { + if err := w.conn.WriteMessage(websocket.TextMessage, []byte(frame)); err != nil { + w.close() + } +} + +func (w *wsReceiver) close() { + select { + case <-w.closeCh: // already closed + default: + close(w.closeCh) + } +} +func (w *wsReceiver) canSend() bool { + select { + case <-w.closeCh: // already closed + return false + default: + return true + } +} +func (w *wsReceiver) doneNotify() <-chan struct{} { return w.closeCh } +func (w *wsReceiver) interruptedNotify() <-chan struct{} { return nil } diff --git a/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/xhr.go b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/xhr.go new file mode 100644 index 000000000..986fe20ba --- /dev/null +++ b/vendor/gopkg.in/igm/sockjs-go.v2/sockjs/xhr.go @@ -0,0 +1,88 @@ +package sockjs + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" +) + +var ( + cFrame = closeFrame(2010, "Another connection still open") + xhrStreamingPrelude = strings.Repeat("h", 2048) +) + +func (h *handler) xhrSend(rw http.ResponseWriter, req *http.Request) { + if req.Body == nil { + httpError(rw, "Payload expected.", http.StatusInternalServerError) + return + } + var messages []string + err := json.NewDecoder(req.Body).Decode(&messages) + if err == io.EOF { + httpError(rw, "Payload expected.", http.StatusInternalServerError) + return + } + if _, ok := err.(*json.SyntaxError); ok || err == io.ErrUnexpectedEOF { + httpError(rw, "Broken JSON encoding.", 
http.StatusInternalServerError) + return + } + sessionID, err := h.parseSessionID(req.URL) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + h.sessionsMux.Lock() + defer h.sessionsMux.Unlock() + if sess, ok := h.sessions[sessionID]; !ok { + http.NotFound(rw, req) + } else { + _ = sess.accept(messages...) // TODO(igm) reponse with SISE in case of err? + rw.Header().Set("content-type", "text/plain; charset=UTF-8") // Ignored by net/http (but protocol test complains), see https://code.google.com/p/go/source/detail?r=902dc062bff8 + rw.WriteHeader(http.StatusNoContent) + } +} + +type xhrFrameWriter struct{} + +func (*xhrFrameWriter) write(w io.Writer, frame string) (int, error) { + return fmt.Fprintf(w, "%s\n", frame) +} + +func (h *handler) xhrPoll(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("content-type", "application/javascript; charset=UTF-8") + sess, _ := h.sessionByRequest(req) // TODO(igm) add err handling, although err should not happen as handler should not pass req in that case + receiver := newHTTPReceiver(rw, 1, new(xhrFrameWriter)) + if err := sess.attachReceiver(receiver); err != nil { + receiver.sendFrame(cFrame) + receiver.close() + return + } + + select { + case <-receiver.doneNotify(): + case <-receiver.interruptedNotify(): + } +} + +func (h *handler) xhrStreaming(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("content-type", "application/javascript; charset=UTF-8") + fmt.Fprintf(rw, "%s\n", xhrStreamingPrelude) + rw.(http.Flusher).Flush() + + sess, _ := h.sessionByRequest(req) + receiver := newHTTPReceiver(rw, h.options.ResponseLimit, new(xhrFrameWriter)) + + if err := sess.attachReceiver(receiver); err != nil { + receiver.sendFrame(cFrame) + receiver.close() + return + } + + select { + case <-receiver.doneNotify(): + case <-receiver.interruptedNotify(): + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go 
b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go new file mode 100644 index 000000000..5893df5bd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package httpstream adds multiplexed streaming support to HTTP requests and +// responses via connection upgrades. +package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream" diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go new file mode 100644 index 000000000..50d9a366f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go @@ -0,0 +1,149 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package httpstream + +import ( + "fmt" + "io" + "net/http" + "strings" + "time" +) + +const ( + HeaderConnection = "Connection" + HeaderUpgrade = "Upgrade" + HeaderProtocolVersion = "X-Stream-Protocol-Version" + HeaderAcceptedProtocolVersions = "X-Accepted-Stream-Protocol-Versions" +) + +// NewStreamHandler defines a function that is called when a new Stream is +// received. If no error is returned, the Stream is accepted; otherwise, +// the stream is rejected. After the reply frame has been sent, replySent is closed. +type NewStreamHandler func(stream Stream, replySent <-chan struct{}) error + +// NoOpNewStreamHandler is a stream handler that accepts a new stream and +// performs no other logic. +func NoOpNewStreamHandler(stream Stream, replySent <-chan struct{}) error { return nil } + +// Dialer knows how to open a streaming connection to a server. +type Dialer interface { + + // Dial opens a streaming connection to a server using one of the protocols + // specified (in order of most preferred to least preferred). + Dial(protocols ...string) (Connection, string, error) +} + +// UpgradeRoundTripper is a type of http.RoundTripper that is able to upgrade +// HTTP requests to support multiplexed bidirectional streams. After RoundTrip() +// is invoked, if the upgrade is successful, clients may retrieve the upgraded +// connection by calling UpgradeRoundTripper.Connection(). +type UpgradeRoundTripper interface { + http.RoundTripper + // NewConnection validates the response and creates a new Connection. + NewConnection(resp *http.Response) (Connection, error) +} + +// ResponseUpgrader knows how to upgrade HTTP requests and responses to +// add streaming support to them. +type ResponseUpgrader interface { + // UpgradeResponse upgrades an HTTP response to one that supports multiplexed + // streams. newStreamHandler will be called asynchronously whenever the + // other end of the upgraded connection creates a new stream. 
+ UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler NewStreamHandler) Connection +} + +// Connection represents an upgraded HTTP connection. +type Connection interface { + // CreateStream creates a new Stream with the supplied headers. + CreateStream(headers http.Header) (Stream, error) + // Close resets all streams and closes the connection. + Close() error + // CloseChan returns a channel that is closed when the underlying connection is closed. + CloseChan() <-chan bool + // SetIdleTimeout sets the amount of time the connection may remain idle before + // it is automatically closed. + SetIdleTimeout(timeout time.Duration) +} + +// Stream represents a bidirectional communications channel that is part of an +// upgraded connection. +type Stream interface { + io.ReadWriteCloser + // Reset closes both directions of the stream, indicating that neither client + // or server can use it any more. + Reset() error + // Headers returns the headers used to create the stream. + Headers() http.Header + // Identifier returns the stream's ID. + Identifier() uint32 +} + +// IsUpgradeRequest returns true if the given request is a connection upgrade request +func IsUpgradeRequest(req *http.Request) bool { + for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] { + if strings.Contains(strings.ToLower(h), strings.ToLower(HeaderUpgrade)) { + return true + } + } + return false +} + +func negotiateProtocol(clientProtocols, serverProtocols []string) string { + for i := range clientProtocols { + for j := range serverProtocols { + if clientProtocols[i] == serverProtocols[j] { + return clientProtocols[i] + } + } + } + return "" +} + +// Handshake performs a subprotocol negotiation. If the client did request a +// subprotocol, Handshake will select the first common value found in +// serverProtocols. If a match is found, Handshake adds a response header +// indicating the chosen subprotocol. 
If no match is found, HTTP forbidden is +// returned, along with a response header containing the list of protocols the +// server can accept. +func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) { + clientProtocols := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)] + if len(clientProtocols) == 0 { + // Kube 1.0 clients didn't support subprotocol negotiation. + // TODO require clientProtocols once Kube 1.0 is no longer supported + return "", nil + } + + if len(serverProtocols) == 0 { + // Kube 1.0 servers didn't support subprotocol negotiation. This is mainly for testing. + // TODO require serverProtocols once Kube 1.0 is no longer supported + return "", nil + } + + negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols) + if len(negotiatedProtocol) == 0 { + for i := range serverProtocols { + w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i]) + } + err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols) + http.Error(w, err.Error(), http.StatusForbidden) + return "", err + } + + w.Header().Add(HeaderProtocolVersion, negotiatedProtocol) + return negotiatedProtocol, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go new file mode 100644 index 000000000..9d222faa8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go @@ -0,0 +1,145 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "net" + "net/http" + "sync" + "time" + + "github.com/docker/spdystream" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog" +) + +// connection maintains state about a spdystream.Connection and its associated +// streams. +type connection struct { + conn *spdystream.Connection + streams []httpstream.Stream + streamLock sync.Mutex + newStreamHandler httpstream.NewStreamHandler +} + +// NewClientConnection creates a new SPDY client connection. +func NewClientConnection(conn net.Conn) (httpstream.Connection, error) { + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + defer conn.Close() + return nil, err + } + + return newConnection(spdyConn, httpstream.NoOpNewStreamHandler), nil +} + +// NewServerConnection creates a new SPDY server connection. newStreamHandler +// will be invoked when the server receives a newly created stream from the +// client. +func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) { + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + defer conn.Close() + return nil, err + } + + return newConnection(spdyConn, newStreamHandler), nil +} + +// newConnection returns a new connection wrapping conn. newStreamHandler +// will be invoked when the server receives a newly created stream from the +// client. 
+func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection { + c := &connection{conn: conn, newStreamHandler: newStreamHandler} + go conn.Serve(c.newSpdyStream) + return c +} + +// createStreamResponseTimeout indicates how long to wait for the other side to +// acknowledge the new stream before timing out. +const createStreamResponseTimeout = 30 * time.Second + +// Close first sends a reset for all of the connection's streams, and then +// closes the underlying spdystream.Connection. +func (c *connection) Close() error { + c.streamLock.Lock() + for _, s := range c.streams { + // calling Reset instead of Close ensures that all streams are fully torn down + s.Reset() + } + c.streams = make([]httpstream.Stream, 0) + c.streamLock.Unlock() + + // now that all streams are fully torn down, it's safe to call close on the underlying connection, + // which should be able to terminate immediately at this point, instead of waiting for any + // remaining graceful stream termination. + return c.conn.Close() +} + +// CreateStream creates a new stream with the specified headers and registers +// it with the connection. +func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) { + stream, err := c.conn.CreateStream(headers, nil, false) + if err != nil { + return nil, err + } + if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil { + return nil, err + } + + c.registerStream(stream) + return stream, nil +} + +// registerStream adds the stream s to the connection's list of streams that +// it owns. +func (c *connection) registerStream(s httpstream.Stream) { + c.streamLock.Lock() + c.streams = append(c.streams, s) + c.streamLock.Unlock() +} + +// CloseChan returns a channel that, when closed, indicates that the underlying +// spdystream.Connection has been closed. 
+func (c *connection) CloseChan() <-chan bool { + return c.conn.CloseChan() +} + +// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve. +// It calls connection's newStreamHandler, giving it the opportunity to accept or reject +// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the +// stream is accepted and registered with the connection. +func (c *connection) newSpdyStream(stream *spdystream.Stream) { + replySent := make(chan struct{}) + err := c.newStreamHandler(stream, replySent) + rejectStream := (err != nil) + if rejectStream { + klog.Warningf("Stream rejected: %v", err) + stream.Reset() + return + } + + c.registerStream(stream) + stream.SendReply(http.Header{}, rejectStream) + close(replySent) +} + +// SetIdleTimeout sets the amount of time the connection may remain idle before +// it is automatically closed. +func (c *connection) SetIdleTimeout(timeout time.Duration) { + c.conn.SetIdleTimeout(timeout) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go new file mode 100644 index 000000000..2699597e7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go @@ -0,0 +1,335 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spdy + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httputil" + "net/url" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/httpstream" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/third_party/forked/golang/netutil" +) + +// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports +// multiplexed streams. After RoundTrip() is invoked, Conn will be set +// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface. +type SpdyRoundTripper struct { + //tlsConfig holds the TLS configuration settings to use when connecting + //to the remote server. + tlsConfig *tls.Config + + /* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper + must be safe for use by multiple concurrent goroutines. If this is absolutely + necessary, we could keep a map from http.Request to net.Conn. In practice, + a client will create an http.Client, set the transport to a new insteace of + SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue. + */ + // conn is the underlying network connection to the remote server. + conn net.Conn + + // Dialer is the dialer used to connect. Used if non-nil. + Dialer *net.Dialer + + // proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment + // Used primarily for mocking the proxy discovery in tests. + proxier func(req *http.Request) (*url.URL, error) + + // followRedirects indicates if the round tripper should examine responses for redirects and + // follow them. + followRedirects bool + // requireSameHostRedirects restricts redirect following to only follow redirects to the same host + // as the original request. 
+ requireSameHostRedirects bool +} + +var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{} +var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{} +var _ utilnet.Dialer = &SpdyRoundTripper{} + +// NewRoundTripper creates a new SpdyRoundTripper that will use +// the specified tlsConfig. +func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) httpstream.UpgradeRoundTripper { + return NewSpdyRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects) +} + +// NewSpdyRoundTripper creates a new SpdyRoundTripper that will use +// the specified tlsConfig. This function is mostly meant for unit tests. +func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper { + return &SpdyRoundTripper{ + tlsConfig: tlsConfig, + followRedirects: followRedirects, + requireSameHostRedirects: requireSameHostRedirects, + } +} + +// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during +// proxying with a spdy roundtripper. +func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config { + return s.tlsConfig +} + +// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer. +func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) { + conn, err := s.dial(req) + if err != nil { + return nil, err + } + + if err := req.Write(conn); err != nil { + conn.Close() + return nil, err + } + + return conn, nil +} + +// dial dials the host specified by req, using TLS if appropriate, optionally +// using a proxy server if one is configured via environment variables. 
+func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) { + proxier := s.proxier + if proxier == nil { + proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) + } + proxyURL, err := proxier(req) + if err != nil { + return nil, err + } + + if proxyURL == nil { + return s.dialWithoutProxy(req.Context(), req.URL) + } + + // ensure we use a canonical host with proxyReq + targetHost := netutil.CanonicalAddr(req.URL) + + // proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support + proxyReq := http.Request{ + Method: "CONNECT", + URL: &url.URL{}, + Host: targetHost, + } + + if pa := s.proxyAuth(proxyURL); pa != "" { + proxyReq.Header = http.Header{} + proxyReq.Header.Set("Proxy-Authorization", pa) + } + + proxyDialConn, err := s.dialWithoutProxy(req.Context(), proxyURL) + if err != nil { + return nil, err + } + + proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil) + _, err = proxyClientConn.Do(&proxyReq) + if err != nil && err != httputil.ErrPersistEOF { + return nil, err + } + + rwc, _ := proxyClientConn.Hijack() + + if req.URL.Scheme != "https" { + return rwc, nil + } + + host, _, err := net.SplitHostPort(targetHost) + if err != nil { + return nil, err + } + + tlsConfig := s.tlsConfig + switch { + case tlsConfig == nil: + tlsConfig = &tls.Config{ServerName: host} + case len(tlsConfig.ServerName) == 0: + tlsConfig = tlsConfig.Clone() + tlsConfig.ServerName = host + } + + tlsConn := tls.Client(rwc, tlsConfig) + + // need to manually call Handshake() so we can call VerifyHostname() below + if err := tlsConn.Handshake(); err != nil { + return nil, err + } + + // Return if we were configured to skip validation + if tlsConfig.InsecureSkipVerify { + return tlsConn, nil + } + + if err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil { + return nil, err + } + + return tlsConn, nil +} + +// dialWithoutProxy dials the host specified by url, using TLS if appropriate. 
+func (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) { + dialAddr := netutil.CanonicalAddr(url) + + if url.Scheme == "http" { + if s.Dialer == nil { + var d net.Dialer + return d.DialContext(ctx, "tcp", dialAddr) + } else { + return s.Dialer.DialContext(ctx, "tcp", dialAddr) + } + } + + // TODO validate the TLSClientConfig is set up? + var conn *tls.Conn + var err error + if s.Dialer == nil { + conn, err = tls.Dial("tcp", dialAddr, s.tlsConfig) + } else { + conn, err = tls.DialWithDialer(s.Dialer, "tcp", dialAddr, s.tlsConfig) + } + if err != nil { + return nil, err + } + + // Return if we were configured to skip validation + if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify { + return conn, nil + } + + host, _, err := net.SplitHostPort(dialAddr) + if err != nil { + return nil, err + } + if s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 { + host = s.tlsConfig.ServerName + } + err = conn.VerifyHostname(host) + if err != nil { + return nil, err + } + + return conn, nil +} + +// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header +func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string { + if proxyURL == nil || proxyURL.User == nil { + return "" + } + credentials := proxyURL.User.String() + encodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials)) + return fmt.Sprintf("Basic %s", encodedAuth) +} + +// RoundTrip executes the Request and upgrades it. After a successful upgrade, +// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded +// connection. 
+func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + header := utilnet.CloneHeader(req.Header) + header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade) + header.Add(httpstream.HeaderUpgrade, HeaderSpdy31) + + var ( + conn net.Conn + rawResponse []byte + err error + ) + + if s.followRedirects { + conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects) + } else { + clone := utilnet.CloneRequest(req) + clone.Header = header + conn, err = s.Dial(clone) + } + if err != nil { + return nil, err + } + + responseReader := bufio.NewReader( + io.MultiReader( + bytes.NewBuffer(rawResponse), + conn, + ), + ) + + resp, err := http.ReadResponse(responseReader, nil) + if err != nil { + if conn != nil { + conn.Close() + } + return nil, err + } + + s.conn = conn + + return resp, nil +} + +// NewConnection validates the upgrade response, creating and returning a new +// httpstream.Connection if there were no errors. 
+func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) { + connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection)) + upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade)) + if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) { + defer resp.Body.Close() + responseError := "" + responseErrorBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + responseError = "unable to read error from server response" + } else { + // TODO: I don't belong here, I should be abstracted from this class + if obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil { + if status, ok := obj.(*metav1.Status); ok { + return nil, &apierrors.StatusError{ErrStatus: *status} + } + } + responseError = string(responseErrorBytes) + responseError = strings.TrimSpace(responseError) + } + + return nil, fmt.Errorf("unable to upgrade connection: %s", responseError) + } + + return NewClientConnection(s.conn) +} + +// statusScheme is private scheme for the decoding here until someone fixes the TODO in NewConnection +var statusScheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1 API spec. +var statusCodecs = serializer.NewCodecFactory(statusScheme) + +func init() { + statusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion, + &metav1.Status{}, + ) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go new file mode 100644 index 000000000..045d214d2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go @@ -0,0 +1,107 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "bufio" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/runtime" +) + +const HeaderSpdy31 = "SPDY/3.1" + +// responseUpgrader knows how to upgrade HTTP responses. It +// implements the httpstream.ResponseUpgrader interface. +type responseUpgrader struct { +} + +// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All +// calls will be handled directly by the underlying net.Conn with the exception +// of Read and Close calls, which will consider data in the bufio.Reader. This +// ensures that data already inside the used bufio.Reader instance is also +// read. +type connWrapper struct { + net.Conn + closed int32 + bufReader *bufio.Reader +} + +func (w *connWrapper) Read(b []byte) (n int, err error) { + if atomic.LoadInt32(&w.closed) == 1 { + return 0, io.EOF + } + return w.bufReader.Read(b) +} + +func (w *connWrapper) Close() error { + err := w.Conn.Close() + atomic.StoreInt32(&w.closed, 1) + return err +} + +// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is +// capable of upgrading HTTP responses using SPDY/3.1 via the +// spdystream package. +func NewResponseUpgrader() httpstream.ResponseUpgrader { + return responseUpgrader{} +} + +// UpgradeResponse upgrades an HTTP response to one that supports multiplexed +// streams. 
newStreamHandler will be called synchronously whenever the +// other end of the upgraded connection creates a new stream. +func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection { + connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection)) + upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade)) + if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) { + errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header) + http.Error(w, errorMsg, http.StatusBadRequest) + return nil + } + + hijacker, ok := w.(http.Hijacker) + if !ok { + errorMsg := fmt.Sprintf("unable to upgrade: unable to hijack response") + http.Error(w, errorMsg, http.StatusInternalServerError) + return nil + } + + w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade) + w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31) + w.WriteHeader(http.StatusSwitchingProtocols) + + conn, bufrw, err := hijacker.Hijack() + if err != nil { + runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err)) + return nil + } + + connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader} + spdyConn, err := NewServerConnection(connWithBuf, newStreamHandler) + if err != nil { + runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err)) + return nil + } + + return spdyConn +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go new file mode 100644 index 000000000..acfeb827c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + DefaultStreamCreationTimeout = 30 * time.Second + + // The SPDY subprotocol "channel.k8s.io" is used for remote command + // attachment/execution. This represents the initial unversioned subprotocol, + // which has the known bugs http://issues.k8s.io/13394 and + // http://issues.k8s.io/13395. + StreamProtocolV1Name = "channel.k8s.io" + + // The SPDY subprotocol "v2.channel.k8s.io" is used for remote command + // attachment/execution. It is the second version of the subprotocol and + // resolves the issues present in the first version. + StreamProtocolV2Name = "v2.channel.k8s.io" + + // The SPDY subprotocol "v3.channel.k8s.io" is used for remote command + // attachment/execution. It is the third version of the subprotocol and + // adds support for resizing container terminals. + StreamProtocolV3Name = "v3.channel.k8s.io" + + // The SPDY subprotocol "v4.channel.k8s.io" is used for remote command + // attachment/execution. It is the 4th version of the subprotocol and + // adds support for exit codes. 
+ StreamProtocolV4Name = "v4.channel.k8s.io" + + NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode") + ExitCodeCauseType = metav1.CauseType("ExitCode") +) + +var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go new file mode 100644 index 000000000..c70f431c2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go @@ -0,0 +1,27 @@ +package netutil + +import ( + "net/url" + "strings" +) + +// FROM: http://golang.org/src/net/http/client.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// FROM: http://golang.org/src/net/http/transport.go +// canonicalAddr returns url.Host but always with a ":port" suffix +func CanonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/doc.go b/vendor/k8s.io/client-go/tools/remotecommand/doc.go new file mode 100644 index 000000000..ac06a9cd3 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package remotecommand adds support for executing commands in containers, +// with support for separate stdin, stdout, and stderr streams, as well as +// TTY. +package remotecommand // import "k8s.io/client-go/tools/remotecommand" diff --git a/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go b/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go new file mode 100644 index 000000000..360276b65 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + "io/ioutil" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// errorStreamDecoder interprets the data on the error channel and creates a go error object from it. 
+type errorStreamDecoder interface { + decode(message []byte) error +} + +// watchErrorStream watches the errorStream for remote command error data, +// decodes it with the given errorStreamDecoder, sends the decoded error (or nil if the remote +// command exited successfully) to the returned error channel, and closes it. +// This function returns immediately. +func watchErrorStream(errorStream io.Reader, d errorStreamDecoder) chan error { + errorChan := make(chan error) + + go func() { + defer runtime.HandleCrash() + + message, err := ioutil.ReadAll(errorStream) + switch { + case err != nil && err != io.EOF: + errorChan <- fmt.Errorf("error reading from error stream: %s", err) + case len(message) > 0: + errorChan <- d.decode(message) + default: + errorChan <- nil + } + close(errorChan) + }() + + return errorChan +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/reader.go b/vendor/k8s.io/client-go/tools/remotecommand/reader.go new file mode 100644 index 000000000..d1f1be34c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/reader.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "io" +) + +// readerWrapper delegates to an io.Reader so that only the io.Reader interface is implemented, +// to keep io.Copy from doing things we don't want when copying from the reader to the data stream. 
+// +// If the Stdin io.Reader provided to remotecommand implements a WriteTo function (like bytes.Buffer does[1]), +// io.Copy calls that method[2] to attempt to write the entire buffer to the stream in one call. +// That results in an oversized call to spdystream.Stream#Write [3], +// which results in a single oversized data frame[4] that is too large. +// +// [1] https://golang.org/pkg/bytes/#Buffer.WriteTo +// [2] https://golang.org/pkg/io/#Copy +// [3] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/stream.go#L66-L73 +// [4] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/spdy/write.go#L302-L304 +type readerWrapper struct { + reader io.Reader +} + +func (r readerWrapper) Read(p []byte) (int, error) { + return r.reader.Read(p) +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go new file mode 100644 index 000000000..892d8d105 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -0,0 +1,142 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package remotecommand + +import ( + "fmt" + "io" + "net/http" + "net/url" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/remotecommand" + restclient "k8s.io/client-go/rest" + spdy "k8s.io/client-go/transport/spdy" +) + +// StreamOptions holds information pertaining to the current streaming session: +// input/output streams, if the client is requesting a TTY, and a terminal size queue to +// support terminal resizing. +type StreamOptions struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + Tty bool + TerminalSizeQueue TerminalSizeQueue +} + +// Executor is an interface for transporting shell-style streams. +type Executor interface { + // Stream initiates the transport of the standard shell streams. It will transport any + // non-nil stream to a remote system, and return an error if a problem occurs. If tty + // is set, the stderr stream is not used (raw TTY manages stdout and stderr over the + // stdout stream). + Stream(options StreamOptions) error +} + +type streamCreator interface { + CreateStream(headers http.Header) (httpstream.Stream, error) +} + +type streamProtocolHandler interface { + stream(conn streamCreator) error +} + +// streamExecutor handles transporting standard shell streams over an httpstream connection. +type streamExecutor struct { + upgrader spdy.Upgrader + transport http.RoundTripper + + method string + url *url.URL + protocols []string +} + +// NewSPDYExecutor connects to the provided server and upgrades the connection to +// multiplexed bidirectional streams. 
+func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { + wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) + if err != nil { + return nil, err + } + return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) +} + +// NewSPDYExecutorForTransports connects to the provided server using the given transport, +// upgrades the response using the given upgrader to multiplexed bidirectional streams. +func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { + return NewSPDYExecutorForProtocols( + transport, upgrader, method, url, + remotecommand.StreamProtocolV4Name, + remotecommand.StreamProtocolV3Name, + remotecommand.StreamProtocolV2Name, + remotecommand.StreamProtocolV1Name, + ) +} + +// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to +// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most +// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports. +func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) { + return &streamExecutor{ + upgrader: upgrader, + transport: transport, + method: method, + url: url, + protocols: protocols, + }, nil +} + +// Stream opens a protocol streamer to the server and streams until a client closes +// the connection or the server disconnects. 
+func (e *streamExecutor) Stream(options StreamOptions) error { + req, err := http.NewRequest(e.method, e.url.String(), nil) + if err != nil { + return fmt.Errorf("error creating request: %v", err) + } + + conn, protocol, err := spdy.Negotiate( + e.upgrader, + &http.Client{Transport: e.transport}, + req, + e.protocols..., + ) + if err != nil { + return err + } + defer conn.Close() + + var streamer streamProtocolHandler + + switch protocol { + case remotecommand.StreamProtocolV4Name: + streamer = newStreamProtocolV4(options) + case remotecommand.StreamProtocolV3Name: + streamer = newStreamProtocolV3(options) + case remotecommand.StreamProtocolV2Name: + streamer = newStreamProtocolV2(options) + case "": + klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) + fallthrough + case remotecommand.StreamProtocolV1Name: + streamer = newStreamProtocolV1(options) + } + + return streamer.stream(conn) +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/resize.go b/vendor/k8s.io/client-go/tools/remotecommand/resize.go new file mode 100644 index 000000000..c838f21ba --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/resize.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package remotecommand + +// TerminalSize and TerminalSizeQueue was a part of k8s.io/kubernetes/pkg/util/term +// and were moved in order to decouple client from other term dependencies + +// TerminalSize represents the width and height of a terminal. +type TerminalSize struct { + Width uint16 + Height uint16 +} + +// TerminalSizeQueue is capable of returning terminal resize events as they occur. +type TerminalSizeQueue interface { + // Next returns the new terminal size after the terminal has been resized. It returns nil when + // monitoring has been stopped. + Next() *TerminalSize +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v1.go b/vendor/k8s.io/client-go/tools/remotecommand/v1.go new file mode 100644 index 000000000..4120f1f5f --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v1.go @@ -0,0 +1,160 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog" +) + +// streamProtocolV1 implements the first version of the streaming exec & attach +// protocol. This version has some bugs, such as not being able to detect when +// non-interactive stdin data has ended. See http://issues.k8s.io/13394 and +// http://issues.k8s.io/13395 for more details. 
+type streamProtocolV1 struct { + StreamOptions + + errorStream httpstream.Stream + remoteStdin httpstream.Stream + remoteStdout httpstream.Stream + remoteStderr httpstream.Stream +} + +var _ streamProtocolHandler = &streamProtocolV1{} + +func newStreamProtocolV1(options StreamOptions) streamProtocolHandler { + return &streamProtocolV1{ + StreamOptions: options, + } +} + +func (p *streamProtocolV1) stream(conn streamCreator) error { + doneChan := make(chan struct{}, 2) + errorChan := make(chan error) + + cp := func(s string, dst io.Writer, src io.Reader) { + klog.V(6).Infof("Copying %s", s) + defer klog.V(6).Infof("Done copying %s", s) + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + klog.Errorf("Error copying %s: %v", s, err) + } + if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr { + doneChan <- struct{}{} + } + } + + // set up all the streams first + var err error + headers := http.Header{} + headers.Set(v1.StreamType, v1.StreamTypeError) + p.errorStream, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.errorStream.Reset() + + // Create all the streams first, then start the copy goroutines. The server doesn't start its copy + // goroutines until it's received all of the streams. If the client creates the stdin stream and + // immediately begins copying stdin data to the server, it's possible to overwhelm and wedge the + // spdy frame handler in the server so that it is full of unprocessed frames. The frames aren't + // getting processed because the server hasn't started its copying, and it won't do that until it + // gets all the streams. By creating all the streams first, we ensure that the server is ready to + // process data before the client starts sending any. See https://issues.k8s.io/16373 for more info. 
+ if p.Stdin != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdin) + p.remoteStdin, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.remoteStdin.Reset() + } + + if p.Stdout != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdout) + p.remoteStdout, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.remoteStdout.Reset() + } + + if p.Stderr != nil && !p.Tty { + headers.Set(v1.StreamType, v1.StreamTypeStderr) + p.remoteStderr, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.remoteStderr.Reset() + } + + // now that all the streams have been created, proceed with reading & copying + + // always read from errorStream + go func() { + message, err := ioutil.ReadAll(p.errorStream) + if err != nil && err != io.EOF { + errorChan <- fmt.Errorf("Error reading from error stream: %s", err) + return + } + if len(message) > 0 { + errorChan <- fmt.Errorf("Error executing remote command: %s", message) + return + } + }() + + if p.Stdin != nil { + // TODO this goroutine will never exit cleanly (the io.Copy never unblocks) + // because stdin is not closed until the process exits. If we try to call + // stdin.Close(), it returns no error but doesn't unblock the copy. It will + // exit when the process exits, instead. 
+ go cp(v1.StreamTypeStdin, p.remoteStdin, readerWrapper{p.Stdin}) + } + + waitCount := 0 + completedStreams := 0 + + if p.Stdout != nil { + waitCount++ + go cp(v1.StreamTypeStdout, p.Stdout, p.remoteStdout) + } + + if p.Stderr != nil && !p.Tty { + waitCount++ + go cp(v1.StreamTypeStderr, p.Stderr, p.remoteStderr) + } + +Loop: + for { + select { + case <-doneChan: + completedStreams++ + if completedStreams == waitCount { + break Loop + } + case err := <-errorChan: + return err + } + } + + return nil +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v2.go b/vendor/k8s.io/client-go/tools/remotecommand/v2.go new file mode 100644 index 000000000..4b0001502 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v2.go @@ -0,0 +1,195 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "sync" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/runtime" +) + +// streamProtocolV2 implements version 2 of the streaming protocol for attach +// and exec. The original streaming protocol was metav1. As a result, this +// version is referred to as version 2, even though it is the first actual +// numbered version. 
+type streamProtocolV2 struct { + StreamOptions + + errorStream io.Reader + remoteStdin io.ReadWriteCloser + remoteStdout io.Reader + remoteStderr io.Reader +} + +var _ streamProtocolHandler = &streamProtocolV2{} + +func newStreamProtocolV2(options StreamOptions) streamProtocolHandler { + return &streamProtocolV2{ + StreamOptions: options, + } +} + +func (p *streamProtocolV2) createStreams(conn streamCreator) error { + var err error + headers := http.Header{} + + // set up error stream + headers.Set(v1.StreamType, v1.StreamTypeError) + p.errorStream, err = conn.CreateStream(headers) + if err != nil { + return err + } + + // set up stdin stream + if p.Stdin != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdin) + p.remoteStdin, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + + // set up stdout stream + if p.Stdout != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdout) + p.remoteStdout, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + + // set up stderr stream + if p.Stderr != nil && !p.Tty { + headers.Set(v1.StreamType, v1.StreamTypeStderr) + p.remoteStderr, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + return nil +} + +func (p *streamProtocolV2) copyStdin() { + if p.Stdin != nil { + var once sync.Once + + // copy from client's stdin to container's stdin + go func() { + defer runtime.HandleCrash() + + // if p.stdin is noninteractive, p.g. `echo abc | kubectl exec -i -- cat`, make sure + // we close remoteStdin as soon as the copy from p.stdin to remoteStdin finishes. Otherwise + // the executed command will remain running. + defer once.Do(func() { p.remoteStdin.Close() }) + + if _, err := io.Copy(p.remoteStdin, readerWrapper{p.Stdin}); err != nil { + runtime.HandleError(err) + } + }() + + // read from remoteStdin until the stream is closed. this is essential to + // be able to exit interactive sessions cleanly and not leak goroutines or + // hang the client's terminal. 
+ // + // TODO we aren't using go-dockerclient any more; revisit this to determine if it's still + // required by engine-api. + // + // go-dockerclient's current hijack implementation + // (https://github.com/fsouza/go-dockerclient/blob/89f3d56d93788dfe85f864a44f85d9738fca0670/client.go#L564) + // waits for all three streams (stdin/stdout/stderr) to finish copying + // before returning. When hijack finishes copying stdout/stderr, it calls + // Close() on its side of remoteStdin, which allows this copy to complete. + // When that happens, we must Close() on our side of remoteStdin, to + // allow the copy in hijack to complete, and hijack to return. + go func() { + defer runtime.HandleCrash() + defer once.Do(func() { p.remoteStdin.Close() }) + + // this "copy" doesn't actually read anything - it's just here to wait for + // the server to close remoteStdin. + if _, err := io.Copy(ioutil.Discard, p.remoteStdin); err != nil { + runtime.HandleError(err) + } + }() + } +} + +func (p *streamProtocolV2) copyStdout(wg *sync.WaitGroup) { + if p.Stdout == nil { + return + } + + wg.Add(1) + go func() { + defer runtime.HandleCrash() + defer wg.Done() + + if _, err := io.Copy(p.Stdout, p.remoteStdout); err != nil { + runtime.HandleError(err) + } + }() +} + +func (p *streamProtocolV2) copyStderr(wg *sync.WaitGroup) { + if p.Stderr == nil || p.Tty { + return + } + + wg.Add(1) + go func() { + defer runtime.HandleCrash() + defer wg.Done() + + if _, err := io.Copy(p.Stderr, p.remoteStderr); err != nil { + runtime.HandleError(err) + } + }() +} + +func (p *streamProtocolV2) stream(conn streamCreator) error { + if err := p.createStreams(conn); err != nil { + return err + } + + // now that all the streams have been created, proceed with reading & copying + + errorChan := watchErrorStream(p.errorStream, &errorDecoderV2{}) + + p.copyStdin() + + var wg sync.WaitGroup + p.copyStdout(&wg) + p.copyStderr(&wg) + + // we're waiting for stdout/stderr to finish copying + wg.Wait() + + // waits for 
errorStream to finish reading with an error or nil + return <-errorChan +} + +// errorDecoderV2 interprets the error channel data as plain text. +type errorDecoderV2 struct{} + +func (d *errorDecoderV2) decode(message []byte) error { + return fmt.Errorf("error executing remote command: %s", message) +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v3.go b/vendor/k8s.io/client-go/tools/remotecommand/v3.go new file mode 100644 index 000000000..846dd24a5 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v3.go @@ -0,0 +1,111 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "encoding/json" + "io" + "net/http" + "sync" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/runtime" +) + +// streamProtocolV3 implements version 3 of the streaming protocol for attach +// and exec. This version adds support for resizing the container's terminal. 
+type streamProtocolV3 struct { + *streamProtocolV2 + + resizeStream io.Writer +} + +var _ streamProtocolHandler = &streamProtocolV3{} + +func newStreamProtocolV3(options StreamOptions) streamProtocolHandler { + return &streamProtocolV3{ + streamProtocolV2: newStreamProtocolV2(options).(*streamProtocolV2), + } +} + +func (p *streamProtocolV3) createStreams(conn streamCreator) error { + // set up the streams from v2 + if err := p.streamProtocolV2.createStreams(conn); err != nil { + return err + } + + // set up resize stream + if p.Tty { + headers := http.Header{} + headers.Set(v1.StreamType, v1.StreamTypeResize) + var err error + p.resizeStream, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + + return nil +} + +func (p *streamProtocolV3) handleResizes() { + if p.resizeStream == nil || p.TerminalSizeQueue == nil { + return + } + go func() { + defer runtime.HandleCrash() + + encoder := json.NewEncoder(p.resizeStream) + for { + size := p.TerminalSizeQueue.Next() + if size == nil { + return + } + if err := encoder.Encode(&size); err != nil { + runtime.HandleError(err) + } + } + }() +} + +func (p *streamProtocolV3) stream(conn streamCreator) error { + if err := p.createStreams(conn); err != nil { + return err + } + + // now that all the streams have been created, proceed with reading & copying + + errorChan := watchErrorStream(p.errorStream, &errorDecoderV3{}) + + p.handleResizes() + + p.copyStdin() + + var wg sync.WaitGroup + p.copyStdout(&wg) + p.copyStderr(&wg) + + // we're waiting for stdout/stderr to finish copying + wg.Wait() + + // waits for errorStream to finish reading with an error or nil + return <-errorChan +} + +type errorDecoderV3 struct { + errorDecoderV2 +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v4.go b/vendor/k8s.io/client-go/tools/remotecommand/v4.go new file mode 100644 index 000000000..69ca934a0 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v4.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The 
Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "sync" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/client-go/util/exec" +) + +// streamProtocolV4 implements version 4 of the streaming protocol for attach +// and exec. This version adds support for exit codes on the error stream through +// the use of metav1.Status instead of plain text messages. 
+type streamProtocolV4 struct { + *streamProtocolV3 +} + +var _ streamProtocolHandler = &streamProtocolV4{} + +func newStreamProtocolV4(options StreamOptions) streamProtocolHandler { + return &streamProtocolV4{ + streamProtocolV3: newStreamProtocolV3(options).(*streamProtocolV3), + } +} + +func (p *streamProtocolV4) createStreams(conn streamCreator) error { + return p.streamProtocolV3.createStreams(conn) +} + +func (p *streamProtocolV4) handleResizes() { + p.streamProtocolV3.handleResizes() +} + +func (p *streamProtocolV4) stream(conn streamCreator) error { + if err := p.createStreams(conn); err != nil { + return err + } + + // now that all the streams have been created, proceed with reading & copying + + errorChan := watchErrorStream(p.errorStream, &errorDecoderV4{}) + + p.handleResizes() + + p.copyStdin() + + var wg sync.WaitGroup + p.copyStdout(&wg) + p.copyStderr(&wg) + + // we're waiting for stdout/stderr to finish copying + wg.Wait() + + // waits for errorStream to finish reading with an error or nil + return <-errorChan +} + +// errorDecoderV4 interprets the json-marshaled metav1.Status on the error channel +// and creates an exec.ExitError from it. 
+type errorDecoderV4 struct{} + +func (d *errorDecoderV4) decode(message []byte) error { + status := metav1.Status{} + err := json.Unmarshal(message, &status) + if err != nil { + return fmt.Errorf("error stream protocol error: %v in %q", err, string(message)) + } + switch status.Status { + case metav1.StatusSuccess: + return nil + case metav1.StatusFailure: + if status.Reason == remotecommand.NonZeroExitCodeReason { + if status.Details == nil { + return errors.New("error stream protocol error: details must be set") + } + for i := range status.Details.Causes { + c := &status.Details.Causes[i] + if c.Type != remotecommand.ExitCodeCauseType { + continue + } + + rc, err := strconv.ParseUint(c.Message, 10, 8) + if err != nil { + return fmt.Errorf("error stream protocol error: invalid exit code value %q", c.Message) + } + return exec.CodeExitError{ + Err: fmt.Errorf("command terminated with exit code %d", rc), + Code: int(rc), + } + } + + return fmt.Errorf("error stream protocol error: no %s cause given", remotecommand.ExitCodeCauseType) + } + default: + return errors.New("error stream protocol error: unknown error") + } + + return fmt.Errorf(status.Message) +} diff --git a/vendor/k8s.io/client-go/transport/spdy/spdy.go b/vendor/k8s.io/client-go/transport/spdy/spdy.go new file mode 100644 index 000000000..53cc7ee18 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/spdy/spdy.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spdy + +import ( + "fmt" + "net/http" + "net/url" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/httpstream/spdy" + restclient "k8s.io/client-go/rest" +) + +// Upgrader validates a response from the server after a SPDY upgrade. +type Upgrader interface { + // NewConnection validates the response and creates a new Connection. + NewConnection(resp *http.Response) (httpstream.Connection, error) +} + +// RoundTripperFor returns a round tripper and upgrader to use with SPDY. +func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, error) { + tlsConfig, err := restclient.TLSConfigFor(config) + if err != nil { + return nil, nil, err + } + upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true, false) + wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper) + if err != nil { + return nil, nil, err + } + return wrapper, upgradeRoundTripper, nil +} + +// dialer implements the httpstream.Dialer interface. +type dialer struct { + client *http.Client + upgrader Upgrader + method string + url *url.URL +} + +var _ httpstream.Dialer = &dialer{} + +// NewDialer will create a dialer that connects to the provided URL and upgrades the connection to SPDY. +func NewDialer(upgrader Upgrader, client *http.Client, method string, url *url.URL) httpstream.Dialer { + return &dialer{ + client: client, + upgrader: upgrader, + method: method, + url: url, + } +} + +func (d *dialer) Dial(protocols ...string) (httpstream.Connection, string, error) { + req, err := http.NewRequest(d.method, d.url.String(), nil) + if err != nil { + return nil, "", fmt.Errorf("error creating request: %v", err) + } + return Negotiate(d.upgrader, d.client, req, protocols...) +} + +// Negotiate opens a connection to a remote server and attempts to negotiate +// a SPDY connection. Upon success, it returns the connection and the protocol selected by +// the server. 
The client transport must use the upgradeRoundTripper - see RoundTripperFor. +func Negotiate(upgrader Upgrader, client *http.Client, req *http.Request, protocols ...string) (httpstream.Connection, string, error) { + for i := range protocols { + req.Header.Add(httpstream.HeaderProtocolVersion, protocols[i]) + } + resp, err := client.Do(req) + if err != nil { + return nil, "", fmt.Errorf("error sending request: %v", err) + } + defer resp.Body.Close() + conn, err := upgrader.NewConnection(resp) + if err != nil { + return nil, "", err + } + return conn, resp.Header.Get(httpstream.HeaderProtocolVersion), nil +} diff --git a/vendor/k8s.io/client-go/util/exec/exec.go b/vendor/k8s.io/client-go/util/exec/exec.go new file mode 100644 index 000000000..d170badb6 --- /dev/null +++ b/vendor/k8s.io/client-go/util/exec/exec.go @@ -0,0 +1,52 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +// ExitError is an interface that presents an API similar to os.ProcessState, which is +// what ExitError from os/exec is. This is designed to make testing a bit easier and +// probably loses some of the cross-platform properties of the underlying library. +type ExitError interface { + String() string + Error() string + Exited() bool + ExitStatus() int +} + +// CodeExitError is an implementation of ExitError consisting of an error object +// and an exit code (the upper bits of os.exec.ExitStatus). 
+type CodeExitError struct { + Err error + Code int +} + +var _ ExitError = CodeExitError{} + +func (e CodeExitError) Error() string { + return e.Err.Error() +} + +func (e CodeExitError) String() string { + return e.Err.Error() +} + +func (e CodeExitError) Exited() bool { + return true +} + +func (e CodeExitError) ExitStatus() int { + return e.Code +} diff --git a/vendor/k8s.io/kubernetes/third_party/forked/godep/license.go b/vendor/k8s.io/kubernetes/third_party/forked/godep/license.go deleted file mode 100644 index 5ce82496f..000000000 --- a/vendor/k8s.io/kubernetes/third_party/forked/godep/license.go +++ /dev/null @@ -1,59 +0,0 @@ -package main - -import ( - "strings" -) - -// LicenseFilePrefix is a list of filename prefixes that indicate it -// might contain a software license -var LicenseFilePrefix = []string{ - "licence", // UK spelling - "license", // US spelling - "copying", - "unlicense", - "copyright", - "copyleft", - "authors", - "contributors", -} - -// LegalFileSubstring are substrings that indicate the file is likely -// to contain some type of legal declaration. 
"legal" is often used -// that it might moved to LicenseFilePrefix -var LegalFileSubstring = []string{ - "legal", - "notice", - "disclaimer", - "patent", - "third-party", - "thirdparty", -} - -// IsLicenseFile returns true if the filename might be contain a -// software license -func IsLicenseFile(filename string) bool { - lowerfile := strings.ToLower(filename) - for _, prefix := range LicenseFilePrefix { - if strings.HasPrefix(lowerfile, prefix) { - return true - } - } - return false -} - -// IsLegalFile returns true if the file is likely to contain some type -// of of legal declaration or licensing information -func IsLegalFile(filename string) bool { - lowerfile := strings.ToLower(filename) - for _, prefix := range LicenseFilePrefix { - if strings.HasPrefix(lowerfile, prefix) { - return true - } - } - for _, substring := range LegalFileSubstring { - if strings.Contains(lowerfile, substring) { - return true - } - } - return false -} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go new file mode 100644 index 000000000..d918eeaa4 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go @@ -0,0 +1,178 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllerutil + +import ( + "context" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// AlreadyOwnedError is an error returned if the object you are trying to assign +// a controller reference is already owned by another controller Object is the +// subject and Owner is the reference for the current owner +type AlreadyOwnedError struct { + Object v1.Object + Owner v1.OwnerReference +} + +func (e *AlreadyOwnedError) Error() string { + return fmt.Sprintf("Object %s/%s is already owned by another %s controller %s", e.Object.GetNamespace(), e.Object.GetName(), e.Owner.Kind, e.Owner.Name) +} + +func newAlreadyOwnedError(Object v1.Object, Owner v1.OwnerReference) *AlreadyOwnedError { + return &AlreadyOwnedError{ + Object: Object, + Owner: Owner, + } +} + +// SetControllerReference sets owner as a Controller OwnerReference on owned. +// This is used for garbage collection of the owned object and for +// reconciling the owner object on changes to owned (with a Watch + EnqueueRequestForOwner). +// Since only one OwnerReference can be a controller, it returns an error if +// there is another OwnerReference with Controller flag set. 
+func SetControllerReference(owner, object v1.Object, scheme *runtime.Scheme) error { + ro, ok := owner.(runtime.Object) + if !ok { + return fmt.Errorf("is not a %T a runtime.Object, cannot call SetControllerReference", owner) + } + + gvk, err := apiutil.GVKForObject(ro, scheme) + if err != nil { + return err + } + + // Create a new ref + ref := *v1.NewControllerRef(owner, schema.GroupVersionKind{Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind}) + + existingRefs := object.GetOwnerReferences() + fi := -1 + for i, r := range existingRefs { + if referSameObject(ref, r) { + fi = i + } else if r.Controller != nil && *r.Controller { + return newAlreadyOwnedError(object, r) + } + } + if fi == -1 { + existingRefs = append(existingRefs, ref) + } else { + existingRefs[fi] = ref + } + + // Update owner references + object.SetOwnerReferences(existingRefs) + return nil +} + +// Returns true if a and b point to the same object +func referSameObject(a, b v1.OwnerReference) bool { + aGV, err := schema.ParseGroupVersion(a.APIVersion) + if err != nil { + return false + } + + bGV, err := schema.ParseGroupVersion(b.APIVersion) + if err != nil { + return false + } + + return aGV == bGV && a.Kind == b.Kind && a.Name == b.Name +} + +// OperationResult is the action result of a CreateOrUpdate call +type OperationResult string + +const ( // They should complete the sentence "Deployment default/foo has been ..." + // OperationResultNone means that the resource has not been changed + OperationResultNone OperationResult = "unchanged" + // OperationResultCreated means that a new resource is created + OperationResultCreated OperationResult = "created" + // OperationResultUpdated means that an existing resource is updated + OperationResultUpdated OperationResult = "updated" +) + +// CreateOrUpdate creates or updates the given object obj in the Kubernetes +// cluster. The object's desired state should be reconciled with the existing +// state using the passed in ReconcileFn. 
// CreateOrUpdate creates or updates the given object obj in the Kubernetes
// cluster. The object's desired state should be reconciled with the existing
// state using the passed in ReconcileFn. obj must be a struct pointer so that
// obj can be updated with the content returned by the Server.
//
// It returns the executed operation and an error.
func CreateOrUpdate(ctx context.Context, c client.Client, obj runtime.Object, f MutateFn) (OperationResult, error) {
	// op is the operation we are going to attempt
	op := OperationResultNone

	// get the existing object meta
	metaObj, ok := obj.(v1.Object)
	if !ok {
		return OperationResultNone, fmt.Errorf("%T does not implement metav1.Object interface", obj)
	}

	// retrieve the existing object
	key := client.ObjectKey{
		Name:      metaObj.GetName(),
		Namespace: metaObj.GetNamespace(),
	}
	err := c.Get(ctx, key, obj)
	// NOTE: err is deliberately not inspected here; a NotFound vs. other
	// failure is distinguished by the if/else chain further below, after
	// the mutation has been applied.

	// Snapshot the object before mutating it so a no-op update can be
	// detected, and pin name/namespace so MutateFn cannot change them.
	existing := obj.DeepCopyObject()
	existingObjMeta := existing.(v1.Object)
	existingObjMeta.SetName(metaObj.GetName())
	existingObjMeta.SetNamespace(metaObj.GetNamespace())

	if e := f(obj); e != nil {
		return OperationResultNone, e
	}

	if metaObj.GetName() != existingObjMeta.GetName() {
		return OperationResultNone, fmt.Errorf("ReconcileFn cannot mutate objects name")
	}

	if metaObj.GetNamespace() != existingObjMeta.GetNamespace() {
		return OperationResultNone, fmt.Errorf("ReconcileFn cannot mutate objects namespace")
	}

	if errors.IsNotFound(err) {
		// Object absent: create the mutated object.
		err = c.Create(ctx, obj)
		op = OperationResultCreated
	} else if err == nil {
		// Object present: skip the update when the mutation changed nothing.
		if reflect.DeepEqual(existing, obj) {
			return OperationResultNone, nil
		}
		err = c.Update(ctx, obj)
		op = OperationResultUpdated
	} else {
		// Get failed for a reason other than NotFound; surface it.
		return OperationResultNone, err
	}

	if err != nil {
		// The attempted operation failed; report no change was made.
		op = OperationResultNone
	}
	return op, err
}

// MutateFn is a function which mutates the existing object into its desired state.
type MutateFn func(existing runtime.Object) error