Compare commits

...

74 Commits

Author SHA1 Message Date
KubeSphere CI Bot
5c1f73134a Merge pull request #4473 from ks-ci-bot/cherry-pick-4471-to-release-3.2
[release-3.2] fix groupbinding controller unit test
2021-11-29 16:03:52 +08:00
hongming
20cb04aedf fix groupbinding controller unit test 2021-11-25 06:41:54 +00:00
KubeSphere CI Bot
e029adfb84 Merge pull request #4458 from ks-ci-bot/cherry-pick-4457-to-release-3.2
[release-3.2] Namespace should not be filtered for Cluster Gateway
2021-11-18 20:13:13 +08:00
Roland.Ma
7a8712bda1 Namespace should not be filtered for Cluster Gateway
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-11-18 11:54:42 +00:00
KubeSphere CI Bot
f1a99bd1d8 Merge pull request #4439 from ks-ci-bot/cherry-pick-4436-to-release-3.2
[release-3.2] fix: users can't log in with ldap provider
2021-11-15 14:21:11 +08:00
KubeSphere CI Bot
a8b93b9abf Merge pull request #4438 from ks-ci-bot/cherry-pick-4434-to-release-3.2
[release-3.2] Support querying pods by status
2021-11-12 10:45:34 +08:00
Roland.Ma
0efb3c671f fix: users can't log in with ldap provider
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-11-12 01:59:41 +00:00
Xinzhao Xu
a8b79e85b6 Support querying pods by status 2021-11-11 09:17:44 +00:00
KubeSphere CI Bot
579d45465a Merge pull request #4422 from ks-ci-bot/cherry-pick-4418-to-release-3.2
[release-3.2] Allow overriding the nginx ingress controller image in the kubesphere config
2021-11-08 16:14:30 +08:00
Roland.Ma
fab6336e91 allow overriding the docker image in the kubesphere config
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-11-05 06:55:05 +00:00
KubeSphere CI Bot
18527f895e Merge pull request #4385 from liuan1986/master
If error is not nil, returning the error may be better.
2021-10-22 12:30:11 +08:00
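
The title above refers to the standard Go error-propagation idiom: hand a non-nil error back to the caller instead of ignoring it. A minimal, self-contained sketch follows; the helper names are hypothetical, not the actual #4385 patch:

```go
package main

import (
	"errors"
	"fmt"
)

// fetchIndex and saveIndex are hypothetical stand-ins, not the actual
// helpers touched by #4385.
func fetchIndex(repo string) (string, error) {
	if repo == "" {
		return "", errors.New("empty repo URL")
	}
	return "index.yaml", nil
}

func saveIndex(index string) error { return nil }

// syncRepo propagates a non-nil error to its caller instead of
// swallowing it, which is the idiom the commit title argues for.
func syncRepo(repo string) error {
	index, err := fetchIndex(repo)
	if err != nil {
		// Wrap with context and let the caller decide to retry or log.
		return fmt.Errorf("fetch index for %q: %w", repo, err)
	}
	return saveIndex(index)
}

func main() {
	if err := syncRepo(""); err != nil {
		fmt.Println("sync failed:", err)
	}
}
```
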
KubeSphere CI Bot
8ab489c51b Merge pull request #4377 from 123liubao/hotfix/fix-4367
Refactor the Chinese version of README
2021-10-22 12:20:11 +08:00
KubeSphere CI Bot
5a855a8306 Merge pull request #4387 from RolandMa1986/fix-e2e
fix e2e auth error
2021-10-22 11:19:11 +08:00
123liubao
d865305983 Modify the document 2021-10-22 10:10:18 +08:00
Roland.Ma
64a4b65d66 fix e2e auth error
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-10-22 01:27:17 +00:00
KubeSphere CI Bot
d341e55b8b Merge pull request #4382 from wansir/master
Fix failed to delete workspace in multi-cluster environment
2021-10-21 17:40:02 +08:00
liuan
8657cd31b2 if error is not nil, return error 2021-10-21 17:23:42 +08:00
hongming
20163dd795 Fix no kind FederatedGroupBinding is registered 2021-10-21 11:49:47 +08:00
123liubao
3a50e1c244 Optimized code 2021-10-20 15:58:29 +08:00
123liubao
738a451433 URL modified to Chinese 2021-10-20 15:29:15 +08:00
KubeSphere CI Bot
744233b3a3 Merge pull request #4374 from iawia002/bump-controller-runtime-version-0.9
Bump controller-runtime version to the latest of release-0.9
2021-10-20 14:55:03 +08:00
iawia002
dae7d92dab Bump controller-runtime version to the latest of release-0.9 2021-10-20 10:11:21 +08:00
iawia002
2eeb7262c6 update vendor 2021-10-20 10:09:48 +08:00
123liubao
840a3d25f8 fix: Refactor the Chinese version of README 2021-10-19 23:17:31 +08:00
KubeSphere CI Bot
9720aa9806 Merge pull request #4365 from wansir/fix-4364
Fix namespace cannot be deleted after workspace has been deleted
2021-10-18 09:16:02 +08:00
hongming
0e3159e1e8 Fix namespace cannot be deleted after workspace has been deleted 2021-10-15 15:12:28 +08:00
zryfish
edcd8e1449 fix gitMajor and gitMinor missing in version api (#4361)
* fix version missing
* suppress shellcheck warnings
2021-10-13 17:40:54 +08:00
KubeSphere CI Bot
98b9009a58 Merge pull request #4351 from wansir/fix-regression-bug
Fix NPE in user_controller
2021-10-13 15:04:06 +08:00
KubeSphere CI Bot
e82236366d Merge pull request #4356 from RolandMa1986/feat-gateway-log-export
Support exporting gateway logs
2021-10-13 15:04:00 +08:00
hongming
776593001e Fix NPE in user_controller 2021-10-13 11:02:43 +08:00
Roland.Ma
6dc99e181e support exporting gateway logs
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-10-12 08:15:58 +00:00
KubeSphere CI Bot
735f0c7731 Merge pull request #4337 from RolandMa1986/fix-4333
Fix: support fuzzy query with ObjectMeta for gateway query api
2021-10-11 13:27:59 +08:00
KubeSphere CI Bot
2af76aff79 Merge pull request #4325 from wansir/fix-regression-bug
Fix failed to delete rolebindings
2021-10-11 09:43:59 +08:00
KubeSphere CI Bot
7a0dafd59d Merge pull request #4331 from wansir/fix-remote-ip
Fix incorrect source IP
2021-10-10 22:09:59 +08:00
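
The fix above replaces a request-IP helper; the diff further down swaps utilnet.GetRequestIP for iputil.RemoteIp. A common shape for such a helper, shown as a hedged sketch rather than the actual iputil implementation:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// remoteIP shows a common shape for a helper like iputil.RemoteIp:
// prefer proxy-set headers, then fall back to the TCP peer address.
// This is a sketch under that assumption, not the actual KubeSphere code.
func remoteIP(req *http.Request) string {
	if xff := req.Header.Get("X-Forwarded-For"); xff != "" {
		// Proxies append addresses, so the first entry is the client.
		return strings.TrimSpace(strings.Split(xff, ",")[0])
	}
	if xrip := req.Header.Get("X-Real-Ip"); xrip != "" {
		return xrip
	}
	host := req.RemoteAddr
	if i := strings.LastIndex(host, ":"); i != -1 {
		host = host[:i] // strip the port
	}
	return host
}

func main() {
	req, _ := http.NewRequest("GET", "http://example.com", nil)
	req.RemoteAddr = "10.0.0.7:52114"
	req.Header.Set("X-Forwarded-For", "203.0.113.9, 10.0.0.1")
	fmt.Println(remoteIP(req)) // prints 203.0.113.9
}
```
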
KubeSphere CI Bot
813c341574 Merge pull request #4342 from RolandMa1986/fix-metric
Use regex query instead of equal in Prometheus query
2021-10-09 19:40:56 +08:00
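
The distinction this title draws matters because an equality matcher hits only one exact label value, while gateway pods carry generated name suffixes. An illustrative comparison, using the generic cAdvisor metric and made-up label values:

```go
package main

import "fmt"

// Illustrative only: gateway pods carry generated suffixes, so an
// equality matcher misses them while a regex matcher does not.
func main() {
	// Matches only a pod literally named "gateway"; misses
	// "gateway-6f6d9b9c47-xyz12" and friends.
	equal := `container_memory_usage_bytes{pod="gateway"}`

	// Matches every pod spawned by the gateway workload.
	regex := `container_memory_usage_bytes{pod=~"gateway-.*"}`

	fmt.Println(equal)
	fmt.Println(regex)
}
```
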
Roland.Ma
59e03a0c19 append status to all gateways
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-10-09 10:01:49 +00:00
Roland.Ma
2d066f86d4 use regex query instead of equal
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-10-09 08:35:09 +00:00
KubeSphere CI Bot
b94c7966d1 Merge pull request #4336 from LinuxSuRen/fix-buildx-err
Fix the docker build error due to missing buildx setting
2021-10-09 09:39:55 +08:00
Roland.Ma
6b3af2d19d fuzzy query with default ObjectMeta
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-10-09 01:13:44 +00:00
rick
d8fc168948 Fix the docker build error due to missing buildx setting
Signed-off-by: rick <1450685+LinuxSuRen@users.noreply.github.com>
2021-10-08 23:06:59 +08:00
hongming
1641b9920b Fix incorrect source IP 2021-10-08 16:19:27 +08:00
hongming
1956f83af0 Fix failed to delete rolebindings 2021-10-08 11:17:02 +08:00
KubeSphere CI Bot
22a8bebcae Merge pull request #4321 from mazak-ui/master
Update deprecated CNCF links in the README
2021-10-07 16:57:54 +08:00
mazak-ui
d0ae0f210f Update CNCF Landscape URL
Updating the deprecated link, as it is currently redirecting to the new one and an orange banner appears.
2021-10-03 09:11:37 -05:00
mazak-ui
8ef0605858 Update CNCF Landscape URL
Updating the deprecated link, as it is currently redirecting to the new one and an orange banner appears.
2021-10-03 09:10:36 -05:00
zryfish
6f434252df fix missing constants (#4308) 2021-09-29 14:25:01 +08:00
zryfish
185ab75053 update vendor directory (#4305) 2021-09-29 13:50:34 +08:00
zryfish
84f66199ed add kind-e2e to Makefile (#4304) 2021-09-29 12:11:29 +08:00
KubeSphere CI Bot
22ae83a431 Merge pull request #4250 from xyz-li/app_store
Dynamically load helm app into app-store
2021-09-29 10:14:54 +08:00
LiHui
09fc2867c4 remove mathutil.Max 2021-09-29 09:35:18 +08:00
KubeSphere CI Bot
60af01c10d Merge pull request #4302 from pixiake/master
Add scheduling parameters for ks-core chart
2021-09-28 19:45:54 +08:00
pixiake
1bd8ede93d Add scheduling parameters for ks-core chart
Signed-off-by: pixiake <guofeng@yunify.com>
2021-09-28 18:18:43 +08:00
KubeSphere CI Bot
1cf3493e8f Merge pull request #4290 from f10atin9/capability
update capability_controller.go, make sure that annotations are generated correctly
2021-09-28 18:13:55 +08:00
zryfish
e83b77dc3a fix workflow syntax error (#4301) 2021-09-28 18:12:11 +08:00
zryfish
ef398dd56f simplify github workflow since we migrated most CI steps to prow (#4299) 2021-09-28 17:54:07 +08:00
f10atin9
60cd523a0f [fix] fix update logic
The controller now checks whether storageClassClient needs to send an update request.

Signed-off-by: f10atin9 <f10atin9@kubesphere.io>
2021-09-28 16:20:10 +08:00
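
This is the compare-before-update pattern visible in the capability controller diff later in this page: mutate a deep copy, then call Update only if something changed. A simplified Go sketch with stand-in types; the annotation key is illustrative, not the controller's real key:

```go
package main

import (
	"fmt"
	"reflect"
)

// StorageClass is a simplified stand-in for the Kubernetes type.
type StorageClass struct {
	Annotations map[string]string
}

// reconcile mutates a deep copy and sends an update request only when
// something actually changed, mirroring the capability controller fix.
func reconcile(sc *StorageClass, update func(*StorageClass)) {
	updated := &StorageClass{Annotations: map[string]string{}}
	for k, v := range sc.Annotations {
		updated.Annotations[k] = v // poor man's DeepCopy
	}
	updated.Annotations["example.kubesphere.io/allow-snapshot"] = "true"

	if !reflect.DeepEqual(sc, updated) {
		update(updated) // only hit the API server when needed
	}
}

func main() {
	sc := &StorageClass{Annotations: map[string]string{}}
	reconcile(sc, func(s *StorageClass) {
		fmt.Println("update sent:", s.Annotations)
	})
}
```
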
KubeSphere CI Bot
09ef175e90 Merge pull request #4286 from RolandMa1986/feat-gateway-logging
Search gateway logs with ES
2021-09-28 15:25:55 +08:00
f10atin9
be5421f00b update capability_controller.go, make sure that annotations are generated correctly. A StorageClass without a csiDriver will no longer generate false annotations.
Signed-off-by: f10atin9 <f10atin9@kubesphere.io>
2021-09-27 16:42:19 +08:00
LiHui
ad69b08a75 add display fields
Signed-off-by: LiHui <andrewli@kubesphere.io>
2021-09-27 15:44:00 +08:00
LiHui
4eb5401f76 calculate the category for the app
Signed-off-by: LiHui <andrewli@kubesphere.io>
2021-09-27 15:35:29 +08:00
LiHui
745ca088a7 add built-in repo to dynamically load app into app-store
Signed-off-by: LiHui <andrewli@kubesphere.io>
2021-09-27 15:35:29 +08:00
LiHui
b8d85fb75c add sync period to helm repo
Signed-off-by: LiHui <andrewli@kubesphere.io>
2021-09-27 15:35:29 +08:00
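
The sync-period handling added here clamps user-supplied values to a floor; the diff later in this page introduces a HelmRepoMinSyncPeriod constant of 180 seconds and a math.Max clamp. A runnable sketch of that logic:

```go
package main

import (
	"fmt"
	"math"
)

// HelmRepoMinSyncPeriod mirrors the 180-second floor added to
// pkg/constants in this change set.
const HelmRepoMinSyncPeriod = 180

// effectiveSyncPeriod applies the same math.Max clamp the helm repo
// controller diff uses for Spec.SyncPeriod (in seconds).
func effectiveSyncPeriod(specPeriod int) int {
	if specPeriod == 0 {
		return 0 // zero means periodic re-sync is disabled
	}
	return int(math.Max(float64(specPeriod), HelmRepoMinSyncPeriod))
}

func main() {
	fmt.Println(effectiveSyncPeriod(0))   // 0: disabled
	fmt.Println(effectiveSyncPeriod(60))  // 180: clamped up to the floor
	fmt.Println(effectiveSyncPeriod(600)) // 600: already above the floor
}
```
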
KubeSphere CI Bot
bab5cf27e3 Merge pull request #4284 from RolandMa1986/fix-ingress
update ingress version in ks-core chart
2021-09-27 15:19:53 +08:00
Roland.Ma
fb2e557baa remove docker mount
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-09-27 03:23:42 +00:00
Roland.Ma
28f6784aff Search gateway logs with ES
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-09-26 10:33:34 +00:00
Roland.Ma
5ad7d16788 update ingress version in ks-core chart
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-09-26 07:36:52 +00:00
KubeSphere CI Bot
15205cbc40 Merge pull request #4271 from LinuxSuRen/fix-am-devops-client
Fix the devopsProjectLister being nil
2021-09-24 11:51:54 +08:00
KubeSphere CI Bot
9734c99508 Merge pull request #4273 from RolandMa1986/fix-4265
fix: duplicated items shown in the list api
2021-09-24 11:49:54 +08:00
Roland.Ma
49204f64be fix: duplicated items shown in the list api
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-09-24 02:08:13 +00:00
KubeSphere CI Bot
cbfdb545ab Merge pull request #4254 from wansir/fix-regression-bug
fix regression bug: failed to create federateduser
2021-09-24 08:59:54 +08:00
rick
182c4acbbb Fix the devopsProjectLister being nil
Signed-off-by: rick <1450685+LinuxSuRen@users.noreply.github.com>
2021-09-23 19:32:20 +08:00
KubeSphere CI Bot
a56368e125 Merge pull request #4264 from RolandMa1986/feat-gateway-affinity
feat: add default gateway pod anti-affinity
2021-09-23 13:59:54 +08:00
Roland.Ma
949b1c790b add default pod anti-affinity
Signed-off-by: Roland.Ma <rolandma@kubesphere.io>
2021-09-22 09:28:11 +00:00
hongming
b087afed65 fix regression bug: failed to create federateduser
Signed-off-by: hongming <hongming@kubesphere.io>
2021-09-18 16:02:13 +08:00
81 changed files with 1524 additions and 731 deletions


@@ -1,16 +1,16 @@
name: BuildMultiArch
name: BuildContainerImage
on:
push:
branches:
- 'master'
- 'release*'
- 'release-*'
tags:
- 'v*'
pull_request:
branches:
- 'master'
- 'release*'
- 'release-*'
jobs:
build:
@@ -24,13 +24,13 @@ jobs:
with:
fetch-depth: 0
- name: "Set up QEMU"
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
with:
platforms: all
- name: "Set up Docker buildx"
uses: "docker/setup-buildx-action@v1"
- name: Set up Docker buildx
uses: docker/setup-buildx-action@v1
- name: Build and push docker images
env:
@@ -39,4 +39,4 @@ jobs:
if: github.event_name == 'push'
run: |
echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin
REPO=kubespheredev TAG="${GITHUB_REF#refs/*/}-multiarch" make container-cross-push
REPO=kubespheredev TAG="${GITHUB_REF#refs/*/}" make container-cross-push


@@ -1,64 +0,0 @@
name: Go
on:
push:
branches:
- 'master'
- 'release*'
tags:
- 'v*'
pull_request:
branches:
- 'master'
- 'release*'
jobs:
build:
name: Build
runs-on: ubuntu-latest
env:
GO111MODULE: on
steps:
- name: Set up Go 1.16
uses: actions/setup-go@v2
with:
go-version: 1.16
id: go
- name: Check out code into the Go module directory
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Check pr is properly formatted
run: diff -u <(echo -n) <(gofmt -d ./pkg ./cmd ./tools ./test)
- name: Verify goimports
run: go get -u golang.org/x/tools/cmd/goimports && bash hack/verify-goimports.sh
- name: Downloading go dependencies
run: go mod vendor
- name: Build
run: make all
- name: Make OpenAPI Spec
run: make openapi
- name: Uploading code coverage
uses: codecov/codecov-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
file: ./coverage.txt
flags: unittests
name: codecov-umbrella
fail_ci_if_error: false
- name: Build and push docker images
env:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
if: github.event_name == 'push'
run: |
echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin
REPO=kubespheredev TAG=${GITHUB_REF#refs/*/} make container-push


@@ -22,9 +22,6 @@ jobs:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Downloading go dependencies
run: go mod vendor
- name: Create kind cluster
uses: helm/kind-action@v1.2.0
with:
@@ -43,4 +40,5 @@ jobs:
with:
status: ${{ job.status }}
fields: repo,message,commit,author,action,eventName,ref,workflow,job,took
if: failure()
if: failure()


@@ -26,14 +26,13 @@ jobs:
with:
fetch-depth: 0
- name: Downloading go dependencies
run: go mod vendor
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
with:
platforms: all
- name: Build
run: make all
- name: Make OpenAPI Spec
run: make openapi
- name: Set up Docker buildx
uses: docker/setup-buildx-action@v1
- name: Build and push docker images
env:
@@ -41,13 +40,8 @@ jobs:
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
run: |
echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin
if [[ $? == 0 ]]; then
tag=nightly-$(date '+%Y%m%d')
REPO=kubespheredev TAG=${tag} make container-push
else
exit -1
fi
tag=nightly-$(date '+%Y%m%d')
REPO=kubespheredev TAG=${tag} make container-cross-push
- name: slack
uses: 8398a7/action-slack@v3


@@ -66,6 +66,9 @@ verify-all: ; $(info $(M)...Begin to run all verify scripts hack/verify-*.sh.)
e2e: ;$(info $(M)...Begin to build e2e binary.) @ ## Build e2e binary.
hack/build_e2e.sh test/e2e
kind-e2e: ;$(info $(M)...Run e2e test.) @ ## Run e2e test in kind.
hack/kind_e2e.sh
# Run go fmt against code
fmt: ;$(info $(M)...Begin to run go fmt against code.) @ ## Run go fmt against code.
gofmt -w ./pkg ./cmd ./tools ./api


@@ -171,6 +171,6 @@ The [user case studies](https://kubesphere.io/case/) page includes the user list
<img src="https://landscape.cncf.io/images/left-logo.svg" width="150"/>&nbsp;&nbsp;<img src="https://landscape.cncf.io/images/right-logo.svg" width="200"/>&nbsp;&nbsp;
<br/><br/>
KubeSphere is a member of CNCF and a <a href="https://www.cncf.io/certification/software-conformance/#logos">Kubernetes Conformance Certified platform
</a>, which enriches the <a href="https://landscape.cncf.io/landscape=observability-and-analysis&license=apache-license-2-0">CNCF CLOUD NATIVE Landscape.
</a>, which enriches the <a href="https://landscape.cncf.io/?landscape=observability-and-analysis&license=apache-license-2-0">CNCF CLOUD NATIVE Landscape.
</a>
</p>


@@ -1,10 +1,20 @@
# KubeSphere Container Platform
<p align="center">
<img src="docs/images/kubesphere-logo.png" alt="banner" width="200px">
</p>
[![License](http://img.shields.io/badge/license-apache%20v2-blue.svg)](https://github.com/KubeSphere/KubeSphere/blob/master/LICENSE)
[![Build Status](https://travis-ci.org/kubesphere/kubesphere.svg?branch=master)](https://travis-ci.org/kubesphere/kubesphere)
[![KubeSphere release](https://img.shields.io/github/release/kubesphere/kubesphere.svg?color=release&label=release&logo=release&logoColor=release)](https://github.com/kubesphere/kubesphere/releases/tag/v3.1.0)
<p align="center">
<b>The container platform tailored for <i>Kubernetes multi-cloud, datacenter, and edge</i> management</b>
</p>
<p align=center>
<a href="https://goreportcard.com/report/github.com/kubesphere/kubesphere"><img src="https://goreportcard.com/badge/github.com/kubesphere/kubesphere" alt="A+"></a>
<a href="https://hub.docker.com/r/kubesphere/ks-installer"><img src="https://img.shields.io/docker/pulls/kubesphere/ks-installer"></a>
<a href="https://github.com/search?q=user%3Akubesphere+user%3Akubesphere-sigs+label%3A%22good+first+issue%22+state%3Aopen&type=Issues&ref=advsearch&l=&l="><img src="https://img.shields.io/github/issues/badges/shields/good%20first%20issue" alt="good first"></a>
<a href="https://twitter.com/intent/follow?screen_name=KubeSphere"><img src="https://img.shields.io/twitter/follow/KubeSphere?style=social" alt="follow on Twitter"></a>
<a href="https://join.slack.com/t/kubesphere/shared_invite/enQtNTE3MDIxNzUxNzQ0LTZkNTdkYWNiYTVkMTM5ZThhODY1MjAyZmVlYWEwZmQ3ODQ1NmM1MGVkNWEzZTRhNzk0MzM5MmY4NDc3ZWVhMjE"><img src="https://img.shields.io/badge/Slack-600%2B-blueviolet?logo=slack&amp;logoColor=white"></a>
<a href="https://www.youtube.com/channel/UCyTdUQUYjf7XLjxECx63Hpw"><img src="https://img.shields.io/youtube/channel/subscribers/UCyTdUQUYjf7XLjxECx63Hpw?style=social"></a>
</p>
![logo](docs/images/kubesphere-logo.png)
----
@@ -12,9 +22,9 @@
> [English](README.md) | Chinese
[KubeSphere](https://kubesphere.com.cn) is a **hybrid container cloud** for cloud-native applications built on top of [Kubernetes](https://kubernetes.io). It supports multi-cloud and multi-cluster management, provides full-stack automated IT operations, and streamlines enterprise DevOps workflows. KubeSphere offers an operator-friendly, wizard-style UI that helps enterprises quickly build a powerful and feature-rich container cloud platform. KubeSphere's vision is to create a Kubernetes-based cloud-native distributed operating system whose architecture integrates with the cloud-native ecosystem in a plug-and-play fashion.
[KubeSphere](https://kubesphere.io/zh/)'s vision is to create a **cloud-native distributed operating system** with [Kubernetes](https://kubernetes.io/zh/) as its kernel. Its architecture makes it easy for third-party applications and cloud-native ecosystem components to integrate in a plug-and-play fashion, and it supports unified distribution and operation of cloud-native applications across multiple clouds and clusters. KubeSphere is also a multi-tenant container platform that provides full-stack automated IT operations and streamlines enterprise DevOps workflows, with an operator-friendly, wizard-style UI that helps enterprises quickly build a powerful and feature-rich container cloud platform; see [Features](#平台功能) for details.
The latest version of KubeSphere is 3.1.0, and every release is 100% open source. For a more detailed introduction, see [What is KubeSphere](https://kubesphere.com.cn/docs/introduction/what-is-kubesphere/).
The screenshots below give a closer look at KubeSphere; for a more detailed introduction, see [What is KubeSphere](https://kubesphere.io/zh/docs/introduction/what-is-kubesphere/).
<table>
<tr>
@@ -35,89 +45,134 @@ The latest version of KubeSphere is 3.1.0, and every release is 100% open…
</tr>
</table>
## Quick Experience
## Demo Environment
Log in to the [demo environment](https://demo.kubesphere.io/) with the account `demo1 / Demo123`. This account is granted view permission only, so installing KubeSphere yourself is recommended to experience the full management features. You can also watch the [KubeSphere demo video](https://youtu.be/u5lQvhi_Xlc) on YouTube.
🎮 Log in to the [demo environment](https://demo.kubesphere.io/) with the account `demo1 / Demo123`. Note that this account is granted view permission only.
## Architecture
🖥 You can also take a quick look at the [demo video](https://youtu.be/YxZ1YUv0CYs).
KubeSphere uses a decoupled front-end/back-end architecture, and each back-end component can integrate with external systems through REST APIs; see the [architecture documentation](https://kubesphere.com.cn/docs/introduction/architecture/) for details. This repository contains only the back-end code; for the front end, see the [Console project](https://github.com/kubesphere/console).
## Features
<details>
<summary><b>🕸 Provision Kubernetes clusters</b></summary>
Supports deploying Kubernetes on any infrastructure, with both online and air-gapped installation. <a href="https://kubesphere.io/zh/docs/installing-on-linux/introduction/intro/">Learn more</a>.
</details>
<details>
<summary><b>🔗 Kubernetes multi-cluster management</b></summary>
Provides a central control plane to manage multiple Kubernetes clusters, and supports releasing applications to multiple K8s clusters across different cloud providers.
</details>
<details>
<summary><b>🤖 Kubernetes DevOps</b></summary>
Provides out-of-the-box, Jenkins-based CI/CD with built-in automated pipeline plugins, including Binary-to-Image (B2I) and Source-to-Image (S2I). <a href="https://kubesphere.io/zh/devops/">Learn more</a>.
</details>
<details>
<summary><b>🔎 Cloud-native observability</b></summary>
Supports multi-dimensional monitoring, events, and auditing logs, with built-in multi-tenant log query and collection, alerting, and notifications. <a href="https://kubesphere.io/zh/observability/">Learn more</a>.
</details>
<details>
<summary><b>🧩 Istio-based microservice governance</b></summary>
Provides fine-grained traffic management, observability, and service tracing for distributed microservice applications, with a visualized traffic topology. <a href="https://kubesphere.io/zh/service-mesh/">Learn more</a>.
</details>
<details>
<summary><b>💻 App store</b></summary>
Provides an app store for Helm-based applications and application lifecycle management on Kubernetes. <a href="https://kubesphere.io/zh/docs/pluggable-components/app-store/">Learn more</a>.
</details>
<details>
<summary><b>💡 Kubernetes edge node management</b></summary>
Based on <a href="https://kubeedge.io/zh/">KubeEdge</a>, distributes and manages applications and workloads uniformly across cloud and edge nodes, covering application delivery, operations, and control on massive numbers of edge and end devices. <a href="https://kubesphere.io/zh/docs/pluggable-components/kubeedge/">Learn more</a>.
</details>
<details>
<summary><b>📊 Multi-dimensional metering and billing</b></summary>
Provides monitoring reports for cluster- and tenant-based resource metering and billing, making Kubernetes operating costs more transparent. <a href="https://kubesphere.io/zh/docs/toolbox/metering-and-billing/view-resource-consumption/">Learn more</a>.
</details>
<details>
<summary><b>🗃 Multiple storage and network solutions</b></summary>
<li>Supports GlusterFS, Ceph RBD, NFS, and Local PV, and provides multiple CSI plugins for public-cloud and enterprise storage.</li><li>Provides <a href="https://github.com/kubesphere/openelb">OpenELB</a>, a load balancer implementation for Kubernetes on bare metal, at the edge, and in virtualized environments.</li><li>Provides network policy and pod IP pool management, with support for Calico, Flannel, and Kube-OVN.</li>
</details>
<details>
<summary><b>🏘 Multi-tenancy</b></summary>
Provides unified authentication with fine-grained role-based authorization, and supports AD/LDAP integration.
</details>
## Architecture
KubeSphere uses a decoupled architecture that separates the [front end](https://github.com/kubesphere/console) from the [back end](https://github.com/kubesphere/kubesphere). Each back-end component can integrate with external systems through REST APIs.
![Architecture](docs/images/architecture.png)
## Core features
| Feature | Description |
| --- | --- |
| Multi-cloud and multi-cluster management | Central management panel for multiple clouds and clusters, with cluster import and one-click application distribution across clouds and clusters |
| Kubernetes cluster provisioning and operations | Online and air-gapped installation, upgrade, and scaling of K8s clusters, with support for installing a full cloud-native stack |
| Visual management of Kubernetes resources | Visual management of native Kubernetes resources, with wizard-style creation and management of K8s resources |
| Jenkins-based DevOps system | CI/CD pipelines built graphically or from scripts, with built-in CD tools such as Source-to-Image (S2I) and Binary-to-Image (B2I) |
| App store and application lifecycle management | An app store with 15 common built-in applications such as Redis and MySQL, plus application lifecycle management |
| Istio-based microservice governance (service mesh) | Visualized, non-intrusive **canary release, circuit breaking, traffic management and topology, and distributed tracing** |
| Multi-tenant management | Fine-grained, role-based unified authentication for multiple tenants, with **enterprise LDAP/AD integration** and multi-level access control |
| Rich observability | Monitoring at the cluster, workload, pod, and container level, multi-tenant log query and collection, and alerting and notifications at the node and application level |
| Infrastructure management | Kubernetes node management, node scaling and cluster upgrades, and node-based monitoring metrics and alerting rules |
| Storage management | Integrates Ceph, GlusterFS, NFS, and Local PV, with visual management of PVCs and StorageClasses and CSI plugins for cloud storage |
| Network management | Tenant network isolation and K8s [Network Policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) management, with support for Calico and Flannel; provides [Porter LB](https://github.com/kubesphere/porter) to expose LoadBalancer services of bare-metal K8s clusters |
| GPU support | Clusters can add GPUs and vGPUs to run ML frameworks such as TensorFlow |
See [Features](https://kubesphere.com.cn/docs/introduction/features/) for details on the above.
----
## Latest release
## Latest version
KubeSphere 3.1.0 went GA on April 29, 2021; see the [Release Notes For 3.1.0](https://kubesphere.com.cn/docs/release/release-v310/) for details on the release.
🎉 KubeSphere 3.1.1 is out! For what's new, see the [Release Notes For 3.1.1](https://kubesphere.io/zh/docs/release/release-v311/).
## Installing 3.1.0
## Installation
KubeSphere runs anywhere, from on-premises datacenters to hybrid multi-cloud to the edge. It can also be deployed on any version-compatible Kubernetes cluster.
### Quick start
The [quick-start series](https://kubesphere.com.cn/docs/quick-start/) provides installation and getting-started examples for a first installation.
1. Run the following commands to install KubeSphere on an existing Kubernetes cluster:
### Installing KubeSphere on an existing Kubernetes cluster
```bash
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/kubesphere-installer.yaml
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.1.1/cluster-configuration.yaml
```
- [Overview of installing on Kubernetes](https://kubesphere.com.cn/docs/installing-on-kubernetes/introduction/overview/)
- [Install KubeSphere on Alibaba Cloud ACK](https://kubesphere.com.cn/forum/d/1745-kubesphere-v3-0-0-dev-on-ack)
- [Install KubeSphere on Tencent Cloud TKE](https://kubesphere.com.cn/docs/installing-on-kubernetes/hosted-kubernetes/install-ks-on-tencent-tke/)
- [Install KubeSphere on Huawei Cloud CCE](https://kubesphere.com.cn/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-huaweicloud-cce/)
- [Install KubeSphere on AWS EKS](https://kubesphere.com.cn/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-eks/)
- [Install KubeSphere on Google GKE](https://kubesphere.com.cn/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-gke/)
- [Install KubeSphere on Azure AKS](https://kubesphere.com.cn/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-aks/)
- [Install KubeSphere on DigitalOcean](https://kubesphere.com.cn/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-do/)
- [Install KubeSphere on Oracle OKE](https://kubesphere.com.cn/docs/installing-on-kubernetes/hosted-kubernetes/install-kubesphere-on-oke/)
2. Run the following command to inspect the installation logs. Once KubeSphere is installed successfully, you can access the KubeSphere console at `http://IP:30880` with the default account and password (`admin / P@88w0rd`):
### Installing KubeSphere on Linux
```bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
```
- [Multi-node installation overview (three-node example)](https://kubesphere.com.cn/docs/installing-on-linux/introduction/multioverview/)
- [Install a high-availability cluster on VMware vSphere](https://kubesphere.com.cn/docs/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere/)
- [Install a high-availability cluster on QingCloud](https://kubesphere.com.cn/docs/installing-on-linux/public-cloud/install-kubesphere-on-qingcloud-vms/)
- [Deploy a high-availability cluster on Alibaba Cloud ECS](https://kubesphere.com.cn/docs/installing-on-linux/public-cloud/install-kubesphere-on-ali-ecs/)
> 👨‍💻 No Kubernetes cluster yet? Try the [all-in-one](https://kubesphere.io/zh/docs/quick-start/all-in-one-on-linux/) mode on Linux to install single-node Kubernetes and KubeSphere.
- [Deploy a high-availability cluster on Huawei Cloud VMs](https://kubesphere.com.cn/docs/installing-on-linux/public-cloud/install-kubesphere-on-huaweicloud-ecs/)
- [Install a high-availability cluster on Azure VMs](https://kubesphere.com.cn/docs/installing-on-linux/public-cloud/install-kubesphere-on-azure-vms/)
### Deploying KubeSphere on hosted Kubernetes
## Community
KubeSphere is hosted on the following cloud providers, where you can deploy it with one click on their hosted Kubernetes services.
The [KubeSphere community](https://github.com/kubesphere/community) holds all community information, including how to develop and the special interest groups (SIGs). For example, the [development guide](https://github.com/kubesphere/community/tree/master/developer-guide/development) explains how to build from source, the KubeSphere GitHub workflow, how to contribute code, and how to run tests.
- [Deploy KubeSphere on Amazon EKS](https://aws.amazon.com/quickstart/architecture/qingcloud-kubesphere/)
- [Deploy KubeSphere on Azure AKS](https://market.azure.cn/marketplace/apps/qingcloud.kubesphere)
- [Deploy KubeSphere on DigitalOcean](https://marketplace.digitalocean.com/apps/kubesphere)
- [Deploy KubeSphere on QingCloud QKE](https://www.qingcloud.com/products/kubesphereqke)
You can also install KubeSphere on other hosted Kubernetes services within minutes; see the [official documentation](https://kubesphere.io/zh/docs/installing-on-kubernetes/) to get started.
> 👨‍💻 No internet access? See [air-gapped installation on Kubernetes](https://kubesphere.io/zh/docs/installing-on-kubernetes/on-prem-kubernetes/install-ks-on-linux-airgapped/) or [air-gapped installation on Linux](https://kubesphere.io/zh/docs/installing-on-linux/introduction/air-gapped-installation/) to learn how to install KubeSphere from a private registry.
## Contribution, support, discussion, and community
We :heart: your contributions. The [community](https://github.com/kubesphere/community) will guide you through getting started with contributing to KubeSphere, and the [development guide](https://github.com/kubesphere/community/tree/master/developer-guide/development) explains how to set up a development environment.
- [Chinese forum](https://kubesphere.com.cn/forum/)
- [Slack Channel](https://join.slack.com/t/kubesphere/shared_invite/enQtNTE3MDIxNzUxNzQ0LTZkNTdkYWNiYTVkMTM5ZThhODY1MjAyZmVlYWEwZmQ3ODQ1NmM1MGVkNWEzZTRhNzk0MzM5MmY4NDc3ZWVhMjE)
- [Community WeChat group (see the bottom of the official site)](https://kubesphere.com.cn/)
- [Bug reports and suggestions (GitHub Issues)](https://github.com/kubesphere/kubesphere/issues)
- [Slack Channel](https://join.slack.com/t/kubesphere/shared_invite/enQtNTE3MDIxNzUxNzQ0LTZkNTdkYWNiYTVkMTM5ZThhODY1MjAyZmVlYWEwZmQ3ODQ1NmM1MGVkNWEzZTRhNzk0MzM5MmY4NDc3ZWVhMjE)
- [Youtube](https://www.youtube.com/channel/UCyTdUQUYjf7XLjxECx63Hpw)
- [Follow us on Twitter](https://twitter.com/KubeSphere)
Please file any KubeSphere bugs, questions, and feature requests at [KubeSphere GitHub Issues](https://github.com/kubesphere/kubesphere/issues).
## Who is using KubeSphere
[Powered by KubeSphere](https://kubesphere.com.cn/case/) lists the enterprises using KubeSphere. If your company has installed and is using KubeSphere, feel free to [submit a PR](https://github.com/kubesphere/kubesphere/blob/master/docs/powered-by-kubesphere.md).
The [user case studies](https://kubesphere.com.cn/case/) page lists the enterprises using KubeSphere; feel free to [leave a comment](https://github.com/kubesphere/kubesphere/issues/4123) to share your use case.
## Landscapes
<p align="center">
<br/><br/>
<img src="https://landscape.cncf.io/images/left-logo.svg" width="150"/>&nbsp;&nbsp;<img src="https://landscape.cncf.io/images/right-logo.svg" width="200"/>&nbsp;&nbsp;<img src="https://www.cncf.io/wp-content/uploads/2017/11/certified_kubernetes_color.png" height="40" width="30"/>
<img src="https://landscape.cncf.io/images/left-logo.svg" width="150"/>&nbsp;&nbsp;<img src="https://landscape.cncf.io/images/right-logo.svg" width="200"/>&nbsp;&nbsp;
<br/><br/>
KubeSphere is a member of CNCF and a <a href="https://www.cncf.io/certification/software-conformance/#logos">Kubernetes Conformance Certified platform
</a>, which enriches the <a href="https://landscape.cncf.io/landscape=observability-and-analysis&license=apache-license-2-0">CNCF CLOUD NATIVE Landscape.
</a>, which enriches the <a href="https://landscape.cncf.io/?landscape=observability-and-analysis&license=apache-license-2-0">CNCF CLOUD NATIVE Landscape.
</a>
</p>


@@ -315,12 +315,11 @@ func run(s *options.KubeSphereControllerManagerOptions, ctx context.Context) err
klog.Fatalf("Unable to create ResourceQuota controller: %v", err)
}
helmReconciler := helm.Reconciler{}
if !s.GatewayOptions.IsEmpty() {
helmReconciler.WatchFiles = append(helmReconciler.WatchFiles, s.GatewayOptions.WatchesPath)
}
if err := helmReconciler.SetupWithManager(mgr); err != nil {
klog.Fatalf("Unable to create helm controller: %v", err)
helmReconciler := helm.Reconciler{GatewayOptions: s.GatewayOptions}
if err := helmReconciler.SetupWithManager(mgr); err != nil {
klog.Fatalf("Unable to create helm controller: %v", err)
}
}
// TODO(jeff): refactor config with CRD


@@ -97,7 +97,7 @@ func Run(s *options.ServerRunOptions, ctx context.Context) error {
err = apiserver.PrepareRun(ctx.Done())
if err != nil {
return nil
return err
}
return apiserver.Run(ctx)


@@ -20,6 +20,11 @@ spec:
config: {{ toYaml .Values.controller.config | nindent 6 }}
{{- end }}
{{- if hasKey .Values.deployment.annotations "servicemesh.kubesphere.io/enabled" }}
podAnnotations:
sidecar.istio.io/inject: {{ get .Values.deployment.annotations "servicemesh.kubesphere.io/enabled" }}
{{- end }}
## Annotations to be added to the controller config configuration configmap
##
configAnnotations: {}
@@ -126,27 +131,26 @@ spec:
## Affinity and anti-affinity
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
affinity: {}
# # An example of preferred pod anti-affinity, weight is in the range 1-100
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - weight: 100
# podAffinityTerm:
# labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/instance
# operator: In
# values:
# - ingress-nginx
# - key: app.kubernetes.io/component
# operator: In
# values:
# - controller
# topologyKey: kubernetes.io/hostname
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- ingress-nginx
- key: app.kubernetes.io/instance
operator: In
values:
- {{ .Release.Name }}-ingress
- key: app.kubernetes.io/component
operator: In
values:
- controller
topologyKey: kubernetes.io/hostname
# # An example of required pod anti-affinity
# podAntiAffinity:


@@ -14,6 +14,7 @@ controller:
repository: kubesphere/nginx-ingress-controller
tag: "v0.48.1"
pullPolicy: IfNotPresent
digest: ""
service:


@@ -41,14 +41,13 @@ spec:
resources:
{{- toYaml .Values.apiserver.resources | nindent 12 }}
volumeMounts:
- mountPath: /var/run/docker.sock
name: docker-sock
- mountPath: /etc/kubesphere/ingress-controller
name: ks-router-config
- mountPath: /etc/kubesphere/
name: kubesphere-config
- mountPath: /etc/localtime
name: host-time
readOnly: true
{{- if .Values.apiserver.extraVolumeMounts }}
{{- toYaml .Values.apiserver.extraVolumeMounts | nindent 8 }}
{{- end }}
@@ -69,34 +68,15 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: In
values:
- ""
{{- if gt .Values.replicaCount 1.0 }}
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- ks-apiserver
namespaces:
- {{ .Release.Namespace }}
{{- end }}
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- hostPath:
path: /var/run/docker.sock
type: ""
name: docker-sock
- configMap:
defaultMode: 420
name: ks-router-config


@@ -36,6 +36,7 @@ spec:
name: sample-bookinfo
- mountPath: /etc/localtime
name: host-time
readOnly: true
{{- if .Values.console.extraVolumeMounts }}
{{- toYaml .Values.console.extraVolumeMounts | nindent 8 }}
{{- end }}
@@ -53,29 +54,14 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: In
values:
- ""
{{- if gt .Values.replicaCount 1.0 }}
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- ks-console
namespaces:
- {{ .Release.Namespace }}
{{- end }}
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- configMap:
defaultMode: 420


@@ -52,6 +52,7 @@ spec:
name: webhook-secret
- mountPath: /etc/localtime
name: host-time
readOnly: true
{{- if .Values.controller.extraVolumeMounts }}
{{- toYaml .Values.controller.extraVolumeMounts | nindent 8 }}
{{- end }}
@@ -84,29 +85,14 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: In
values:
- ""
{{- if gt .Values.replicaCount 1.0 }}
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- ks-controller-manager
namespaces:
- {{ .Release.Namespace }}
{{- end }}
{{- toYaml . | nindent 8 }}
{{- end }}
---


@@ -355,7 +355,7 @@ data:
targetPort: 9080
---
apiVersion: extensions/v1beta1
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
namespace: servicemesh
@@ -367,10 +367,13 @@ data:
rules:
- http:
paths:
- path: /
backend:
serviceName: productpage
servicePort: 9080
- backend:
service:
name: productpage
port:
number: 9080
path: /
pathType: ImplementationSpecific
host: productpage.servicemesh.139.198.121.92.nip.io
kind: ConfigMap
metadata:


@@ -74,6 +74,8 @@ securityContext: {}
# Kubernetes Version shows in KubeSphere console
kube_version: "v1.19.4"
env: []
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
@@ -89,7 +91,8 @@ tolerations:
tolerationSeconds: 60
affinity: {}
env: []
nodeSelector: {}
## deployment specific configuration

go.mod

@@ -33,6 +33,7 @@ require (
github.com/emicklei/go-restful v2.14.3+incompatible
github.com/emicklei/go-restful-openapi v1.4.1
github.com/emirpasic/gods v1.12.0 // indirect
github.com/evanphx/json-patch v4.11.0+incompatible
github.com/fatih/structs v1.1.0
github.com/form3tech-oss/jwt-go v3.2.2+incompatible
github.com/garyburd/redigo v1.6.0 // indirect
@@ -106,26 +107,26 @@ require (
istio.io/api v0.0.0-20201113182140-d4b7e3fc2b44
istio.io/client-go v0.0.0-20201113183938-0734e976e785
istio.io/gogo-genproto v0.0.0-20201113182723-5b8563d8a012 // indirect
k8s.io/api v0.21.3
k8s.io/apiextensions-apiserver v0.21.3
k8s.io/apimachinery v0.21.3
k8s.io/api v0.21.4
k8s.io/apiextensions-apiserver v0.21.4
k8s.io/apimachinery v0.21.4
k8s.io/apiserver v0.21.2
k8s.io/cli-runtime v0.21.2
k8s.io/client-go v12.0.0+incompatible
k8s.io/code-generator v0.21.2
k8s.io/component-base v0.21.2
k8s.io/component-base v0.21.4
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.8.0
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e
k8s.io/kubectl v0.21.2
k8s.io/metrics v0.21.2
k8s.io/utils v0.0.0-20210527160623-6fdb442a123b
k8s.io/utils v0.0.0-20210802155522-efc7438f0176
kubesphere.io/api v0.0.0
kubesphere.io/client-go v0.0.0
kubesphere.io/monitoring-dashboard v0.2.2
rsc.io/letsencrypt v0.0.1 // indirect
sigs.k8s.io/application v0.8.4-0.20201016185654-c8e2959e57a0
sigs.k8s.io/controller-runtime v0.9.3
sigs.k8s.io/controller-runtime v0.9.8-0.20211019125639-aa2b3e68a52d
sigs.k8s.io/controller-tools v0.6.2
sigs.k8s.io/kubefed v0.8.1
sigs.k8s.io/kustomize/api v0.8.8
@@ -500,6 +501,7 @@ replace (
github.com/kr/text => github.com/kr/text v0.1.0
github.com/kshvakov/clickhouse => github.com/kshvakov/clickhouse v1.3.5
github.com/kubernetes-csi/external-snapshotter/client/v3 => github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0
github.com/kubernetes-csi/external-snapshotter/client/v4 => github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
github.com/kubesphere/sonargo => github.com/kubesphere/sonargo v0.0.2
github.com/kylelemons/go-gypsy => github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28
github.com/kylelemons/godebug => github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb
@@ -811,7 +813,7 @@ replace (
rsc.io/sampler => rsc.io/sampler v1.3.0
sigs.k8s.io/apiserver-network-proxy/konnectivity-client => sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19
sigs.k8s.io/application => sigs.k8s.io/application v0.8.4-0.20201016185654-c8e2959e57a0
sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.3
sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.8-0.20211019125639-aa2b3e68a52d
sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.6.2
sigs.k8s.io/kind => sigs.k8s.io/kind v0.8.1
sigs.k8s.io/kubebuilder/v3 => sigs.k8s.io/kubebuilder/v3 v3.0.0-alpha.0.0.20210716121009-fde793f20067

go.sum

@@ -996,8 +996,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19 h1:0jaDAAxtqIrrq
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/application v0.8.4-0.20201016185654-c8e2959e57a0 h1:cH3Q4uNycL9LgzlyU+/UikIM1T6tx9LKc/Ie/QVIRM8=
sigs.k8s.io/application v0.8.4-0.20201016185654-c8e2959e57a0/go.mod h1:wdTrELsIgKk8lnlRaoKWao9YpLelXpABdEgCM1aEEE4=
sigs.k8s.io/controller-runtime v0.9.3 h1:n075bHQ1wb8hpX7C27pNrqsb0fj8mcfCQfNX+oKTbYE=
sigs.k8s.io/controller-runtime v0.9.3/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk=
sigs.k8s.io/controller-runtime v0.9.8-0.20211019125639-aa2b3e68a52d h1:6S7UHwijq5QDvhmo+dmkdvyo2a6sMnz9ilLbMUEgJ28=
sigs.k8s.io/controller-runtime v0.9.8-0.20211019125639-aa2b3e68a52d/go.mod h1:nExcHcQ2zvLMeoO9K7rOesGCmgu32srN5SENvpAEbGA=
sigs.k8s.io/controller-tools v0.6.2 h1:+Y8L0UsAugDipGRw8lrkPoAi6XqlQVZuf1DQHME3PgU=
sigs.k8s.io/controller-tools v0.6.2/go.mod h1:oaeGpjXn6+ZSEIQkUe/+3I40PNiDYp9aeawbt3xTgJ8=
sigs.k8s.io/kind v0.8.1/go.mod h1:oNKTxUVPYkV9lWzY6CVMNluVq8cBsyq+UgPJdvA3uu4=


@@ -41,14 +41,10 @@ if [[ "${KIND_LOAD_IMAGE:-}" == "y" ]]; then
kind load docker-image "$REPO/ks-controller-manager:$TAG" --name="${KIND_CLUSTER_NAME:-kind}"
fi
# Download the latest ks-install to deploy KubeSphere
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries 3 https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries 3 https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml
#TODO: override ks-apiserver and ks-controller-manager images with specific tag
kubectl apply -f https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml
kubectl apply -f https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml
kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml
wait_for_installation_finish

hack/kind_e2e.sh (new executable file)

@@ -0,0 +1,65 @@
#!/bin/bash
# Copyright 2021 The KubeSphere Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KIND_LOG_LEVEL="1"
if [ -n "${DEBUG}" ]; then
set -x
KIND_LOG_LEVEL="6"
fi
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}"/hack/lib/init.sh
cleanup() {
kind delete cluster \
--verbosity="${KIND_LOG_LEVEL}" \
--name "${KIND_CLUSTER_NAME}"
}
trap cleanup EXIT
export KIND_CLUSTER_NAME=${KIND_CLUSTER_NAME:-kubesphere-e2e}
if ! command -v kind --version &> /dev/null; then
echo "kind is not installed. Use the package manager or visit the official site https://kind.sigs.k8s.io/"
exit 1
fi
echo "Creating Kubernetes cluster with kind"
export K8S_VERSION=${K8S_VERSION:-v1.21.1}
kind create cluster \
--verbosity="${KIND_LOG_LEVEL}" \
--name "${KIND_CLUSTER_NAME}" \
--config "${KUBE_ROOT}"/test/e2e/kind.yaml \
--retain \
--image kindest/node:"${K8S_VERSION}"
echo "Kubernetes cluster:"
kubectl get nodes -o wide
echo "Deploy KubeSphere"
"${KUBE_ROOT}"/hack/deploy-kubesphere.sh
echo "Run e2e test"
go test ./test/e2e


@@ -80,7 +80,8 @@ kube::version::get_version_vars() {
# the "major" and "minor" versions and whether this is the exact tagged
# version or whether the tree is between two tagged versions.
if [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then
# KUBE_GIT_MAJOR=${BASH_REMATCH[1]}
# shellcheck disable=SC2034
KUBE_GIT_MAJOR=${BASH_REMATCH[1]}
KUBE_GIT_MINOR=${BASH_REMATCH[2]}
if [[ -n "${BASH_REMATCH[4]}" ]]; then
KUBE_GIT_MINOR+="+"


@@ -24,6 +24,8 @@ import (
rt "runtime"
"time"
"kubesphere.io/kubesphere/pkg/utils/iputil"
"kubesphere.io/kubesphere/pkg/apiserver/authentication/token"
"kubesphere.io/kubesphere/pkg/apiserver/authorization"
@@ -107,7 +109,6 @@ import (
"kubesphere.io/kubesphere/pkg/simple/client/s3"
"kubesphere.io/kubesphere/pkg/simple/client/sonarqube"
"kubesphere.io/kubesphere/pkg/utils/metrics"
utilnet "kubesphere.io/kubesphere/pkg/utils/net"
)
type APIServer struct {
@@ -256,7 +257,7 @@ func (s *APIServer) installKubeSphereAPIs() {
urlruntime.Must(notificationkapisv2beta1.AddToContainer(s.container, s.InformerFactory, s.KubernetesClient.Kubernetes(),
s.KubernetesClient.KubeSphere()))
urlruntime.Must(notificationkapisv2beta2.AddToContainer(s.container, s.Config.NotificationOptions))
urlruntime.Must(gatewayv1alpha1.AddToContainer(s.container, s.Config.GatewayOptions, s.RuntimeCache, s.RuntimeClient, s.InformerFactory, s.KubernetesClient.Kubernetes()))
urlruntime.Must(gatewayv1alpha1.AddToContainer(s.container, s.Config.GatewayOptions, s.RuntimeCache, s.RuntimeClient, s.InformerFactory, s.KubernetesClient.Kubernetes(), s.LoggingClient))
}
func (s *APIServer) Run(ctx context.Context) (err error) {
@@ -322,7 +323,7 @@ func (s *APIServer) buildHandlerChain(stopCh <-chan struct{}) {
case authorization.RBAC:
excludedPaths := []string{"/oauth/*", "/kapis/config.kubesphere.io/*", "/kapis/version", "/kapis/metrics"}
pathAuthorizer, _ := path.NewAuthorizer(excludedPaths)
amOperator := am.NewReadOnlyOperator(s.InformerFactory)
amOperator := am.NewReadOnlyOperator(s.InformerFactory, s.DevopsClient)
authorizers = unionauthorizer.New(pathAuthorizer, rbac.NewRBACAuthorizer(amOperator))
}
@@ -596,7 +597,7 @@ func logRequestAndResponse(req *restful.Request, resp *restful.Response, chain *
}
logWithVerbose.Infof("%s - \"%s %s %s\" %d %d %dms",
utilnet.GetRequestIP(req.Request),
iputil.RemoteIp(req.Request),
req.Request.Method,
req.Request.URL,
req.Request.Proto,


@@ -920,7 +920,7 @@ func newMockRBACAuthorizer(staticRoles *StaticRoles) (*RBACAuthorizer, error) {
return nil, err
}
}
return NewRBACAuthorizer(am.NewReadOnlyOperator(fakeInformerFactory)), nil
return NewRBACAuthorizer(am.NewReadOnlyOperator(fakeInformerFactory, nil)), nil
}
func TestAppliesTo(t *testing.T) {


@@ -25,6 +25,8 @@ import (
"net/http"
"strings"
"kubesphere.io/kubesphere/pkg/utils/iputil"
"k8s.io/apimachinery/pkg/api/validation/path"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
@@ -36,7 +38,6 @@ import (
"kubesphere.io/kubesphere/pkg/api"
"kubesphere.io/kubesphere/pkg/constants"
netutils "kubesphere.io/kubesphere/pkg/utils/net"
)
type RequestInfoResolver interface {
@@ -127,7 +128,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er
},
Workspace: api.WorkspaceNone,
Cluster: api.ClusterNone,
SourceIP: netutils.GetRequestIP(req),
SourceIP: iputil.RemoteIp(req),
UserAgent: req.UserAgent(),
}


@@ -71,6 +71,8 @@ const (
OpenpitrixAttachmentTag = "Attachment"
OpenpitrixRepositoryTag = "Repository"
OpenpitrixManagementTag = "App Management"
// HelmRepoMinSyncPeriod min sync period in seconds
HelmRepoMinSyncPeriod = 180
CleanupDanglingAppOngoing = "ongoing"
CleanupDanglingAppDone = "done"


@@ -63,8 +63,8 @@ type fixture struct {
fedgroupBindingLister []*fedv1beta1types.FederatedGroupBinding
userLister []*v1alpha2.User
// Actions expected to happen on the client.
kubeactions []core.Action
actions []core.Action
k8sactions []core.Action
ksactions []core.Action
// Objects from here preloaded into NewSimpleFake.
kubeobjects []runtime.Object
objects []runtime.Object
@@ -185,32 +185,32 @@ func (f *fixture) runController(groupBinding string, startInformers bool, expect
actions := filterInformerActions(f.ksclient.Actions())
for i, action := range actions {
if len(f.actions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
if len(f.ksactions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.ksactions), actions[i:])
break
}
expectedAction := f.actions[i]
expectedAction := f.ksactions[i]
checkAction(expectedAction, action, f.t)
}
if len(f.actions) > len(actions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
if len(f.ksactions) > len(actions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.ksactions)-len(actions), f.ksactions[len(actions):])
}
k8sActions := filterInformerActions(f.k8sclient.Actions())
for i, action := range k8sActions {
if len(f.kubeactions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(k8sActions)-len(f.kubeactions), k8sActions[i:])
if len(f.k8sactions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(k8sActions)-len(f.k8sactions), k8sActions[i:])
break
}
expectedAction := f.kubeactions[i]
expectedAction := f.k8sactions[i]
checkAction(expectedAction, action, f.t)
}
if len(f.kubeactions) > len(k8sActions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.kubeactions)-len(k8sActions), f.kubeactions[len(k8sActions):])
if len(f.k8sactions) > len(k8sActions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.k8sactions)-len(k8sActions), f.k8sactions[len(k8sActions):])
}
}
@@ -269,18 +269,12 @@ func checkAction(expected, actual core.Action, t *testing.T) {
func filterInformerActions(actions []core.Action) []core.Action {
var ret []core.Action
for _, action := range actions {
if len(action.GetNamespace()) == 0 &&
(action.Matches("list", "groupbindings") ||
action.Matches("watch", "groupbindings") ||
action.Matches("list", "federatedgroupbindings") ||
action.Matches("list", "users") ||
action.Matches("watch", "users") ||
action.Matches("get", "users")) {
// filter out read action
if action.GetVerb() == "watch" || action.GetVerb() == "list" || action.GetVerb() == "get" {
continue
}
ret = append(ret, action)
}
return ret
}
@@ -289,14 +283,14 @@ func (f *fixture) expectUpdateGroupsFinalizerAction(groupBinding *v1alpha2.Group
expect.Finalizers = []string{"finalizers.kubesphere.io/groupsbindings"}
expect.Labels = map[string]string{constants.KubefedManagedLabel: "false"}
action := core.NewUpdateAction(schema.GroupVersionResource{Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "groupbindings"}, "", expect)
f.actions = append(f.actions, action)
f.ksactions = append(f.ksactions, action)
}
func (f *fixture) expectUpdateGroupsDeleteAction(groupBinding *v1alpha2.GroupBinding) {
expect := groupBinding.DeepCopy()
expect.Finalizers = []string{}
action := core.NewUpdateAction(schema.GroupVersionResource{Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "groupbindings"}, "", expect)
f.actions = append(f.actions, action)
f.ksactions = append(f.ksactions, action)
}
func (f *fixture) expectPatchUserAction(user *v1alpha2.User, groups []string) {
@@ -305,16 +299,16 @@ func (f *fixture) expectPatchUserAction(user *v1alpha2.User, groups []string) {
patch := client.MergeFrom(user)
patchData, _ := patch.Data(newUser)
f.actions = append(f.actions, core.NewPatchAction(schema.GroupVersionResource{Group: "iam.kubesphere.io", Resource: "users", Version: "v1alpha2"}, user.Namespace, user.Name, patch.Type(), patchData))
f.ksactions = append(f.ksactions, core.NewPatchAction(schema.GroupVersionResource{Group: "iam.kubesphere.io", Resource: "users", Version: "v1alpha2"}, user.Namespace, user.Name, patch.Type(), patchData))
}
func (f *fixture) expectCreateFederatedGroupBindingsAction(groupBinding *v1alpha2.GroupBinding) {
b := newFederatedGroupBinding(groupBinding)
controllerutil.SetControllerReference(groupBinding, b, scheme.Scheme)
_ = controllerutil.SetControllerReference(groupBinding, b, scheme.Scheme)
actionCreate := core.NewCreateAction(schema.GroupVersionResource{Group: "types.kubefed.io", Version: "v1beta1", Resource: "federatedgroupbindings"}, "", b)
f.actions = append(f.actions, actionCreate)
f.ksactions = append(f.ksactions, actionCreate)
}
func getKey(groupBinding *v1alpha2.GroupBinding, t *testing.T) string {
@@ -341,9 +335,9 @@ func TestCreatesGroupBinding(t *testing.T) {
f.objects = append(f.objects, user)
excepctGroups := []string{"test"}
expectGroups := []string{"test"}
f.expectPatchUserAction(user, excepctGroups)
f.expectPatchUserAction(user, expectGroups)
f.expectCreateFederatedGroupBindingsAction(groupbinding)
f.run(getKey(groupbinding, t))


@@ -23,25 +23,26 @@ import (
"k8s.io/klog"
ctrl "sigs.k8s.io/controller-runtime"
"kubesphere.io/kubesphere/pkg/simple/client/gateway"
"github.com/operator-framework/helm-operator-plugins/pkg/annotation"
"github.com/operator-framework/helm-operator-plugins/pkg/reconciler"
"github.com/operator-framework/helm-operator-plugins/pkg/watches"
)
type Reconciler struct {
WatchFiles []string
GatewayOptions *gateway.Options
}
// SetupWithManager creates reconcilers for each helm package defined in the watch files.
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
var watchKinds []watches.Watch
for _, file := range r.WatchFiles {
ws, err := watches.Load(file)
if err != nil {
return err
}
watchKinds = append(watchKinds, ws...)
ws, err := watches.Load(r.GatewayOptions.WatchesPath)
if err != nil {
return err
}
watchKinds = append(watchKinds, ws...)
for _, w := range watchKinds {
// Register controller with the factory
@@ -58,7 +59,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
r, err := reconciler.New(
reconciler.WithChart(*w.Chart),
reconciler.WithGroupVersionKind(w.GroupVersionKind),
reconciler.WithOverrideValues(w.OverrideValues),
reconciler.WithOverrideValues(r.defaultConfiguration()),
reconciler.SkipDependentWatches(w.WatchDependentResources != nil && !*w.WatchDependentResources),
reconciler.WithMaxConcurrentReconciles(maxConcurrentReconciles),
reconciler.WithReconcilePeriod(reconcilePeriod),
@@ -76,3 +77,14 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
}
return nil
}
func (r *Reconciler) defaultConfiguration() map[string]string {
var overrideValues = make(map[string]string)
if r.GatewayOptions.Repository != "" {
overrideValues["controller.image.repository"] = r.GatewayOptions.Repository
}
if r.GatewayOptions.Tag != "" {
overrideValues["controller.image.tag"] = r.GatewayOptions.Tag
}
return overrideValues
}


@@ -27,6 +27,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"kubesphere.io/kubesphere/pkg/simple/client/gateway"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/envtest"
@@ -79,8 +81,7 @@ var _ = Context("Helm reconcier", func() {
mgr, err := ctrl.NewManager(cfg, ctrl.Options{MetricsBindAddress: "0"})
Expect(err).NotTo(HaveOccurred(), "failed to create a manager")
reconciler := &Reconciler{}
reconciler.WatchFiles = append(reconciler.WatchFiles, f.Name())
reconciler := &Reconciler{GatewayOptions: &gateway.Options{WatchesPath: f.Name()}}
err = reconciler.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup helm reconciler")


@@ -35,6 +35,8 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/api/application/v1alpha1"
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmrepoindex"
@@ -42,9 +44,6 @@ import (
)
const (
// min sync period in seconds
MinSyncPeriod = 180
MinRetryDuration = 60
MaxRetryDuration = 600
HelmRepoSyncStateLen = 10
@@ -156,8 +155,8 @@ func (r *ReconcileHelmRepo) Reconcile(ctx context.Context, request reconcile.Req
copyInstance := instance.DeepCopy()
if copyInstance.Spec.SyncPeriod != 0 && copyInstance.Spec.SyncPeriod < MinSyncPeriod {
copyInstance.Spec.SyncPeriod = MinSyncPeriod
if copyInstance.Spec.SyncPeriod != 0 {
copyInstance.Spec.SyncPeriod = int(math.Max(float64(copyInstance.Spec.SyncPeriod), constants.HelmRepoMinSyncPeriod))
}
retryAfter := 0
@@ -197,7 +196,7 @@ func (r *ReconcileHelmRepo) Reconcile(ctx context.Context, request reconcile.Req
RequeueAfter: MinRetryDuration * time.Second,
}, err
} else {
retryAfter = MinSyncPeriod
retryAfter = constants.HelmRepoMinSyncPeriod
if syncErr == nil {
retryAfter = copyInstance.Spec.SyncPeriod
}
@@ -256,9 +255,7 @@ func needReSyncNow(instance *v1alpha1.HelmRepo) (syncNow bool, after int) {
} else {
period = instance.Spec.SyncPeriod
if period != 0 {
if period < MinSyncPeriod {
period = MinSyncPeriod
}
period = int(math.Max(float64(instance.Spec.SyncPeriod), constants.HelmRepoMinSyncPeriod))
if now.After(state.SyncTime.Add(time.Duration(period) * time.Second)) {
return true, 0
}
@@ -296,7 +293,7 @@ func (r *ReconcileHelmRepo) syncRepo(instance *v1alpha1.HelmRepo) error {
}
// 2. merge new index with old index which is stored in crd
savedIndex := helmrepoindex.MergeRepoIndex(index, existsSavedIndex)
savedIndex := helmrepoindex.MergeRepoIndex(instance, index, existsSavedIndex)
// 3. save index in crd
data, err := savedIndex.Bytes()


@@ -21,13 +21,15 @@ package capability
import (
"context"
"fmt"
"reflect"
"strconv"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
storageinformersv1 "k8s.io/client-go/informers/storage/v1"
@@ -89,7 +91,8 @@ func NewController(
})
csiDriverInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.enqueueStorageClassByCSI,
AddFunc: controller.enqueueStorageClassByCSI,
DeleteFunc: controller.enqueueStorageClassByCSI,
})
return controller
@@ -202,23 +205,25 @@ func (c *StorageCapabilityController) syncHandler(key string) error {
// Get StorageClass
storageClass, err := c.storageClassLister.Get(name)
if err != nil {
return err
}
// Cloning and volumeSnapshot support only available for CSI drivers.
isCSIStorage := c.hasCSIDriver(storageClass)
// Annotate storageClass
storageClassUpdated := storageClass.DeepCopy()
err = c.addStorageClassSnapshotAnnotation(storageClassUpdated, isCSIStorage)
if err != nil {
return err
if isCSIStorage {
c.updateSnapshotAnnotation(storageClassUpdated, isCSIStorage)
c.updateCloneVolumeAnnotation(storageClassUpdated, isCSIStorage)
} else {
c.removeAnnotations(storageClassUpdated)
}
err = c.addCloneVolumeAnnotation(storageClassUpdated, isCSIStorage)
if err != nil {
return err
}
_, err = c.storageClassClient.Update(context.Background(), storageClassUpdated, metav1.UpdateOptions{})
if err != nil {
return err
if !reflect.DeepEqual(storageClass, storageClassUpdated) {
_, err = c.storageClassClient.Update(context.Background(), storageClassUpdated, metav1.UpdateOptions{})
if err != nil {
return err
}
}
return nil
}
@@ -234,25 +239,27 @@ func (c *StorageCapabilityController) hasCSIDriver(storageClass *storagev1.Stora
return false
}
func (c *StorageCapabilityController) addStorageClassSnapshotAnnotation(storageClass *storagev1.StorageClass, snapshotAllow bool) error {
func (c *StorageCapabilityController) updateSnapshotAnnotation(storageClass *storagev1.StorageClass, snapshotAllow bool) {
if storageClass.Annotations == nil {
storageClass.Annotations = make(map[string]string)
}
_, err := strconv.ParseBool(storageClass.Annotations[annotationAllowSnapshot])
// err != nil means annotationAllowSnapshot is invalid or empty
if err != nil {
if _, err := strconv.ParseBool(storageClass.Annotations[annotationAllowSnapshot]); err != nil {
storageClass.Annotations[annotationAllowSnapshot] = strconv.FormatBool(snapshotAllow)
}
return nil
return
}
func (c *StorageCapabilityController) addCloneVolumeAnnotation(storageClass *storagev1.StorageClass, cloneAllow bool) error {
func (c *StorageCapabilityController) updateCloneVolumeAnnotation(storageClass *storagev1.StorageClass, cloneAllow bool) {
if storageClass.Annotations == nil {
storageClass.Annotations = make(map[string]string)
}
_, err := strconv.ParseBool(storageClass.Annotations[annotationAllowClone])
if err != nil {
if _, err := strconv.ParseBool(storageClass.Annotations[annotationAllowClone]); err != nil {
storageClass.Annotations[annotationAllowClone] = strconv.FormatBool(cloneAllow)
}
return nil
return
}
func (c *StorageCapabilityController) removeAnnotations(storageClass *storagev1.StorageClass) {
delete(storageClass.Annotations, annotationAllowClone)
delete(storageClass.Annotations, annotationAllowSnapshot)
}

View File
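The syncHandler rewrite above mutates a DeepCopy and only calls Update when reflect.DeepEqual shows the copy diverged from the cached object, which avoids no-op writes to the API server. A sketch of the compare-before-update pattern, with a hypothetical annotation key in place of the controller's constants:

package main

import (
	"fmt"
	"reflect"
)

type storageClass struct {
	Annotations map[string]string
}

func deepCopy(in *storageClass) *storageClass {
	out := &storageClass{Annotations: map[string]string{}}
	for k, v := range in.Annotations {
		out.Annotations[k] = v
	}
	return out
}

func main() {
	cached := &storageClass{Annotations: map[string]string{
		"storageclass.kubesphere.io/allow-snapshot": "true", // hypothetical key
	}}

	updated := deepCopy(cached)
	updated.Annotations["storageclass.kubesphere.io/allow-snapshot"] = "true"

	// Only issue the (expensive) API update when something actually changed.
	if reflect.DeepEqual(cached, updated) {
		fmt.Println("no diff: skip Update")
	} else {
		fmt.Println("diff: call storageClassClient.Update")
	}
}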

@@ -235,14 +235,13 @@ func TestCreateStorageClass(t *testing.T) {
func TestStorageClassHadAnnotation(t *testing.T) {
fixture := newFixture(t, true)
storageClass := newStorageClass("csi-example", "csi.example.com")
storageClass.Annotations = map[string]string{annotationAllowSnapshot: "false", annotationAllowClone: "false"}
storageClass.Annotations = make(map[string]string)
storageClassUpdate := storageClass.DeepCopy()
csiDriver := newCSIDriver("csi.example.com")
storageClass.Annotations = map[string]string{annotationAllowSnapshot: "false", annotationAllowClone: "false"}
// Object exists
fixture.storageObjects = append(fixture.storageObjects, storageClass)
fixture.storageClassLister = append(fixture.storageClassLister, storageClass)
fixture.csiDriverLister = append(fixture.csiDriverLister, csiDriver)
// Action expected
fixture.expectUpdateStorageClassAction(storageClassUpdate)
@@ -270,3 +269,22 @@ func TestStorageClassHadOneAnnotation(t *testing.T) {
// Run test
fixture.run(getKey(storageClass, t))
}
func TestStorageClassHadNoCSIDriver(t *testing.T) {
fixture := newFixture(t, true)
storageClass := newStorageClass("csi-example", "csi.example.com")
storageClass.Annotations = map[string]string{}
storageClassUpdate := storageClass.DeepCopy()
storageClass.Annotations = map[string]string{annotationAllowSnapshot: "false", annotationAllowClone: "false"}
// Object exists
fixture.storageObjects = append(fixture.storageObjects, storageClass)
fixture.storageClassLister = append(fixture.storageClassLister, storageClass)
// Action expected
fixture.expectUpdateStorageClassAction(storageClassUpdate)
// Run test
fixture.run(getKey(storageClass, t))
}

View File

@@ -112,9 +112,8 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
logger := r.Logger.WithValues("user", req.NamespacedName)
rootCtx := context.Background()
user := &iamv1alpha2.User{}
err := r.Get(rootCtx, req.NamespacedName, user)
err := r.Get(ctx, req.NamespacedName, user)
if err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
@@ -124,7 +123,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
// then lets add the finalizer and update the object.
if !sliceutil.HasString(user.Finalizers, finalizer) {
user.ObjectMeta.Finalizers = append(user.ObjectMeta.Finalizers, finalizer)
if err = r.Update(context.Background(), user, &client.UpdateOptions{}); err != nil {
if err = r.Update(ctx, user, &client.UpdateOptions{}); err != nil {
logger.Error(err, "failed to update user")
return ctrl.Result{}, err
}
@@ -168,7 +167,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
return item == finalizer
})
if err = r.Update(context.Background(), user, &client.UpdateOptions{}); err != nil {
if err = r.Update(ctx, user, &client.UpdateOptions{}); err != nil {
klog.Error(err)
r.Recorder.Event(user, corev1.EventTypeWarning, failedSynced, fmt.Sprintf(syncFailMessage, err))
return ctrl.Result{}, err
@@ -199,12 +198,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
// update user status if not managed by kubefed
managedByKubefed := user.Labels[constants.KubefedManagedLabel] == "true"
if !managedByKubefed {
if user, err = r.encryptPassword(user); err != nil {
if err = r.encryptPassword(ctx, user); err != nil {
klog.Error(err)
r.Recorder.Event(user, corev1.EventTypeWarning, failedSynced, fmt.Sprintf(syncFailMessage, err))
return ctrl.Result{}, err
}
if user, err = r.syncUserStatus(ctx, user); err != nil {
if err = r.syncUserStatus(ctx, user); err != nil {
klog.Error(err)
r.Recorder.Event(user, corev1.EventTypeWarning, failedSynced, fmt.Sprintf(syncFailMessage, err))
return ctrl.Result{}, err
@@ -239,15 +238,15 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco
return ctrl.Result{}, nil
}
func (r *Reconciler) encryptPassword(user *iamv1alpha2.User) (*iamv1alpha2.User, error) {
// encryptPassword encrypts and updates the user password
func (r *Reconciler) encryptPassword(ctx context.Context, user *iamv1alpha2.User) error {
// password is not empty and not encrypted
if user.Spec.EncryptedPassword != "" && !isEncrypted(user.Spec.EncryptedPassword) {
password, err := encrypt(user.Spec.EncryptedPassword)
if err != nil {
klog.Error(err)
return nil, err
return err
}
user = user.DeepCopy()
user.Spec.EncryptedPassword = password
if user.Annotations == nil {
user.Annotations = make(map[string]string)
@@ -255,32 +254,31 @@ func (r *Reconciler) encryptPassword(user *iamv1alpha2.User) (*iamv1alpha2.User,
user.Annotations[iamv1alpha2.LastPasswordChangeTimeAnnotation] = time.Now().UTC().Format(time.RFC3339)
// ensure plain text password won't be kept anywhere
delete(user.Annotations, corev1.LastAppliedConfigAnnotation)
err = r.Update(context.Background(), user, &client.UpdateOptions{})
err = r.Update(ctx, user, &client.UpdateOptions{})
if err != nil {
return nil, err
return err
}
return user, nil
}
return user, nil
return nil
}
func (r *Reconciler) ensureNotControlledByKubefed(user *iamv1alpha2.User) error {
func (r *Reconciler) ensureNotControlledByKubefed(ctx context.Context, user *iamv1alpha2.User) error {
if user.Labels[constants.KubefedManagedLabel] != "false" {
if user.Labels == nil {
user.Labels = make(map[string]string, 0)
}
user = user.DeepCopy()
user.Labels[constants.KubefedManagedLabel] = "false"
err := r.Update(context.Background(), user, &client.UpdateOptions{})
err := r.Update(ctx, user, &client.UpdateOptions{})
if err != nil {
klog.Error(err)
return err
}
}
return nil
}
func (r *Reconciler) multiClusterSync(ctx context.Context, user *iamv1alpha2.User) error {
if err := r.ensureNotControlledByKubefed(user); err != nil {
if err := r.ensureNotControlledByKubefed(ctx, user); err != nil {
klog.Error(err)
return err
}
@@ -288,6 +286,9 @@ func (r *Reconciler) multiClusterSync(ctx context.Context, user *iamv1alpha2.Use
federatedUser := &typesv1beta1.FederatedUser{}
err := r.Get(ctx, types.NamespacedName{Name: user.Name}, federatedUser)
if err != nil {
if errors.IsNotFound(err) {
return r.createFederatedUser(ctx, user)
}
return err
}
@@ -306,10 +307,6 @@ func (r *Reconciler) multiClusterSync(ctx context.Context, user *iamv1alpha2.Use
func (r *Reconciler) createFederatedUser(ctx context.Context, user *iamv1alpha2.User) error {
federatedUser := &typesv1beta1.FederatedUser{
TypeMeta: metav1.TypeMeta{
Kind: iamv1alpha2.FedUserKind,
APIVersion: iamv1alpha2.FedUserResource.Group + "/" + iamv1alpha2.FedUserResource.Version,
},
ObjectMeta: metav1.ObjectMeta{
Name: user.Name,
},
@@ -435,12 +432,18 @@ func (r *Reconciler) deleteRoleBindings(ctx context.Context, user *iamv1alpha2.U
return err
}
roleBinding := &rbacv1.RoleBinding{}
err = r.Client.DeleteAllOf(ctx, roleBinding, client.MatchingLabels{iamv1alpha2.UserReferenceLabel: user.Name})
roleBindingList := &rbacv1.RoleBindingList{}
err = r.Client.List(ctx, roleBindingList, client.MatchingLabels{iamv1alpha2.UserReferenceLabel: user.Name})
if err != nil {
return err
}
for _, roleBinding := range roleBindingList.Items {
err = r.Client.Delete(ctx, &roleBinding)
if err != nil {
return err
}
}
return nil
}
@@ -449,57 +452,51 @@ func (r *Reconciler) deleteLoginRecords(ctx context.Context, user *iamv1alpha2.U
return r.Client.DeleteAllOf(ctx, loginRecord, client.MatchingLabels{iamv1alpha2.UserReferenceLabel: user.Name})
}
// syncUserStatus will reconcile user state based on user login records
func (r *Reconciler) syncUserStatus(ctx context.Context, user *iamv1alpha2.User) (*iamv1alpha2.User, error) {
// syncUserStatus updates the user status
func (r *Reconciler) syncUserStatus(ctx context.Context, user *iamv1alpha2.User) error {
if user.Spec.EncryptedPassword == "" {
if user.Labels[iamv1alpha2.IdentifyProviderLabel] != "" {
// a user mapped from another identity provider stays active until disabled
if user.Status.State == nil || *user.Status.State != iamv1alpha2.UserActive {
expected := user.DeepCopy()
active := iamv1alpha2.UserActive
expected.Status = iamv1alpha2.UserStatus{
user.Status = iamv1alpha2.UserStatus{
State: &active,
LastTransitionTime: &metav1.Time{Time: time.Now()},
}
err := r.Update(ctx, expected, &client.UpdateOptions{})
err := r.Update(ctx, user, &client.UpdateOptions{})
if err != nil {
return nil, err
return err
}
return expected, nil
}
} else {
// becomes disabled after setting a blank password
if user.Status.State == nil || *user.Status.State != iamv1alpha2.UserDisabled {
expected := user.DeepCopy()
disabled := iamv1alpha2.UserDisabled
expected.Status = iamv1alpha2.UserStatus{
user.Status = iamv1alpha2.UserStatus{
State: &disabled,
LastTransitionTime: &metav1.Time{Time: time.Now()},
}
err := r.Update(ctx, expected, &client.UpdateOptions{})
err := r.Update(ctx, user, &client.UpdateOptions{})
if err != nil {
return nil, err
return err
}
return expected, nil
}
}
return user, nil
return nil
}
// becomes active after password encrypted
if isEncrypted(user.Spec.EncryptedPassword) {
if user.Status.State == nil || *user.Status.State == iamv1alpha2.UserDisabled {
expected := user.DeepCopy()
active := iamv1alpha2.UserActive
expected.Status = iamv1alpha2.UserStatus{
user.Status = iamv1alpha2.UserStatus{
State: &active,
LastTransitionTime: &metav1.Time{Time: time.Now()},
}
err := r.Update(ctx, expected, &client.UpdateOptions{})
err := r.Update(ctx, user, &client.UpdateOptions{})
if err != nil {
return nil, err
return err
}
return expected, nil
}
}
@@ -507,18 +504,17 @@ func (r *Reconciler) syncUserStatus(ctx context.Context, user *iamv1alpha2.User)
if user.Status.State != nil && *user.Status.State == iamv1alpha2.UserAuthLimitExceeded {
if user.Status.LastTransitionTime != nil &&
user.Status.LastTransitionTime.Add(r.AuthenticationOptions.AuthenticateRateLimiterDuration).Before(time.Now()) {
expected := user.DeepCopy()
// unblock user
active := iamv1alpha2.UserActive
expected.Status = iamv1alpha2.UserStatus{
user.Status = iamv1alpha2.UserStatus{
State: &active,
LastTransitionTime: &metav1.Time{Time: time.Now()},
}
err := r.Update(ctx, expected, &client.UpdateOptions{})
err := r.Update(ctx, user, &client.UpdateOptions{})
if err != nil {
return nil, err
return err
}
return expected, nil
return nil
}
}
@@ -527,7 +523,7 @@ func (r *Reconciler) syncUserStatus(ctx context.Context, user *iamv1alpha2.User)
err := r.List(ctx, records, client.MatchingLabels{iamv1alpha2.UserReferenceLabel: user.Name})
if err != nil {
klog.Error(err)
return nil, err
return err
}
// count failed login attempts during last AuthenticateRateLimiterDuration
@@ -542,22 +538,20 @@ func (r *Reconciler) syncUserStatus(ctx context.Context, user *iamv1alpha2.User)
// block user if failed login attempts exceeds maximum tries setting
if failedLoginAttempts >= r.AuthenticationOptions.AuthenticateRateLimiterMaxTries {
expected := user.DeepCopy()
limitExceed := iamv1alpha2.UserAuthLimitExceeded
expected.Status = iamv1alpha2.UserStatus{
user.Status = iamv1alpha2.UserStatus{
State: &limitExceed,
Reason: fmt.Sprintf("Failed login attempts exceed %d in last %s", failedLoginAttempts, r.AuthenticationOptions.AuthenticateRateLimiterDuration),
LastTransitionTime: &metav1.Time{Time: time.Now()},
}
err = r.Update(context.Background(), expected, &client.UpdateOptions{})
err = r.Update(ctx, user, &client.UpdateOptions{})
if err != nil {
return nil, err
return err
}
return expected, nil
}
return user, nil
return nil
}
func encrypt(password string) (string, error) {

View File
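Besides threading the reconcile ctx through every client call, the user controller above replaces DeleteAllOf on RoleBindings with an explicit List-then-Delete; DeleteAllOf on a namespaced kind only acts within one namespace, while a user's bindings are spread across namespaces. A sketch of the pattern, with a hypothetical label key standing in for iamv1alpha2.UserReferenceLabel:

package userutil

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// userReferenceLabel is a hypothetical stand-in for iamv1alpha2.UserReferenceLabel.
const userReferenceLabel = "iam.kubesphere.io/user-ref"

// deleteUserRoleBindings lists matching bindings across all namespaces and
// deletes them one by one, which a namespaced DeleteAllOf cannot do in one call.
func deleteUserRoleBindings(ctx context.Context, c client.Client, username string) error {
	list := &rbacv1.RoleBindingList{}
	if err := c.List(ctx, list, client.MatchingLabels{userReferenceLabel: username}); err != nil {
		return err
	}
	for i := range list.Items {
		// Index into the slice rather than ranging by value, so the pointer
		// passed to Delete refers to a distinct element.
		if err := c.Delete(ctx, &list.Items[i]); err != nil && !apierrors.IsNotFound(err) {
			return err
		}
	}
	return nil
}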

@@ -99,7 +99,7 @@ func TestDoNothing(t *testing.T) {
t.Fatal(err)
}
_, err = c.Reconcile(context.Background(), reconcile.Request{
result, err := c.Reconcile(context.Background(), reconcile.Request{
NamespacedName: types.NamespacedName{Name: user.Name},
})
if err != nil {
@@ -108,22 +108,15 @@ func TestDoNothing(t *testing.T) {
// append finalizer
updateEvent := <-w.ResultChan()
assert.Equal(t, updateEvent.Type, watch.Modified)
assert.Equal(t, watch.Modified, updateEvent.Type)
assert.NotNil(t, updateEvent.Object)
user = updateEvent.Object.(*iamv1alpha2.User)
assert.NotNil(t, user)
assert.NotEmpty(t, user.Finalizers)
result, err := c.Reconcile(context.Background(), reconcile.Request{
NamespacedName: types.NamespacedName{Name: user.Name},
})
if err != nil {
t.Fatal(err)
}
updateEvent = <-w.ResultChan()
// encrypt password
assert.Equal(t, updateEvent.Type, watch.Modified)
assert.Equal(t, watch.Modified, updateEvent.Type)
assert.NotNil(t, updateEvent.Object)
user = updateEvent.Object.(*iamv1alpha2.User)
assert.NotNil(t, user)
@@ -132,12 +125,12 @@ func TestDoNothing(t *testing.T) {
// becomes active after password encrypted
updateEvent = <-w.ResultChan()
user = updateEvent.Object.(*iamv1alpha2.User)
assert.Equal(t, *user.Status.State, iamv1alpha2.UserActive)
assert.Equal(t, iamv1alpha2.UserActive, *user.Status.State)
// block user
updateEvent = <-w.ResultChan()
user = updateEvent.Object.(*iamv1alpha2.User)
assert.Equal(t, *user.Status.State, iamv1alpha2.UserAuthLimitExceeded)
assert.Equal(t, iamv1alpha2.UserAuthLimitExceeded, *user.Status.State)
assert.True(t, result.Requeue)
time.Sleep(result.RequeueAfter + time.Second)
@@ -151,5 +144,5 @@ func TestDoNothing(t *testing.T) {
// unblock user
updateEvent = <-w.ResultChan()
user = updateEvent.Object.(*iamv1alpha2.User)
assert.Equal(t, *user.Status.State, iamv1alpha2.UserActive)
assert.Equal(t, iamv1alpha2.UserActive, *user.Status.State)
}

View File
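Most of the test edits above flip assert.Equal arguments into testify's documented (t, expected, actual) order, so a failure message labels the two values correctly. A hedged micro-example of why the order matters:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// compute is a hypothetical function under test.
func compute() int { return 42 }

func TestArgumentOrder(t *testing.T) {
	got := compute()
	// Expected comes first: on failure testify prints
	// "expected: 42" / "actual: <got>" instead of the reverse.
	assert.Equal(t, 42, got)
}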

@@ -122,7 +122,6 @@ func (r *Reconciler) bindWorkspace(ctx context.Context, logger logr.Logger, work
return client.IgnoreNotFound(err)
}
if !metav1.IsControlledBy(workspaceRole, &workspace) {
workspaceRole = workspaceRole.DeepCopy()
workspaceRole.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(workspaceRole.OwnerReferences)
if err := controllerutil.SetControllerReference(&workspace, workspaceRole, r.Scheme); err != nil {
logger.Error(err, "set controller reference failed")
@@ -151,6 +150,7 @@ func (r *Reconciler) multiClusterSync(ctx context.Context, logger logr.Logger, w
logger.Error(err, "create federated workspace role failed")
return err
}
return nil
}
}
logger.Error(err, "get federated workspace role failed")
@@ -174,10 +174,6 @@ func (r *Reconciler) multiClusterSync(ctx context.Context, logger logr.Logger, w
func newFederatedWorkspaceRole(workspaceRole *iamv1alpha2.WorkspaceRole) (*typesv1beta1.FederatedWorkspaceRole, error) {
federatedWorkspaceRole := &typesv1beta1.FederatedWorkspaceRole{
TypeMeta: metav1.TypeMeta{
Kind: typesv1beta1.FederatedWorkspaceRoleKind,
APIVersion: typesv1beta1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: workspaceRole.Name,
},
@@ -206,7 +202,6 @@ func (r *Reconciler) ensureNotControlledByKubefed(ctx context.Context, logger lo
if workspaceRole.Labels == nil {
workspaceRole.Labels = make(map[string]string)
}
workspaceRole = workspaceRole.DeepCopy()
workspaceRole.Labels[constants.KubefedManagedLabel] = "false"
if err := r.Update(ctx, workspaceRole); err != nil {
logger.Error(err, "update kubefed managed label failed")

View File

@@ -123,7 +123,6 @@ func (r *Reconciler) bindWorkspace(ctx context.Context, logger logr.Logger, work
}
// owner reference does not match the workspace label
if !metav1.IsControlledBy(workspaceRoleBinding, workspace) {
workspaceRoleBinding := workspaceRoleBinding.DeepCopy()
workspaceRoleBinding.OwnerReferences = k8sutil.RemoveWorkspaceOwnerReference(workspaceRoleBinding.OwnerReferences)
if err := controllerutil.SetControllerReference(workspace, workspaceRoleBinding, r.Scheme); err != nil {
logger.Error(err, "set controller reference failed")
@@ -145,7 +144,7 @@ func (r *Reconciler) multiClusterSync(ctx context.Context, logger logr.Logger, w
federatedWorkspaceRoleBinding := &typesv1beta1.FederatedWorkspaceRoleBinding{}
if err := r.Client.Get(ctx, types.NamespacedName{Name: workspaceRoleBinding.Name}, federatedWorkspaceRoleBinding); err != nil {
if errors.IsNotFound(err) {
if federatedWorkspaceRoleBinding, err := newFederatedWorkspaceRole(workspaceRoleBinding); err != nil {
if federatedWorkspaceRoleBinding, err := newFederatedWorkspaceRoleBinding(workspaceRoleBinding); err != nil {
logger.Error(err, "generate federated workspace role binding failed")
return err
} else {
@@ -153,6 +152,7 @@ func (r *Reconciler) multiClusterSync(ctx context.Context, logger logr.Logger, w
logger.Error(err, "create federated workspace role binding failed")
return err
}
return nil
}
}
logger.Error(err, "get federated workspace role binding failed")
@@ -176,12 +176,8 @@ func (r *Reconciler) multiClusterSync(ctx context.Context, logger logr.Logger, w
return nil
}
func newFederatedWorkspaceRole(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) (*typesv1beta1.FederatedWorkspaceRoleBinding, error) {
federatedWorkspaceRole := &typesv1beta1.FederatedWorkspaceRoleBinding{
TypeMeta: metav1.TypeMeta{
Kind: typesv1beta1.FederatedWorkspaceRoleBindingKind,
APIVersion: typesv1beta1.SchemeGroupVersion.String(),
},
func newFederatedWorkspaceRoleBinding(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) (*typesv1beta1.FederatedWorkspaceRoleBinding, error) {
federatedWorkspaceRoleBinding := &typesv1beta1.FederatedWorkspaceRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: workspaceRoleBinding.Name,
},
@@ -198,10 +194,10 @@ func newFederatedWorkspaceRole(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBi
},
},
}
if err := controllerutil.SetControllerReference(workspaceRoleBinding, federatedWorkspaceRole, scheme.Scheme); err != nil {
if err := controllerutil.SetControllerReference(workspaceRoleBinding, federatedWorkspaceRoleBinding, scheme.Scheme); err != nil {
return nil, err
}
return federatedWorkspaceRole, nil
return federatedWorkspaceRoleBinding, nil
}
func (r *Reconciler) ensureNotControlledByKubefed(ctx context.Context, logger logr.Logger, workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding) error {
@@ -209,7 +205,6 @@ func (r *Reconciler) ensureNotControlledByKubefed(ctx context.Context, logger lo
if workspaceRoleBinding.Labels == nil {
workspaceRoleBinding.Labels = make(map[string]string)
}
workspaceRoleBinding = workspaceRoleBinding.DeepCopy()
workspaceRoleBinding.Labels[constants.KubefedManagedLabel] = "false"
logger.V(4).Info("update kubefed managed label")
if err := r.Update(ctx, workspaceRoleBinding); err != nil {

View File
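Both federated-resource constructors above drop the hand-filled TypeMeta (which had also propagated a copy-paste slip, tagging a WorkspaceRoleBinding with the WorkspaceRole kind); controller-runtime clients resolve Kind and APIVersion from their scheme when serializing, so TypeMeta is redundant on create. A sketch under that assumption, using a core type for brevity:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createWithoutTypeMeta relies on the client's scheme to fill in the
// GroupVersionKind, so no TypeMeta is set on the object.
func createWithoutTypeMeta(ctx context.Context, c client.Client) error {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Data:       map[string]string{"key": "value"},
	}
	return c.Create(ctx, cm)
}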

@@ -91,9 +91,8 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
// +kubebuilder:rbac:groups=tenant.kubesphere.io,resources=workspaces,verbs=get;list;watch;
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := r.Logger.WithValues("workspacetemplate", req.NamespacedName)
rootCtx := context.Background()
workspaceTemplate := &tenantv1alpha2.WorkspaceTemplate{}
if err := r.Get(rootCtx, req.NamespacedName, workspaceTemplate); err != nil {
if err := r.Get(ctx, req.NamespacedName, workspaceTemplate); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
@@ -102,7 +101,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
// then lets add the finalizer and update the object.
if !sliceutil.HasString(workspaceTemplate.ObjectMeta.Finalizers, workspaceTemplateFinalizer) {
workspaceTemplate.ObjectMeta.Finalizers = append(workspaceTemplate.ObjectMeta.Finalizers, workspaceTemplateFinalizer)
if err := r.Update(rootCtx, workspaceTemplate); err != nil {
if err := r.Update(ctx, workspaceTemplate); err != nil {
return ctrl.Result{}, err
}
}
@@ -110,16 +109,16 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
// The object is being deleted
if sliceutil.HasString(workspaceTemplate.ObjectMeta.Finalizers, workspaceTemplateFinalizer) ||
sliceutil.HasString(workspaceTemplate.ObjectMeta.Finalizers, orphanFinalizer) {
if err := r.deleteOpenPitrixResourcesInWorkspace(rootCtx, workspaceTemplate.Name); err != nil {
logger.Error(err, "delete resource in workspace template failed")
if err := r.deleteOpenPitrixResourcesInWorkspace(ctx, workspaceTemplate.Name); err != nil {
logger.Error(err, "failed to delete related openpitrix resource")
return ctrl.Result{}, err
}
if err := r.deleteWorkspace(rootCtx, workspaceTemplate); err != nil {
if err := r.deleteWorkspace(ctx, workspaceTemplate); err != nil {
if errors.IsNotFound(err) {
logger.V(4).Info("workspace not found", "workspacerole", workspaceTemplate.Name)
logger.V(4).Info("related workspace not found")
} else {
logger.Error(err, "failed delete workspaces")
logger.Error(err, "failed to delete related workspace")
return ctrl.Result{}, nil
}
}
@@ -130,7 +129,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
})
logger.V(4).Info("update workspace template")
if err := r.Update(rootCtx, workspaceTemplate); err != nil {
if err := r.Update(ctx, workspaceTemplate); err != nil {
logger.Error(err, "update workspace template failed")
return ctrl.Result{}, err
}
@@ -140,18 +139,18 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
}
if r.MultiClusterEnabled {
if err := r.multiClusterSync(rootCtx, logger, workspaceTemplate); err != nil {
if err := r.multiClusterSync(ctx, logger, workspaceTemplate); err != nil {
return ctrl.Result{}, err
}
} else {
if err := r.singleClusterSync(rootCtx, logger, workspaceTemplate); err != nil {
if err := r.singleClusterSync(ctx, logger, workspaceTemplate); err != nil {
return ctrl.Result{}, err
}
}
if err := r.initWorkspaceRoles(rootCtx, logger, workspaceTemplate); err != nil {
if err := r.initWorkspaceRoles(ctx, logger, workspaceTemplate); err != nil {
return ctrl.Result{}, err
}
if err := r.initManagerRoleBinding(rootCtx, logger, workspaceTemplate); err != nil {
if err := r.initManagerRoleBinding(ctx, logger, workspaceTemplate); err != nil {
return ctrl.Result{}, err
}
r.Recorder.Event(workspaceTemplate, corev1.EventTypeNormal, controllerutils.SuccessSynced, controllerutils.MessageResourceSynced)
@@ -230,10 +229,6 @@ func (r *Reconciler) multiClusterSync(ctx context.Context, logger logr.Logger, w
func newFederatedWorkspace(template *tenantv1alpha2.WorkspaceTemplate) (*typesv1beta1.FederatedWorkspace, error) {
federatedWorkspace := &typesv1beta1.FederatedWorkspace{
TypeMeta: metav1.TypeMeta{
Kind: typesv1beta1.FederatedWorkspaceRoleKind,
APIVersion: typesv1beta1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: template.Name,
Labels: template.Labels,
@@ -261,6 +256,7 @@ func (r *Reconciler) deleteWorkspace(ctx context.Context, template *tenantv1alph
if err := r.Client.Get(ctx, types.NamespacedName{Name: template.Name}, federatedWorkspace); err != nil {
return err
}
// The workspace is deleted with the Orphan option when it has an orphan finalizer.
// Resources owned by the workspace will not be deleted.
if sliceutil.HasString(template.ObjectMeta.Finalizers, orphanFinalizer) {
@@ -271,7 +267,17 @@ func (r *Reconciler) deleteWorkspace(ctx context.Context, template *tenantv1alph
if err := r.Update(ctx, federatedWorkspace); err != nil {
return err
}
} else {
// Usually a namespace binds its lifecycle to the workspace through an ownerReference,
// but in a multi-cluster environment the workspace is not created in the host cluster
// when the cluster has not been granted or kubefed-controller-manager is unavailable,
// which leaves the federated namespace behind as an orphan object in the host cluster.
// After the workspaceTemplate is deleted we need to delete such orphan namespaces in the host cluster directly.
if err := r.deleteNamespacesInWorkspace(ctx, template); err != nil {
return err
}
}
if err := r.Delete(ctx, federatedWorkspace); err != nil {
return err
}
@@ -301,7 +307,6 @@ func (r *Reconciler) ensureNotControlledByKubefed(ctx context.Context, logger lo
if workspaceTemplate.Labels == nil {
workspaceTemplate.Labels = make(map[string]string)
}
workspaceTemplate = workspaceTemplate.DeepCopy()
workspaceTemplate.Labels[constants.KubefedManagedLabel] = "false"
logger.V(4).Info("update kubefed managed label")
if err := r.Update(ctx, workspaceTemplate); err != nil {
@@ -326,8 +331,8 @@ func (r *Reconciler) initWorkspaceRoles(ctx context.Context, logger logr.Logger,
expected.Labels = make(map[string]string)
}
expected.Labels[tenantv1alpha1.WorkspaceLabel] = workspace.Name
var existed iamv1alpha2.WorkspaceRole
if err := r.Get(ctx, types.NamespacedName{Name: expected.Name}, &existed); err != nil {
workspaceRole := &iamv1alpha2.WorkspaceRole{}
if err := r.Get(ctx, types.NamespacedName{Name: expected.Name}, workspaceRole); err != nil {
if errors.IsNotFound(err) {
logger.V(4).Info("create workspace role", "workspacerole", expected.Name)
if err := r.Create(ctx, &expected); err != nil {
@@ -340,15 +345,14 @@ func (r *Reconciler) initWorkspaceRoles(ctx context.Context, logger logr.Logger,
return err
}
}
if !reflect.DeepEqual(expected.Labels, existed.Labels) ||
!reflect.DeepEqual(expected.Annotations, existed.Annotations) ||
!reflect.DeepEqual(expected.Rules, existed.Rules) {
updated := existed.DeepCopy()
updated.Labels = expected.Labels
updated.Annotations = expected.Annotations
updated.Rules = expected.Rules
logger.V(4).Info("update workspace role", "workspacerole", updated.Name)
if err := r.Update(ctx, updated); err != nil {
if !reflect.DeepEqual(expected.Labels, workspaceRole.Labels) ||
!reflect.DeepEqual(expected.Annotations, workspaceRole.Annotations) ||
!reflect.DeepEqual(expected.Rules, workspaceRole.Rules) {
workspaceRole.Labels = expected.Labels
workspaceRole.Annotations = expected.Annotations
workspaceRole.Rules = expected.Rules
logger.V(4).Info("update workspace role", "workspacerole", workspaceRole.Name)
if err := r.Update(ctx, workspaceRole); err != nil {
logger.Error(err, "update workspace role failed")
return err
}
@@ -471,6 +475,23 @@ func (r *Reconciler) deleteHelmRepos(ctx context.Context, ws string) error {
return err
}
// deleteNamespacesInWorkspace deletes the namespaces associated with the workspace that match the workspace label selector
func (r *Reconciler) deleteNamespacesInWorkspace(ctx context.Context, template *tenantv1alpha2.WorkspaceTemplate) error {
namespaceList := &corev1.NamespaceList{}
err := r.Client.List(ctx, namespaceList, client.MatchingLabels{tenantv1alpha1.WorkspaceLabel: template.Name})
if err != nil {
return err
}
for _, namespace := range namespaceList.Items {
err = r.Client.Delete(ctx, &namespace)
if err != nil {
return err
}
}
return nil
}
func workspaceRoleBindingChanger(workspaceRoleBinding *iamv1alpha2.WorkspaceRoleBinding, workspace, username, workspaceRoleName string) controllerutil.MutateFn {
return func() error {
workspaceRoleBinding.Labels = map[string]string{

View File

@@ -19,6 +19,7 @@ package v1alpha1
import (
"context"
"fmt"
"time"
"github.com/emicklei/go-restful"
corev1 "k8s.io/api/core/v1"
@@ -31,30 +32,39 @@ import (
"kubesphere.io/api/gateway/v1alpha1"
"kubesphere.io/kubesphere/pkg/api"
loggingv1alpha2 "kubesphere.io/kubesphere/pkg/api/logging/v1alpha2"
"kubesphere.io/kubesphere/pkg/apiserver/query"
"kubesphere.io/kubesphere/pkg/informers"
operator "kubesphere.io/kubesphere/pkg/models/gateway"
"kubesphere.io/kubesphere/pkg/models/logging"
servererr "kubesphere.io/kubesphere/pkg/server/errors"
"kubesphere.io/kubesphere/pkg/simple/client/gateway"
loggingclient "kubesphere.io/kubesphere/pkg/simple/client/logging"
conversionsv1 "kubesphere.io/kubesphere/pkg/utils/conversions/core/v1"
"kubesphere.io/kubesphere/pkg/utils/stringutils"
)
type handler struct {
options *gateway.Options
gw operator.GatewayOperator
factory informers.InformerFactory
lo logging.LoggingOperator
}
// newHandler creates an instance of the handler
func newHandler(options *gateway.Options, cache cache.Cache, client client.Client, factory informers.InformerFactory, k8sClient kubernetes.Interface) *handler {
func newHandler(options *gateway.Options, cache cache.Cache, client client.Client, factory informers.InformerFactory, k8sClient kubernetes.Interface, loggingClient loggingclient.Client) *handler {
conversionsv1.RegisterConversions(scheme.Scheme)
// Do not register the Gateway scheme globally, which would cause conflicts in ks-controller-manager.
v1alpha1.AddToScheme(client.Scheme())
var lo logging.LoggingOperator
if loggingClient != nil {
lo = logging.NewLoggingOperator(loggingClient)
}
return &handler{
options: options,
factory: factory,
gw: operator.NewGatewayOperator(client, cache, options, factory, k8sClient),
lo: lo,
}
}
@@ -173,3 +183,74 @@ func (h *handler) PodLog(request *restful.Request, response *restful.Response) {
return
}
}
func (h *handler) PodLogSearch(request *restful.Request, response *restful.Response) {
if h.lo == nil {
api.HandleError(response, request, fmt.Errorf("logging isn't enabled"))
return
}
ns := request.PathParameter("namespace")
logQuery, err := loggingv1alpha2.ParseQueryParameter(request)
if err != nil {
api.HandleError(response, request, err)
return
}
// ES logs are filtered by pod and namespace by default.
pods, err := h.gw.GetPods(ns, &query.Query{})
if err != nil {
api.HandleError(response, request, err)
return
}
var podfilter []string
namespaceCreateTimeMap := make(map[string]*time.Time)
var ar loggingv1alpha2.APIResponse
for _, p := range pods.Items {
pod, ok := p.(*corev1.Pod)
if ok {
podfilter = append(podfilter, pod.Name)
namespaceCreateTimeMap[pod.Namespace] = nil
}
}
sf := loggingclient.SearchFilter{
NamespaceFilter: namespaceCreateTimeMap,
PodFilter: podfilter,
PodSearch: stringutils.Split(logQuery.PodSearch, ","),
ContainerSearch: stringutils.Split(logQuery.ContainerSearch, ","),
ContainerFilter: stringutils.Split(logQuery.ContainerFilter, ","),
LogSearch: stringutils.Split(logQuery.LogSearch, ","),
Starttime: logQuery.StartTime,
Endtime: logQuery.EndTime,
}
noHit := len(namespaceCreateTimeMap) == 0 || len(podfilter) == 0
if logQuery.Operation == loggingv1alpha2.OperationExport {
response.Header().Set(restful.HEADER_ContentType, "text/plain")
response.Header().Set("Content-Disposition", "attachment")
if noHit {
return
}
err = h.lo.ExportLogs(sf, response)
if err != nil {
api.HandleInternalError(response, request, err)
return
}
} else {
if noHit {
ar.Logs = &loggingclient.Logs{}
}
ar, err = h.lo.SearchLogs(sf, logQuery.From, logQuery.Size, logQuery.Sort)
if err != nil {
api.HandleError(response, request, err)
return
}
response.WriteEntity(ar)
}
}

View File

@@ -28,19 +28,21 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"kubesphere.io/kubesphere/pkg/api"
loggingv1alpha2 "kubesphere.io/kubesphere/pkg/api/logging/v1alpha2"
"kubesphere.io/kubesphere/pkg/apiserver/runtime"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/informers"
"kubesphere.io/kubesphere/pkg/server/errors"
"kubesphere.io/kubesphere/pkg/simple/client/gateway"
loggingclient "kubesphere.io/kubesphere/pkg/simple/client/logging"
)
var GroupVersion = schema.GroupVersion{Group: "gateway.kubesphere.io", Version: "v1alpha1"}
func AddToContainer(container *restful.Container, options *gateway.Options, cache cache.Cache, client client.Client, factory informers.InformerFactory, k8sClient kubernetes.Interface) error {
func AddToContainer(container *restful.Container, options *gateway.Options, cache cache.Cache, client client.Client, factory informers.InformerFactory, k8sClient kubernetes.Interface, loggingClient loggingclient.Client) error {
ws := runtime.NewWebService(GroupVersion)
handler := newHandler(options, cache, client, factory, k8sClient)
handler := newHandler(options, cache, client, factory, k8sClient, loggingClient)
// register gateway apis
ws.Route(ws.POST("/namespaces/{namespace}/gateways").
@@ -102,6 +104,14 @@ func AddToContainer(container *restful.Container, options *gateway.Options, cach
Returns(http.StatusOK, api.StatusOK, v1alpha1.Gateway{}).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.GatewayTag}))
ws.Route(ws.GET("/namespaces/{namespace}/gateways/{gateway}/logs").
To(handler.PodLogSearch).
Doc("Retrieve log of the gateway's pod from ES").
Param(ws.PathParameter("namespace", "the watching namespace of the gateway")).
Param(ws.PathParameter("gateway", "the name of the gateway")).
Returns(http.StatusOK, api.StatusOK, loggingv1alpha2.APIResponse{}).
Metadata(restfulspec.KeyOpenAPITags, []string{constants.GatewayTag}))
container.Add(ws)
return nil
}

View File

@@ -17,9 +17,11 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"math"
"net/url"
"strconv"
"strings"
"time"
restful "github.com/emicklei/go-restful"
"google.golang.org/grpc/codes"
@@ -90,6 +92,18 @@ func (h *openpitrixHandler) CreateRepo(req *restful.Request, resp *restful.Respo
// trim credential from url
parsedUrl.User = nil
syncPeriod := 0
// If SyncPeriod is empty, ignore it.
if createRepoRequest.SyncPeriod != "" {
duration, err := time.ParseDuration(createRepoRequest.SyncPeriod)
if err != nil {
api.HandleBadRequest(resp, nil, err)
return
} else if duration > 0 {
syncPeriod = int(math.Max(float64(duration/time.Second), constants.HelmRepoMinSyncPeriod))
}
}
repo := v1alpha1.HelmRepo{
ObjectMeta: metav1.ObjectMeta{
Name: idutils.GetUuid36(v1alpha1.HelmRepoIdPrefix),
@@ -103,11 +117,15 @@ func (h *openpitrixHandler) CreateRepo(req *restful.Request, resp *restful.Respo
Spec: v1alpha1.HelmRepoSpec{
Name: createRepoRequest.Name,
Url: parsedUrl.String(),
SyncPeriod: 0,
SyncPeriod: syncPeriod,
Description: stringutils.ShortenString(createRepoRequest.Description, 512),
},
}
if syncPeriod > 0 {
repo.Annotations[v1alpha1.RepoSyncPeriod] = createRepoRequest.SyncPeriod
}
if strings.HasPrefix(createRepoRequest.URL, "https://") || strings.HasPrefix(createRepoRequest.URL, "http://") {
if userInfo != nil {
repo.Spec.Credential.Username = userInfo.Username()
@@ -322,7 +340,7 @@ func (h *openpitrixHandler) DoAppAction(req *restful.Request, resp *restful.Resp
if err != nil {
klog.Errorln(err)
handleOpenpitrixError(resp, err)
api.HandleError(resp, nil, err)
return
}
@@ -388,7 +406,7 @@ func (h *openpitrixHandler) ModifyApp(req *restful.Request, resp *restful.Respon
if err != nil {
klog.Errorln(err)
handleOpenpitrixError(resp, err)
api.HandleError(resp, nil, err)
return
}
@@ -570,7 +588,7 @@ func (h *openpitrixHandler) ModifyAppVersion(req *restful.Request, resp *restful
if err != nil {
klog.Errorln(err)
handleOpenpitrixError(resp, err)
api.HandleError(resp, nil, err)
return
}
@@ -665,7 +683,7 @@ func (h *openpitrixHandler) DoAppVersionAction(req *restful.Request, resp *restf
if err != nil {
klog.Errorln(err)
handleOpenpitrixError(resp, err)
api.HandleError(resp, nil, err)
return
}

View File
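CreateRepo above accepts sync_period as a Go duration string ("45m", "2h") and converts it to seconds before clamping it to the repo minimum. A sketch of that conversion, with the minimum as an assumption:

package main

import (
	"fmt"
	"math"
	"time"
)

// minSyncSeconds is a hypothetical stand-in for constants.HelmRepoMinSyncPeriod.
const minSyncSeconds = 180

// parseSyncPeriod converts a duration string into whole seconds, clamped
// to the minimum; an empty or non-positive duration disables auto sync.
func parseSyncPeriod(raw string) (int, error) {
	if raw == "" {
		return 0, nil
	}
	d, err := time.ParseDuration(raw)
	if err != nil {
		return 0, err // surfaces as a 400 Bad Request in the handler above
	}
	if d <= 0 {
		return 0, nil
	}
	return int(math.Max(float64(d/time.Second), minSyncSeconds)), nil
}

func main() {
	s, _ := parseSyncPeriod("45m")
	fmt.Println(s) // 2700
}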

@@ -76,7 +76,8 @@ func (p *passwordAuthenticator) Authenticate(_ context.Context, username, passwo
return nil, providerOptions.Name, err
}
linkedAccount, err := p.userGetter.findMappedUser(providerOptions.Name, authenticated.GetUserID())
if err != nil {
if err != nil && !errors.IsNotFound(err) {
klog.Error(err)
return nil, providerOptions.Name, err
}
// using this method requires you to manually provision users.
@@ -129,7 +130,8 @@ func (p *passwordAuthenticator) Authenticate(_ context.Context, username, passwo
return nil, "", err
}
u := &authuser.DefaultInfo{
Name: user.Name,
Name: user.Name,
Groups: user.Spec.Groups,
}
// check if the password is initialized
if uninitialized := user.Annotations[iamv1alpha2.UninitializedAnnotation]; uninitialized != "" {

View File
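The authenticator fix above stops treating a missing mapped account as a hard failure: errors.IsNotFound is filtered out so the flow can continue to auto-provisioning. A sketch of the tolerate-NotFound pattern:

package auth

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

type account struct{ Name string }

// lookupLinkedAccount tolerates a Kubernetes NotFound error so callers can
// fall back to provisioning; any other error is a genuine failure.
func lookupLinkedAccount(find func() (*account, error)) (*account, error) {
	linked, err := find()
	if err != nil && !apierrors.IsNotFound(err) {
		return nil, err
	}
	return linked, nil // linked may be nil: caller creates the mapping
}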

@@ -19,9 +19,22 @@
package auth
import (
"context"
"reflect"
"testing"
"github.com/mitchellh/mapstructure"
"golang.org/x/crypto/bcrypt"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apiserver/pkg/authentication/user"
authuser "k8s.io/apiserver/pkg/authentication/user"
iamv1alpha2 "kubesphere.io/api/iam/v1alpha2"
"kubesphere.io/kubesphere/pkg/apiserver/authentication"
"kubesphere.io/kubesphere/pkg/apiserver/authentication/identityprovider"
"kubesphere.io/kubesphere/pkg/apiserver/authentication/oauth"
fakeks "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
ksinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions"
)
func TestEncryptPassword(t *testing.T) {
@@ -39,3 +52,197 @@ func hashPassword(password string) (string, error) {
bytes, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.MinCost)
return string(bytes), err
}
func Test_passwordAuthenticator_Authenticate(t *testing.T) {
oauthOptions := &authentication.Options{
OAuthOptions: &oauth.Options{
IdentityProviders: []oauth.IdentityProviderOptions{
{
Name: "fakepwd",
MappingMethod: "auto",
Type: "fakePasswordProvider",
Provider: oauth.DynamicOptions{
"identities": map[string]interface{}{
"user1": map[string]string{
"uid": "100001",
"email": "user1@kubesphere.io",
"username": "user1",
"password": "password",
},
"user2": map[string]string{
"uid": "100002",
"email": "user2@kubesphere.io",
"username": "user2",
"password": "password",
},
},
},
},
},
},
}
identityprovider.RegisterGenericProvider(&fakePasswordProviderFactory{})
if err := identityprovider.SetupWithOptions(oauthOptions.OAuthOptions.IdentityProviders); err != nil {
t.Fatal(err)
}
ksClient := fakeks.NewSimpleClientset()
ksInformerFactory := ksinformers.NewSharedInformerFactory(ksClient, 0)
err := ksInformerFactory.Iam().V1alpha2().Users().Informer().GetIndexer().Add(newUser("user1", "100001", "fakepwd"))
err = ksInformerFactory.Iam().V1alpha2().Users().Informer().GetIndexer().Add(newUser("user3", "100003", ""))
err = ksInformerFactory.Iam().V1alpha2().Users().Informer().GetIndexer().Add(newActiveUser("user4", "password"))
if err != nil {
t.Fatal(err)
}
authenticator := NewPasswordAuthenticator(
ksClient,
ksInformerFactory.Iam().V1alpha2().Users().Lister(),
oauthOptions,
)
type args struct {
ctx context.Context
username string
password string
}
tests := []struct {
name string
passwordAuthenticator PasswordAuthenticator
args args
want authuser.Info
want1 string
wantErr bool
}{
{
name: "Should successfully with existing provider user",
passwordAuthenticator: authenticator,
args: args{
ctx: context.Background(),
username: "user1",
password: "password",
},
want: &user.DefaultInfo{
Name: "user1",
},
wantErr: false,
},
{
name: "Should return register user",
passwordAuthenticator: authenticator,
args: args{
ctx: context.Background(),
username: "user2",
password: "password",
},
want: &user.DefaultInfo{
Name: "system:pre-registration",
Extra: map[string][]string{
"email": {"user2@kubesphere.io"},
"idp": {"fakepwd"},
"uid": {"100002"},
"username": {"user2"},
},
},
wantErr: false,
},
{
name: "Should failed login",
passwordAuthenticator: authenticator,
args: args{
ctx: context.Background(),
username: "user3",
password: "password",
},
wantErr: true,
},
{
name: "Should successfully with internal user",
passwordAuthenticator: authenticator,
args: args{
ctx: context.Background(),
username: "user4",
password: "password",
},
want: &user.DefaultInfo{
Name: "user4",
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := tt.passwordAuthenticator
got, _, err := p.Authenticate(tt.args.ctx, tt.args.username, tt.args.password)
if (err != nil) != tt.wantErr {
t.Errorf("passwordAuthenticator.Authenticate() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("passwordAuthenticator.Authenticate() got = %v, want %v", got, tt.want)
}
})
}
}
type fakePasswordProviderFactory struct {
}
type fakePasswordProvider struct {
Identities map[string]fakePasswordIdentity `json:"identities"`
}
type fakePasswordIdentity struct {
UID string `json:"uid"`
Username string `json:"username"`
Email string `json:"email"`
Password string `json:"password"`
}
func (f fakePasswordIdentity) GetUserID() string {
return f.UID
}
func (f fakePasswordIdentity) GetUsername() string {
return f.Username
}
func (f fakePasswordIdentity) GetEmail() string {
return f.Email
}
func (fakePasswordProviderFactory) Type() string {
return "fakePasswordProvider"
}
func (fakePasswordProviderFactory) Create(options oauth.DynamicOptions) (identityprovider.GenericProvider, error) {
var fakeProvider fakePasswordProvider
if err := mapstructure.Decode(options, &fakeProvider); err != nil {
return nil, err
}
return &fakeProvider, nil
}
func (l fakePasswordProvider) Authenticate(username string, password string) (identityprovider.Identity, error) {
if i, ok := l.Identities[username]; ok && i.Password == password {
return i, nil
}
return nil, errors.NewUnauthorized("authorization failed")
}
func encrypt(password string) (string, error) {
bytes, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
return string(bytes), err
}
func newActiveUser(username string, password string) *iamv1alpha2.User {
u := newUser(username, "", "")
password, _ = encrypt(password)
u.Spec.EncryptedPassword = password
s := iamv1alpha2.UserActive
u.Status.State = &s
return u
}

View File

@@ -259,12 +259,12 @@ func (c *gatewayOperator) GetGateways(namespace string) ([]*v1alpha1.Gateway, er
}
obj := &v1alpha1.Gateway{}
err := c.client.Get(context.TODO(), key, obj)
if errors.IsNotFound(err) {
return gateways, nil
} else if err != nil {
if err == nil {
gateways = append(gateways, obj)
} else if err != nil && !errors.IsNotFound(err) {
return nil, err
}
gateways = append(gateways, obj)
for _, g := range gateways {
s := &corev1.Service{}
@@ -281,7 +281,7 @@ func (c *gatewayOperator) GetGateways(namespace string) ([]*v1alpha1.Gateway, er
}
}
return gateways, err
return gateways, nil
}
// Create a Gateway in a namespace
@@ -396,8 +396,8 @@ func (c *gatewayOperator) ListGateways(query *query.Query) (*api.ListResult, err
}),
})
for _, s := range services.Items {
result = append(result, &s)
for i := range services.Items {
result = append(result, &services.Items[i])
}
return v1alpha3.DefaultList(result, query, c.compare, c.filter, c.transform), nil
@@ -447,16 +447,27 @@ func (c *gatewayOperator) compare(left runtime.Object, right runtime.Object, fie
}
func (c *gatewayOperator) filter(object runtime.Object, filter query.Filter) bool {
var objMeta v1.ObjectMeta
var namespace string
gateway, ok := object.(*v1alpha1.Gateway)
if !ok {
return false
svc, ok := object.(*corev1.Service)
if !ok {
return false
}
namespace = svc.Labels["project"]
objMeta = svc.ObjectMeta
} else {
namespace = gateway.Spec.Conroller.Scope.Namespace
objMeta = gateway.ObjectMeta
}
switch filter.Field {
case query.FieldNamespace:
return strings.Compare(gateway.Spec.Conroller.Scope.Namespace, string(filter.Value)) == 0
return strings.Compare(namespace, string(filter.Value)) == 0
default:
return v1alpha3.DefaultObjectMetaFilter(gateway.ObjectMeta, filter)
return v1alpha3.DefaultObjectMetaFilter(objMeta, filter)
}
}

View File
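Two fixes above are worth calling out: GetGateways now appends to the result only on a successful Get, and ListGateways indexes into services.Items instead of taking the address of the range variable. Before Go 1.22 the range variable is one reused location, so &s made every stored pointer refer to the final service. A sketch of the aliasing bug and the fix:

package main

import "fmt"

type service struct{ Name string }

func main() {
	items := []service{{"a"}, {"b"}, {"c"}}

	// Buggy (pre-Go 1.22 semantics): &s aliases the single loop variable,
	// so every stored pointer ends up referring to the last element.
	var bad []*service
	for _, s := range items {
		bad = append(bad, &s)
	}
	fmt.Println(bad[0].Name, bad[1].Name, bad[2].Name) // "c c c" before Go 1.22

	// Fixed: take the address of the slice element itself.
	var good []*service
	for i := range items {
		good = append(good, &items[i])
	}
	fmt.Println(good[0].Name, good[1].Name, good[2].Name) // always "a b c"
}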

@@ -182,6 +182,9 @@ func Test_gatewayOperator_GetGateways(t *testing.T) {
Type: corev1.ServiceTypeNodePort,
},
},
Status: runtime.RawExtension{
Raw: []byte("{\"loadBalancer\":{},\"service\":[{\"name\":\"http\",\"protocol\":\"TCP\",\"port\":80,\"targetPort\":0}]}\n"),
},
},
},
},

View File

@@ -114,8 +114,8 @@ type amOperator struct {
k8sclient kubernetes.Interface
}
func NewReadOnlyOperator(factory informers.InformerFactory) AccessManagementInterface {
return &amOperator{
func NewReadOnlyOperator(factory informers.InformerFactory, devopsClient devops.Interface) AccessManagementInterface {
operator := &amOperator{
globalRoleBindingGetter: globalrolebinding.New(factory.KubeSphereSharedInformerFactory()),
workspaceRoleBindingGetter: workspacerolebinding.New(factory.KubeSphereSharedInformerFactory()),
clusterRoleBindingGetter: clusterrolebinding.New(factory.KubernetesSharedInformerFactory()),
@@ -126,16 +126,17 @@ func NewReadOnlyOperator(factory informers.InformerFactory) AccessManagementInte
roleGetter: role.New(factory.KubernetesSharedInformerFactory()),
namespaceLister: factory.KubernetesSharedInformerFactory().Core().V1().Namespaces().Lister(),
}
// there are no DevOpsProject CRDs if the DevOps module is disabled
if devopsClient != nil {
operator.devopsProjectLister = factory.KubeSphereSharedInformerFactory().Devops().V1alpha3().DevOpsProjects().Lister()
}
return operator
}
func NewOperator(ksClient kubesphere.Interface, k8sClient kubernetes.Interface, factory informers.InformerFactory, devopsClient devops.Interface) AccessManagementInterface {
amOperator := NewReadOnlyOperator(factory).(*amOperator)
amOperator := NewReadOnlyOperator(factory, devopsClient).(*amOperator)
amOperator.ksclient = ksClient
amOperator.k8sclient = k8sClient
// there are no DevOpsProject CRDs if the DevOps module is disabled
if devopsClient != nil {
amOperator.devopsProjectLister = factory.KubeSphereSharedInformerFactory().Devops().V1alpha3().DevOpsProjects().Lister()
}
return amOperator
}
@@ -1090,19 +1091,21 @@ func (am *amOperator) ListGroupRoleBindings(workspace string, query *query.Query
result = append(result, roleBinding)
}
}
devOpsProjects, err := am.devopsProjectLister.List(labels.SelectorFromSet(labels.Set{tenantv1alpha1.WorkspaceLabel: workspace}))
if err != nil {
return nil, err
}
for _, devOpsProject := range devOpsProjects {
roleBindings, err := am.roleBindingGetter.List(devOpsProject.Name, query)
if am.devopsProjectLister != nil {
devOpsProjects, err := am.devopsProjectLister.List(labels.SelectorFromSet(labels.Set{tenantv1alpha1.WorkspaceLabel: workspace}))
if err != nil {
klog.Error(err)
return nil, err
}
for _, obj := range roleBindings.Items {
roleBinding := obj.(*rbacv1.RoleBinding)
result = append(result, roleBinding)
for _, devOpsProject := range devOpsProjects {
roleBindings, err := am.roleBindingGetter.List(devOpsProject.Name, query)
if err != nil {
klog.Error(err)
return nil, err
}
for _, obj := range roleBindings.Items {
roleBinding := obj.(*rbacv1.RoleBinding)
result = append(result, roleBinding)
}
}
}
return result, nil

View File
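The am operator above now takes the devops client in both constructors and leaves devopsProjectLister nil when the DevOps module is disabled, guarding every use with a nil check. A compact sketch of that optional-dependency pattern:

package example

// projectLister is a stand-in for the generated DevOpsProject lister interface.
type projectLister interface {
	List() ([]string, error)
}

type amOperator struct {
	devopsProjects projectLister // nil when the DevOps module is disabled
}

// listProjectNames degrades gracefully: with the module disabled it simply
// reports no projects instead of dereferencing a nil lister.
func (o *amOperator) listProjectNames() ([]string, error) {
	if o.devopsProjects == nil {
		return nil, nil
	}
	return o.devopsProjects.List()
}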

@@ -175,11 +175,16 @@ func (c *applicationOperator) ValidatePackage(request *ValidatePackageRequest) (
func (c *applicationOperator) DoAppAction(appId string, request *ActionRequest) error {
app, err := c.appLister.Get(appId)
app, err := c.getHelmApplication(appId)
if err != nil {
return err
}
// All the apps belonging to a built-in repo have the label `application.kubesphere.io/repo-id`; the value is `builtin-stable` for the app store, or the id of another built-in repo.
if repoId, exist := app.Labels[constants.ChartRepoIdLabelKey]; exist && repoId != v1alpha1.AppStoreRepoId {
return apierrors.NewForbidden(v1alpha1.Resource(v1alpha1.ResourcePluralHelmApplication), app.Name, errors.New("application is immutable"))
}
var filterState string
switch request.Action {
case ActionSuspend:
@@ -393,12 +398,17 @@ func (c *applicationOperator) ModifyApp(appId string, request *ModifyAppRequest)
return invalidS3Config
}
app, err := c.appLister.Get(appId)
app, err := c.getHelmApplication(appId)
if err != nil {
klog.Error(err)
return err
}
// All the apps belonging to a built-in repo have the label `application.kubesphere.io/repo-id`; the value is `builtin-stable` for the app store, or the id of another built-in repo.
if repoId, exist := app.Labels[constants.ChartRepoIdLabelKey]; exist && repoId != v1alpha1.AppStoreRepoId {
return apierrors.NewForbidden(v1alpha1.Resource(v1alpha1.ResourcePluralHelmApplication), app.Name, errors.New("application is immutable"))
}
appCopy := app.DeepCopy()
// modify category
if request.CategoryID != nil {
@@ -602,16 +612,32 @@ func (c *applicationOperator) listApps(conditions *params.Conditions) (ret []*v1
repoId := conditions.Match[RepoId]
if repoId != "" && repoId != v1alpha1.AppStoreRepoId {
// get helm application from helm repo
if ret, exists := c.cachedRepos.ListApplicationsByRepoId(repoId); !exists {
if ret, exists := c.cachedRepos.ListApplicationsInRepo(repoId); !exists {
klog.Warningf("load repo failed, repo id: %s", repoId)
return nil, loadRepoInfoFailed
} else {
return ret, nil
}
} else if repoId == v1alpha1.AppStoreRepoId {
// List apps in the app-store and built-in repo
if c.backingStoreClient == nil {
return []*v1alpha1.HelmApplication{}, nil
}
ls := map[string]string{}
// We only care about the category label when listing apps in the built-in repo.
if conditions.Match[CategoryId] != "" {
ls[constants.CategoryIdLabelKey] = conditions.Match[CategoryId]
}
appInRepo, _ := c.cachedRepos.ListApplicationsInBuiltinRepo(labels.SelectorFromSet(ls))
ret, err = c.appLister.List(labels.SelectorFromSet(buildLabelSelector(conditions)))
ret = append(ret, appInRepo...)
} else {
if c.backingStoreClient == nil {
return []*v1alpha1.HelmApplication{}, nil
}
ret, err = c.appLister.List(labels.SelectorFromSet(buildLabelSelector(conditions)))
}

View File

@@ -141,7 +141,7 @@ func (c *applicationOperator) DeleteAppVersion(id string) error {
}
func (c *applicationOperator) DescribeAppVersion(id string) (*AppVersion, error) {
version, err := c.versionLister.Get(id)
version, err := c.getAppVersion(id)
if err != nil {
klog.Errorf("get app version [%s] failed, error: %s", id, err)
return nil, err
@@ -152,12 +152,17 @@ func (c *applicationOperator) DescribeAppVersion(id string) (*AppVersion, error)
func (c *applicationOperator) ModifyAppVersion(id string, request *ModifyAppVersionRequest) error {
version, err := c.versionLister.Get(id)
version, err := c.getAppVersion(id)
if err != nil {
klog.Errorf("get app version [%s] failed, error: %s", id, err)
return err
}
// All the app versions belonging to a built-in repo have the label `application.kubesphere.io/repo-id`; the value is `builtin-stable` for the app store, or the id of another built-in repo.
if repoId, exists := version.Labels[constants.ChartRepoIdLabelKey]; exists && repoId != v1alpha1.AppStoreRepoId {
return apierrors.NewForbidden(v1alpha1.Resource(v1alpha1.ResourcePluralHelmApplicationVersion), version.Name, errors.New("version is immutable"))
}
versionCopy := version.DeepCopy()
spec := &versionCopy.Spec
@@ -355,12 +360,17 @@ func (c *applicationOperator) DoAppVersionAction(versionId string, request *Acti
}
state := v1alpha1.StateDraft
version, err := c.versionLister.Get(versionId)
version, err := c.getAppVersion(versionId)
if err != nil {
klog.Errorf("get app version %s failed, error: %s", versionId, err)
return err
}
// All the app versions belonging to a built-in repo have the label `application.kubesphere.io/repo-id`; the value is `builtin-stable` for the app store, or the id of another built-in repo.
if repoId, exists := version.Labels[constants.ChartRepoIdLabelKey]; exists && repoId != v1alpha1.AppStoreRepoId {
return apierrors.NewForbidden(v1alpha1.Resource(v1alpha1.ResourcePluralHelmApplicationVersion), version.Name, errors.New("version is immutable"))
}
switch request.Action {
case ActionCancel:
if version.Status.State != v1alpha1.StateSubmitted {
@@ -588,3 +598,13 @@ func (c *applicationOperator) getAppVersionsByAppId(appId string) (ret []*v1alph
return
}
// getAppVersion fetches an app version from the cached repos first, falling back to the lister
func (c *applicationOperator) getAppVersion(id string) (ret *v1alpha1.HelmApplicationVersion, err error) {
if ver, exists, _ := c.cachedRepos.GetAppVersion(id); exists {
return ver, nil
}
ret, err = c.versionLister.Get(id)
return
}

View File

@@ -17,6 +17,8 @@ import (
"context"
"sort"
"kubesphere.io/kubesphere/pkg/utils/reposcache"
"kubesphere.io/kubesphere/pkg/apiserver/query"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -48,12 +50,14 @@ type CategoryInterface interface {
type categoryOperator struct {
ctgClient typed_v1alpha1.ApplicationV1alpha1Interface
ctgLister listers_v1alpha1.HelmCategoryLister
repoCache reposcache.ReposCache
}
func newCategoryOperator(ksFactory externalversions.SharedInformerFactory, ksClient versioned.Interface) CategoryInterface {
func newCategoryOperator(repoCache reposcache.ReposCache, ksFactory externalversions.SharedInformerFactory, ksClient versioned.Interface) CategoryInterface {
c := &categoryOperator{
ctgClient: ksClient.ApplicationV1alpha1(),
ctgLister: ksFactory.Application().V1alpha1().HelmCategories().Lister(),
repoCache: repoCache,
}
return c
@@ -190,8 +194,15 @@ func (c *categoryOperator) ListCategories(conditions *params.Conditions, orderBy
start, end := (&query.Pagination{Limit: limit, Offset: offset}).GetValidPagination(totalCount)
ctgs = ctgs[start:end]
items := make([]interface{}, 0, len(ctgs))
ctgCountsOfBuiltinRepo := c.repoCache.CopyCategoryCount()
for i := range ctgs {
items = append(items, convertCategory(ctgs[i]))
convertedCtg := convertCategory(ctgs[i])
// The per-category count for apps stored in etcd lives in the CRD,
// while the count for apps in the built-in repo lives in memory,
// so we sum the two values before returning.
*convertedCtg.AppTotal += ctgCountsOfBuiltinRepo[convertedCtg.CategoryID]
items = append(items, convertedCtg)
}
return &models.PageableResponse{Items: items, TotalCount: totalCount}, nil

View File

@@ -20,6 +20,8 @@ import (
"context"
"testing"
"kubesphere.io/kubesphere/pkg/utils/reposcache"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
fakek8s "k8s.io/client-go/kubernetes/fake"
"k8s.io/klog"
@@ -82,5 +84,5 @@ func prepareCategoryOperator() CategoryInterface {
k8sClient = fakek8s.NewSimpleClientset()
fakeInformerFactory = informers.NewInformerFactories(k8sClient, ksClient, nil, nil, nil, nil)
return newCategoryOperator(fakeInformerFactory.KubeSphereSharedInformerFactory(), ksClient)
return newCategoryOperator(reposcache.NewReposCache(), fakeInformerFactory.KubeSphereSharedInformerFactory(), ksClient)
}

View File

@@ -56,7 +56,6 @@ func init() {
}
func NewOpenpitrixOperator(ksInformers ks_informers.InformerFactory, ksClient versioned.Interface, s3Client s3.Interface) Interface {
once.Do(func() {
klog.Infof("start helm repo informer")
helmReposInformer = ksInformers.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmRepos().Informer()
@@ -66,16 +65,28 @@ func NewOpenpitrixOperator(ksInformers ks_informers.InformerFactory, ksClient ve
cachedReposData.AddRepo(r)
},
UpdateFunc: func(oldObj, newObj interface{}) {
oldR := oldObj.(*v1alpha1.HelmRepo)
cachedReposData.DeleteRepo(oldR)
r := newObj.(*v1alpha1.HelmRepo)
cachedReposData.AddRepo(r)
oldRepo := oldObj.(*v1alpha1.HelmRepo)
newRepo := newObj.(*v1alpha1.HelmRepo)
cachedReposData.UpdateRepo(oldRepo, newRepo)
},
DeleteFunc: func(obj interface{}) {
r := obj.(*v1alpha1.HelmRepo)
cachedReposData.DeleteRepo(r)
},
})
ctgInformer := ksInformers.KubeSphereSharedInformerFactory().Application().V1alpha1().HelmCategories().Informer()
ctgInformer.AddIndexers(map[string]cache.IndexFunc{
reposcache.CategoryIndexer: func(obj interface{}) ([]string, error) {
ctg, _ := obj.(*v1alpha1.HelmCategory)
return []string{ctg.Spec.Name}, nil
},
})
indexer := ctgInformer.GetIndexer()
cachedReposData.SetCategoryIndexer(indexer)
go ctgInformer.Run(wait.NeverStop)
go helmReposInformer.Run(wait.NeverStop)
})
@@ -84,6 +95,6 @@ func NewOpenpitrixOperator(ksInformers ks_informers.InformerFactory, ksClient ve
ApplicationInterface: newApplicationOperator(cachedReposData, ksInformers.KubeSphereSharedInformerFactory(), ksClient, s3Client),
RepoInterface: newRepoOperator(cachedReposData, ksInformers.KubeSphereSharedInformerFactory(), ksClient),
ReleaseInterface: newReleaseOperator(cachedReposData, ksInformers.KubernetesSharedInformerFactory(), ksInformers.KubeSphereSharedInformerFactory(), ksClient),
CategoryInterface: newCategoryOperator(ksInformers.KubeSphereSharedInformerFactory(), ksClient),
CategoryInterface: newCategoryOperator(cachedReposData, ksInformers.KubeSphereSharedInformerFactory(), ksClient),
}
}

View File
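NewOpenpitrixOperator above attaches a custom indexer to the HelmCategory informer so the repo cache can resolve categories by Spec.Name without scanning, and switches the informer's UpdateFunc to a single UpdateRepo call instead of delete-then-add. A sketch of registering and querying a client-go indexer, with "by-name" as a hypothetical index name in place of reposcache.CategoryIndexer:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

type category struct {
	UID  string
	Name string
}

func main() {
	indexer := cache.NewIndexer(
		// key function: unique store key per object
		func(obj interface{}) (string, error) { return obj.(*category).UID, nil },
		// secondary index: look categories up by display name
		cache.Indexers{"by-name": func(obj interface{}) ([]string, error) {
			return []string{obj.(*category).Name}, nil
		}},
	)
	_ = indexer.Add(&category{UID: "ctg-1", Name: "database"})

	hits, _ := indexer.ByIndex("by-name", "database")
	fmt.Println(len(hits)) // 1
}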

@@ -19,6 +19,7 @@ import (
"net/url"
"sort"
"strings"
"time"
"kubesphere.io/kubesphere/pkg/apiserver/query"
@@ -162,6 +163,32 @@ func (c *repoOperator) ModifyRepo(id string, request *ModifyRepoRequest) error {
repoCopy.Spec.Description = stringutils.ShortenString(*request.Description, DescriptionLen)
}
if repoCopy.Annotations == nil {
repoCopy.Annotations = map[string]string{}
}
if request.SyncPeriod != nil {
syncPeriod := 0
if *request.SyncPeriod == "" {
// disable auto sync
syncPeriod = 0
} else {
if duration, err := time.ParseDuration(*request.SyncPeriod); err != nil {
return err
} else {
syncPeriod = int(duration / time.Second)
}
}
if syncPeriod == 0 {
// disable auto sync
repoCopy.Spec.SyncPeriod = 0
delete(repoCopy.Annotations, v1alpha1.RepoSyncPeriod)
} else {
repoCopy.Spec.SyncPeriod = syncPeriod
repoCopy.Annotations[v1alpha1.RepoSyncPeriod] = *request.SyncPeriod
}
}
// modify the name of the repo
if request.Name != nil && len(*request.Name) > 0 && *request.Name != repoCopy.Spec.Name {
items, err := c.repoLister.List(labels.SelectorFromSet(map[string]string{constants.WorkspaceLabelKey: repo.GetWorkspace()}))
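For reference, a minimal standalone sketch of the sync-period conversion above, assuming only the Go standard library (parseSyncPeriod is a hypothetical helper name; the real code inlines this logic):

package main

import (
	"fmt"
	"time"
)

// parseSyncPeriod mirrors the conversion above: an empty string disables
// auto sync, any other value must be a valid Go duration string.
func parseSyncPeriod(s string) (int, error) {
	if s == "" {
		return 0, nil // disable auto sync
	}
	duration, err := time.ParseDuration(s)
	if err != nil {
		return 0, err
	}
	return int(duration / time.Second), nil
}

func main() {
	for _, s := range []string{"", "180s", "2h", "45m"} {
		seconds, err := parseSyncPeriod(s)
		fmt.Println(s, seconds, err) // 0, 180, 7200, 2700
	}
}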

View File

@@ -585,6 +585,11 @@ type CreateRepoRequest struct {
// required, runtime provider, e.g. [qingcloud|aliyun|aws|kubernetes]
Providers []string `json:"providers"`
// minimum period at which to sync the helm repo; a duration string is a
// sequence of decimal numbers, each with an optional fraction and a unit
// suffix, such as "180s", "2h" or "45m".
SyncPeriod string `json:"sync_period"`
// repository type
Type string `json:"type,omitempty"`
@@ -612,6 +617,9 @@ type ModifyRepoRequest struct {
Workspace *string `json:"workspace,omitempty"`
// minimum period at which to sync the helm repo
SyncPeriod *string `json:"sync_period"`
// repository name
Name *string `json:"name,omitempty"`
@@ -716,6 +724,8 @@ type Repo struct {
// visibility, e.g. [public|private]
Visibility string `json:"visibility,omitempty"`
SyncPeriod string `json:"sync_period,omitempty"`
}
type CreateRepoResponse struct {

View File

@@ -401,6 +401,18 @@ func convertAppVersion(in *v1alpha1.HelmApplicationVersion) *AppVersion {
out.Icon = in.Spec.Icon
}
// The Maintainers and Sources fields were string fields, so we encode the helm chart's
// maintainers and sources, which are arrays, to strings.
if len(in.Spec.Maintainers) > 0 {
maintainers, _ := json.Marshal(in.Spec.Maintainers)
out.Maintainers = string(maintainers)
}
if len(in.Spec.Sources) > 0 {
source, _ := json.Marshal(in.Spec.Sources)
out.Sources = string(source)
}
out.Status = in.State()
out.Owner = in.GetCreator()
out.Name = in.GetVersionName()
@@ -427,6 +439,7 @@ func convertRepo(in *v1alpha1.HelmRepo) *Repo {
cred, _ := json.Marshal(in.Spec.Credential)
out.Credential = string(cred)
out.SyncPeriod = in.Annotations[v1alpha1.RepoSyncPeriod]
out.URL = in.Spec.Url
return &out
@@ -564,6 +577,8 @@ func buildApplicationVersion(app *v1alpha1.HelmApplication, chrt helmrepoindex.V
Icon: chrt.GetIcon(),
Home: chrt.GetHome(),
Description: stringutils.ShortenString(chrt.GetDescription(), v1alpha1.MsgLen),
Sources: chrt.GetRawSources(),
Maintainers: chrt.GetRawMaintainers(),
},
Created: &t,
// set data to nil before saving the app version to etcd
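To illustrate the string encoding described above, a self-contained sketch (the Maintainer struct is a simplified stand-in for the kubesphere.io/api type):

package main

import (
	"encoding/json"
	"fmt"
)

// Maintainer is a simplified stand-in for the API type.
type Maintainer struct {
	Name  string `json:"name,omitempty"`
	Email string `json:"email,omitempty"`
	URL   string `json:"url,omitempty"`
}

func main() {
	maintainers := []*Maintainer{{Name: "alice", Email: "alice@example.com"}}
	// The out.Maintainers field is a plain string, so the array is
	// serialized to JSON text before assignment.
	encoded, _ := json.Marshal(maintainers)
	fmt.Println(string(encoded)) // [{"name":"alice","email":"alice@example.com"}]
}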

View File

@@ -28,9 +28,10 @@ import (
)
const (
filedNameName = "nodeName"
filedPVCName = "pvcName"
filedServiceName = "serviceName"
fieldNodeName = "nodeName"
fieldPVCName = "pvcName"
fieldServiceName = "serviceName"
fieldStatus = "status"
)
type podsGetter struct {
@@ -82,12 +83,14 @@ func (p *podsGetter) filter(object runtime.Object, filter query.Filter) bool {
return false
}
switch filter.Field {
case filedNameName:
case fieldNodeName:
return pod.Spec.NodeName == string(filter.Value)
case filedPVCName:
case fieldPVCName:
return p.podBindPVC(pod, string(filter.Value))
case filedServiceName:
case fieldServiceName:
return p.podBelongToService(pod, string(filter.Value))
case fieldStatus:
return string(pod.Status.Phase) == string(filter.Value)
default:
return v1alpha3.DefaultObjectMetaFilter(pod.ObjectMeta, filter)
}
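A minimal sketch of the new status filter in isolation, using only the k8s.io/api types (matchesStatus is a hypothetical helper; the real code handles this inside the switch above):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// matchesStatus mirrors the fieldStatus case: compare the pod phase
// against the filter value as plain strings.
func matchesStatus(pod *corev1.Pod, value string) bool {
	return string(pod.Status.Phase) == value
}

func main() {
	pod := &corev1.Pod{Status: corev1.PodStatus{Phase: corev1.PodRunning}}
	fmt.Println(matchesStatus(pod, "Running")) // true
	fmt.Println(matchesStatus(pod, "Pending")) // false
}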

View File

@@ -51,7 +51,7 @@ func TestListPods(t *testing.T) {
Filters: map[query.Field]query.Value{query.FieldNamespace: query.Value("default")},
},
&api.ListResult{
Items: []interface{}{foo4, foo3, foo2, foo1},
Items: []interface{}{foo5, foo4, foo3, foo2, foo1},
TotalItems: len(pods),
},
nil,
@@ -68,7 +68,7 @@ func TestListPods(t *testing.T) {
Ascending: false,
Filters: map[query.Field]query.Value{
query.FieldNamespace: query.Value("default"),
filedPVCName: query.Value(foo4.Spec.Volumes[0].PersistentVolumeClaim.ClaimName),
fieldPVCName: query.Value(foo4.Spec.Volumes[0].PersistentVolumeClaim.ClaimName),
},
},
&api.ListResult{
@@ -77,6 +77,27 @@ func TestListPods(t *testing.T) {
},
nil,
},
{
"test status filter",
"default",
&query.Query{
Pagination: &query.Pagination{
Limit: 10,
Offset: 0,
},
SortBy: query.FieldName,
Ascending: false,
Filters: map[query.Field]query.Value{
query.FieldNamespace: query.Value("default"),
fieldStatus: query.Value(corev1.PodRunning),
},
},
&api.ListResult{
Items: []interface{}{foo5},
TotalItems: 1,
},
nil,
},
}
getter := prepare()
@@ -133,7 +154,16 @@ var (
},
},
}
pods = []interface{}{foo1, foo2, foo3, foo4}
foo5 = &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "foo5",
Namespace: "default",
},
Status: corev1.PodStatus{
Phase: corev1.PodRunning,
},
}
pods = []interface{}{foo1, foo2, foo3, foo4, foo5}
)
func prepare() v1alpha3.Interface {

View File

@@ -26,6 +26,8 @@ import (
type Options struct {
WatchesPath string `json:"watchesPath,omitempty" yaml:"watchesPath"`
Namespace string `json:"namespace,omitempty" yaml:"namespace"`
Repository string `json:"repository,omitempty" yaml:"repository"`
Tag string `json:"tag,omitempty" yaml:"tag"`
}
// NewGatewayOptions creates a default Gateway Option
@@ -33,6 +35,8 @@ func NewGatewayOptions() *Options {
return &Options{
WatchesPath: "",
Namespace: "", //constants.KubeSphereControlNamespace
Repository: "",
Tag: "",
}
}
@@ -59,4 +63,6 @@ func (s *Options) ApplyTo(options *Options) {
func (s *Options) AddFlags(fs *pflag.FlagSet, c *Options) {
fs.StringVar(&s.WatchesPath, "watches-path", c.WatchesPath, "Path to the watches file to use.")
fs.StringVar(&s.Namespace, "namespace", c.Namespace, "Working Namespace of the Gateway's Ingress Controller.")
fs.StringVar(&s.Repository, "repository", c.Repository, "The Gateway Controller's image repository.")
fs.StringVar(&s.Tag, "tag", c.Tag, "The Gateway Controller's image tag.")
}
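A hedged sketch of how the two new flags surface on the command line, using spf13/pflag directly in a standalone program (flag names match the diff; the registry value is a placeholder):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	var repository, tag string
	fs := pflag.NewFlagSet("gateway", pflag.ExitOnError)
	fs.StringVar(&repository, "repository", "", "The Gateway Controller's image repository.")
	fs.StringVar(&tag, "tag", "", "The Gateway Controller's image tag.")
	// Override the default ingress controller image via flags.
	_ = fs.Parse([]string{"--repository=registry.example.com/ingress-nginx", "--tag=v1.1.0"})
	fmt.Println(repository + ":" + tag) // registry.example.com/ingress-nginx:v1.1.0
}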

View File

@@ -17,6 +17,7 @@ import (
"fmt"
"strings"
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/simple/client/monitoring"
)
@@ -167,8 +168,8 @@ var promQLTemplates = map[string]string{
// ingress
"ingress_request_count": `round(sum(increase(nginx_ingress_controller_requests{$1,$2}[$3])))`,
"ingress_request_4xx_count": `round(sum(increase(nginx_ingress_controller_requests{$1,$2,status="[4].*"}[$3])))`,
"ingress_request_5xx_count": `round(sum(increase(nginx_ingress_controller_requests{$1,$2,status="[5].*"}[$3])))`,
"ingress_request_4xx_count": `round(sum(increase(nginx_ingress_controller_requests{$1,$2,status=~"[4].*"}[$3])))`,
"ingress_request_5xx_count": `round(sum(increase(nginx_ingress_controller_requests{$1,$2,status=~"[5].*"}[$3])))`,
"ingress_active_connections": `sum(avg_over_time(nginx_ingress_controller_nginx_process_connections{$2,state="active"}[$3]))`,
"ingress_success_rate": `sum(rate(nginx_ingress_controller_requests{$1,$2,status!~"[4-5].*"}[$3])) / sum(rate(nginx_ingress_controller_requests{$1,$2}[$3]))`,
"ingress_request_duration_average": `sum_over_time(nginx_ingress_controller_request_duration_seconds_sum{$1,$2}[$3])/sum_over_time(nginx_ingress_controller_request_duration_seconds_count{$1,$2}[$3])`,
@@ -482,13 +483,20 @@ func makeIngressMetricExpr(tmpl string, o monitoring.QueryOptions) string {
// For monitoring ingress in the specific namespace
// GET /namespaces/{namespace}/ingress/{ingress} or
// GET /namespaces/{namespace}/ingress
if o.NamespaceName != "" {
if o.NamespaceName != constants.KubeSphereNamespace {
if o.Ingress != "" {
ingressSelector = fmt.Sprintf(`exported_namespace="%s", ingress="%s"`, o.NamespaceName, o.Ingress)
} else {
ingressSelector = fmt.Sprintf(`exported_namespace="%s", ingress=~"%s"`, o.NamespaceName, o.ResourceFilter)
}
} else {
if o.Ingress != "" {
ingressSelector = fmt.Sprintf(`ingress="%s"`, o.Ingress)
} else {
ingressSelector = fmt.Sprintf(`ingress=~"%s"`, o.ResourceFilter)
}
}
// job is a required filter
// GET /namespaces/{namespace}/ingress?job=xxx&pod=xxx
if o.Job != "" {
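The matcher fix in the promQL templates above replaces `=` (exact label match) with `=~` (regex match): with `=`, Prometheus looks for a literal status value of "[4].*", which never occurs, so 4xx/5xx counts were always zero. A small Go sketch of the distinction (Prometheus anchors its regexes, which the `^(?:...)$` wrapper imitates):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	status, pattern := "404", "[4].*"
	// status="[4].*" — exact string comparison, never true for real codes.
	fmt.Println(status == pattern) // false
	// status=~"[4].*" — anchored regex match, true for any 4xx code.
	fmt.Println(regexp.MustCompile("^(?:" + pattern + ")$").MatchString(status)) // true
}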

View File

@@ -16,7 +16,11 @@ limitations under the License.
package helmrepoindex
import "time"
import (
"time"
"kubesphere.io/api/application/v1alpha1"
)
type VersionInterface interface {
GetName() string
@@ -28,8 +32,10 @@ type VersionInterface interface {
GetIcon() string
GetHome() string
GetSources() string
GetRawSources() []string
GetKeywords() string
GetMaintainers() string
GetRawMaintainers() []*v1alpha1.Maintainer
GetScreenshots() string
GetPackageName() string
GetCreateTime() time.Time

View File

@@ -23,6 +23,8 @@ import (
"strings"
"time"
"kubesphere.io/api/application/v1alpha1"
"helm.sh/helm/v3/pkg/repo"
"k8s.io/klog"
@@ -64,10 +66,27 @@ func (h HelmVersionWrapper) GetSources() string {
return string(s)
}
func (h HelmVersionWrapper) GetRawSources() []string {
return h.Sources
}
func (h HelmVersionWrapper) GetKeywords() string {
return strings.Join(h.ChartVersion.Keywords, ",")
}
func (h HelmVersionWrapper) GetRawMaintainers() []*v1alpha1.Maintainer {
mt := make([]*v1alpha1.Maintainer, 0, len(h.Maintainers))
for _, value := range h.Maintainers {
mt = append(mt, &v1alpha1.Maintainer{
URL: value.URL,
Name: value.Name,
Email: value.Email,
})
}
return mt
}
func (h HelmVersionWrapper) GetMaintainers() string {
if len(h.ChartVersion.Maintainers) == 0 {
return ""

View File

@@ -78,8 +78,8 @@ func loadIndex(data []byte) (*helmrepo.IndexFile, error) {
var empty = struct{}{}
// merge new index with index from crd
func MergeRepoIndex(index *helmrepo.IndexFile, existsSavedIndex *SavedIndex) *SavedIndex {
// MergeRepoIndex merge new index with index from crd
func MergeRepoIndex(repo *v1alpha1.HelmRepo, index *helmrepo.IndexFile, existsSavedIndex *SavedIndex) *SavedIndex {
saved := &SavedIndex{}
if index == nil {
return existsSavedIndex
@@ -102,20 +102,37 @@ func MergeRepoIndex(index *helmrepo.IndexFile, existsSavedIndex *SavedIndex) *Sa
// add new applications
if application, exists := saved.Applications[name]; !exists {
application = &Application{
Name: name,
ApplicationId: idutils.GetUuid36(v1alpha1.HelmApplicationIdPrefix),
Description: versions[0].Description,
Icon: versions[0].Icon,
Name: name,
Description: versions[0].Description,
Icon: versions[0].Icon,
Created: time.Now(),
}
// The app id will be added to the labels of the helm release.
// Apps in repos created by users may contain malformed text, so we generate a random id for them.
// Apps in the system repo have been audited by the admin, so their chart names should not include
// malformed text, and we can safely add the name string to the labels of the k8s object.
if IsBuiltInRepo(repo.Name) {
application.ApplicationId = fmt.Sprintf("%s%s-%s", v1alpha1.HelmApplicationIdPrefix, repo.Name, name)
} else {
application.ApplicationId = idutils.GetUuid36(v1alpha1.HelmApplicationIdPrefix)
}
charts := make([]*ChartVersion, 0, len(versions))
for ind := range versions {
chart := &ChartVersion{
ApplicationId: application.ApplicationId,
ApplicationVersionId: idutils.GetUuid36(v1alpha1.HelmApplicationVersionIdPrefix),
ChartVersion: *versions[ind],
ApplicationId: application.ApplicationId,
ChartVersion: *versions[ind],
}
chart.ApplicationVersionId = generateAppVersionId(repo, versions[ind].Name, versions[ind].Version)
charts = append(charts, chart)
// Use the creation time of the oldest chart as the creation time of the app.
if versions[ind].Created.Before(application.Created) {
application.Created = versions[ind].Created
}
}
application.Charts = charts
@@ -132,10 +149,11 @@ func MergeRepoIndex(index *helmrepo.IndexFile, existsSavedIndex *SavedIndex) *Sa
// add new chart version
if _, exists := savedChartVersion[ver.Version]; !exists {
chart := &ChartVersion{
ApplicationId: application.ApplicationId,
ApplicationVersionId: idutils.GetUuid36(v1alpha1.HelmApplicationVersionIdPrefix),
ChartVersion: *ver,
ApplicationId: application.ApplicationId,
ChartVersion: *ver,
}
chart.ApplicationVersionId = generateAppVersionId(repo, ver.Name, ver.Version)
charts = append(charts, chart)
}
newVersion[ver.Version] = empty
@@ -204,6 +222,26 @@ func (i *SavedIndex) GetApplicationVersion(appId, versionId string) *v1alpha1.He
return nil
}
// The app version id will be added to the labels of the helm release.
// Apps in repos created by users may contain malformed text, so we generate a random id for them.
// Apps in the system repo have been audited by the admin, so their chart names should not include
// malformed text, and we can safely add the name string to the labels of the k8s object.
func generateAppVersionId(repo *v1alpha1.HelmRepo, chartName, version string) string {
if IsBuiltInRepo(repo.Name) {
return fmt.Sprintf("%s%s-%s-%s", v1alpha1.HelmApplicationIdPrefix, repo.Name, chartName, version)
} else {
return idutils.GetUuid36(v1alpha1.HelmApplicationVersionIdPrefix)
}
}
// IsBuiltInRepo checks whether a repo is a built-in repo.
// All built-in repos live in the workspace system-workspace, and their names start with the
// 'builtin-' prefix to differentiate them from repos created by users.
func IsBuiltInRepo(repoName string) bool {
return strings.HasPrefix(repoName, v1alpha1.BuiltinRepoPrefix)
}
type SavedIndex struct {
APIVersion string `json:"apiVersion"`
Generated time.Time `json:"generated"`
@@ -290,7 +328,7 @@ type Application struct {
// application status
Status string `json:"status"`
// The URL to an icon file.
Icon string `json:"icon,omitempty"`
Charts []*ChartVersion `json:"charts"`
Icon string `json:"icon,omitempty"`
Created time.Time `json:"created"`
Charts []*ChartVersion `json:"charts"`
}
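To make the deterministic-versus-random id scheme concrete, a hedged standalone sketch (prefix constants are copied from this change; the uuid helper is stubbed because idutils is an internal package):

package main

import (
	"fmt"
	"strings"
)

const (
	builtinRepoPrefix   = "builtin-" // v1alpha1.BuiltinRepoPrefix
	applicationIdPrefix = "app-"     // v1alpha1.HelmApplicationIdPrefix
)

func isBuiltInRepo(repoName string) bool {
	return strings.HasPrefix(repoName, builtinRepoPrefix)
}

// appVersionId mirrors generateAppVersionId: stable, human-readable ids
// for audited built-in charts, random ids for user repos.
func appVersionId(repoName, chartName, version string) string {
	if isBuiltInRepo(repoName) {
		return fmt.Sprintf("%s%s-%s-%s", applicationIdPrefix, repoName, chartName, version)
	}
	return "appv-" + "<uuid36>" // placeholder for idutils.GetUuid36
}

func main() {
	fmt.Println(appVersionId("builtin-stable", "nginx", "1.0.0")) // app-builtin-stable-nginx-1.0.0
	fmt.Println(appVersionId("my-repo", "nginx", "1.0.0"))        // appv-<uuid36>
}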

View File

@@ -16,32 +16,7 @@ limitations under the License.
package net
import (
"net"
"net/http"
"strings"
)
// 0 is considered an invalid port
func IsValidPort(port int) bool {
return port > 0 && port < 65535
}
func GetRequestIP(req *http.Request) string {
address := strings.Trim(req.Header.Get("X-Real-Ip"), " ")
if address != "" {
return address
}
address = strings.Trim(req.Header.Get("X-Forwarded-For"), " ")
if address != "" {
return address
}
address, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
return req.RemoteAddr
}
return address
}

View File

@@ -19,7 +19,6 @@ package reposcache
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io/ioutil"
@@ -28,7 +27,11 @@ import (
"strings"
"sync"
"k8s.io/client-go/tools/cache"
"github.com/Masterminds/semver/v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog"
"kubesphere.io/api/application/v1alpha1"
@@ -37,65 +40,89 @@ import (
"kubesphere.io/kubesphere/pkg/simple/client/openpitrix/helmrepoindex"
)
const (
CategoryIndexer = "category_indexer"
CategoryAnnotationKey = "app.kubesphere.io/category"
)
var WorkDir string
func NewReposCache() ReposCache {
return &cachedRepos{
chartsInRepo: map[workspace]map[string]int{},
repos: map[string]*v1alpha1.HelmRepo{},
apps: map[string]*v1alpha1.HelmApplication{},
versions: map[string]*v1alpha1.HelmApplicationVersion{},
repoCtgCounts: map[string]map[string]int{},
chartsInRepo: map[workspace]map[string]int{},
repos: map[string]*v1alpha1.HelmRepo{},
apps: map[string]*v1alpha1.HelmApplication{},
versions: map[string]*v1alpha1.HelmApplicationVersion{},
builtinCategoryCounts: map[string]int{},
}
}
type ReposCache interface {
AddRepo(repo *v1alpha1.HelmRepo) error
DeleteRepo(repo *v1alpha1.HelmRepo) error
UpdateRepo(old, new *v1alpha1.HelmRepo) error
GetApplication(string) (*v1alpha1.HelmApplication, bool)
GetAppVersion(string) (*v1alpha1.HelmApplicationVersion, bool, error)
GetAppVersionWithData(string) (*v1alpha1.HelmApplicationVersion, bool, error)
ListAppVersionsByAppId(appId string) (ret []*v1alpha1.HelmApplicationVersion, exists bool)
ListApplicationsByRepoId(repoId string) (ret []*v1alpha1.HelmApplication, exists bool)
ListApplicationsInRepo(repoId string) (ret []*v1alpha1.HelmApplication, exists bool)
ListApplicationsInBuiltinRepo(selector labels.Selector) (ret []*v1alpha1.HelmApplication, exists bool)
SetCategoryIndexer(indexer cache.Indexer)
CopyCategoryCount() map[string]int
}
type workspace string
type cachedRepos struct {
sync.RWMutex
chartsInRepo map[workspace]map[string]int
repoCtgCounts map[string]map[string]int
chartsInRepo map[workspace]map[string]int
// builtinCategoryCounts saves the count of every category in the built-in repo.
builtinCategoryCounts map[string]int
repos map[string]*v1alpha1.HelmRepo
apps map[string]*v1alpha1.HelmApplication
versions map[string]*v1alpha1.HelmApplicationVersion
// indexerOfHelmCtg is the indexer of HelmCategory, used to look up a category id by its name.
indexerOfHelmCtg cache.Indexer
}
func (c *cachedRepos) deleteRepo(repo *v1alpha1.HelmRepo) {
if len(repo.Status.Data) == 0 {
return
}
index, err := helmrepoindex.ByteArrayToSavedIndex([]byte(repo.Status.Data))
if err != nil {
klog.Errorf("json unmarshal repo %s failed, error: %s", repo.Name, err)
return
}
klog.V(4).Infof("delete repo %s from cache", repo.Name)
c.Lock()
defer c.Unlock()
klog.V(2).Infof("delete repo %s from cache", repo.Name)
repoId := repo.GetHelmRepoId()
ws := workspace(repo.GetWorkspace())
if _, exists := c.chartsInRepo[ws]; exists {
delete(c.chartsInRepo[ws], repoId)
}
delete(c.repoCtgCounts, repoId)
delete(c.repos, repoId)
for _, app := range index.Applications {
if _, exists := c.apps[app.ApplicationId]; !exists {
continue
}
if helmrepoindex.IsBuiltInRepo(repo.Name) {
ctgId := c.apps[app.ApplicationId].Labels[constants.CategoryIdLabelKey]
if ctgId != "" {
c.builtinCategoryCounts[ctgId] -= 1
}
}
delete(c.apps, app.ApplicationId)
for _, ver := range app.Charts {
delete(c.versions, ver.ApplicationVersionId)
@@ -118,10 +145,47 @@ func loadBuiltinChartData(name, version string) ([]byte, error) {
}
func (c *cachedRepos) DeleteRepo(repo *v1alpha1.HelmRepo) error {
c.Lock()
defer c.Unlock()
c.deleteRepo(repo)
return nil
}
// CopyCategoryCount copies the internal map to avoid `concurrent map iteration and map write`.
func (c *cachedRepos) CopyCategoryCount() map[string]int {
c.RLock()
defer c.RUnlock()
ret := make(map[string]int, len(c.builtinCategoryCounts))
for k, v := range c.builtinCategoryCounts {
ret[k] = v
}
return ret
}
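The copy matters because callers typically range over the result while informer callbacks keep writing to the cache; returning the internal map directly would trip Go's concurrent-map detector. A minimal race-free sketch of the same pattern (run with go run -race to verify):

package main

import (
	"fmt"
	"sync"
)

type counts struct {
	sync.RWMutex
	m map[string]int
}

// snapshot copies the map under the read lock, exactly like CopyCategoryCount.
func (c *counts) snapshot() map[string]int {
	c.RLock()
	defer c.RUnlock()
	out := make(map[string]int, len(c.m))
	for k, v := range c.m {
		out[k] = v
	}
	return out
}

func main() {
	c := &counts{m: map[string]int{"ctg-1": 2}}
	go func() { c.Lock(); c.m["ctg-2"]++; c.Unlock() }() // concurrent writer
	for k, v := range c.snapshot() {                     // safe: private copy
		fmt.Println(k, v)
	}
}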
func (c *cachedRepos) SetCategoryIndexer(indexer cache.Indexer) {
c.Lock()
c.indexerOfHelmCtg = indexer
c.Unlock()
}
// translateCategoryNameToId translates a category name to a category id.
// The caller must hold the lock.
func (c *cachedRepos) translateCategoryNameToId(ctgName string) string {
if c.indexerOfHelmCtg == nil || ctgName == "" {
return v1alpha1.UncategorizedId
}
if items, err := c.indexerOfHelmCtg.ByIndex(CategoryIndexer, ctgName); len(items) == 0 || err != nil {
return v1alpha1.UncategorizedId
} else {
obj, _ := items[0].(*v1alpha1.HelmCategory)
return obj.Name
}
}
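A self-contained sketch of the name-to-id lookup that the category indexer enables, wired up the same way as the informer indexer above (the category struct is a simplified stand-in for HelmCategory):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

type category struct {
	ID   string // object name, e.g. "ctg-xxxx"
	Name string // spec.name, e.g. "Database"
}

func main() {
	const byName = "category_indexer"
	indexer := cache.NewIndexer(
		func(obj interface{}) (string, error) { return obj.(*category).ID, nil },
		cache.Indexers{byName: func(obj interface{}) ([]string, error) {
			return []string{obj.(*category).Name}, nil
		}},
	)
	_ = indexer.Add(&category{ID: "ctg-1234", Name: "Database"})

	// Translate a category name to its id, falling back like the cache does.
	items, err := indexer.ByIndex(byName, "Database")
	if err != nil || len(items) == 0 {
		fmt.Println("uncategorized")
		return
	}
	fmt.Println(items[0].(*category).ID) // ctg-1234
}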
func (c *cachedRepos) GetApplication(appId string) (app *v1alpha1.HelmApplication, exists bool) {
c.RLock()
defer c.RUnlock()
@@ -131,11 +195,24 @@ func (c *cachedRepos) GetApplication(appId string) (app *v1alpha1.HelmApplicatio
return
}
func (c *cachedRepos) UpdateRepo(old, new *v1alpha1.HelmRepo) error {
if old.Status.Data == new.Status.Data {
return nil
}
c.Lock()
defer c.Unlock()
c.deleteRepo(old)
return c.addRepo(new, false)
}
func (c *cachedRepos) AddRepo(repo *v1alpha1.HelmRepo) error {
c.Lock()
defer c.Unlock()
return c.addRepo(repo, false)
}
//Add new Repo to cachedRepos
// Add a new Repo to cachedRepos
func (c *cachedRepos) addRepo(repo *v1alpha1.HelmRepo, builtin bool) error {
if len(repo.Status.Data) == 0 {
return nil
@@ -146,10 +223,7 @@ func (c *cachedRepos) addRepo(repo *v1alpha1.HelmRepo, builtin bool) error {
return err
}
klog.V(4).Infof("add repo %s to cache", repo.Name)
c.Lock()
defer c.Unlock()
klog.V(2).Infof("add repo %s to cache", repo.Name)
ws := workspace(repo.GetWorkspace())
if _, exists := c.chartsInRepo[ws]; !exists {
@@ -158,29 +232,27 @@ func (c *cachedRepos) addRepo(repo *v1alpha1.HelmRepo, builtin bool) error {
repoId := repo.GetHelmRepoId()
c.repos[repoId] = repo
//c.repoCtgCounts[repo.GetHelmRepoId()] = make(map[string]int)
if _, exists := c.repoCtgCounts[repoId]; !exists {
c.repoCtgCounts[repoId] = map[string]int{}
}
var appName string
chartsCount := 0
for key, app := range index.Applications {
if builtin {
appName = v1alpha1.HelmApplicationIdPrefix + app.Name
} else {
appName = app.ApplicationId
appName = app.ApplicationId
appLabels := make(map[string]string)
if helmrepoindex.IsBuiltInRepo(repo.Name) {
appLabels[constants.WorkspaceLabelKey] = "system-workspace"
}
HelmApp := v1alpha1.HelmApplication{
appLabels[constants.ChartRepoIdLabelKey] = repoId
helmApp := v1alpha1.HelmApplication{
ObjectMeta: metav1.ObjectMeta{
Name: appName,
Annotations: map[string]string{
constants.CreatorAnnotationKey: repo.GetCreator(),
},
Labels: map[string]string{
constants.ChartRepoIdLabelKey: repo.GetHelmRepoId(),
},
Labels: appLabels,
CreationTimestamp: metav1.Time{Time: app.Created},
},
Spec: v1alpha1.HelmApplicationSpec{
Name: key,
@@ -191,25 +263,18 @@ func (c *cachedRepos) addRepo(repo *v1alpha1.HelmRepo, builtin bool) error {
State: v1alpha1.StateActive,
},
}
c.apps[app.ApplicationId] = &HelmApp
c.apps[app.ApplicationId] = &helmApp
var ctg, appVerName string
var chartData []byte
for _, ver := range app.Charts {
chartsCount += 1
if ver.Annotations != nil && ver.Annotations["category"] != "" {
ctg = ver.Annotations["category"]
}
if builtin {
appVerName = base64.StdEncoding.EncodeToString([]byte(ver.Name + ver.Version))
chartData, err = loadBuiltinChartData(ver.Name, ver.Version)
if err != nil {
return err
}
} else {
appVerName = ver.ApplicationVersionId
}
var latestVersionName string
var latestSemver *semver.Version
// build all the versions of this app
for _, chartVersion := range app.Charts {
chartsCount += 1
hvw := helmrepoindex.HelmVersionWrapper{ChartVersion: &chartVersion.ChartVersion}
appVerName = chartVersion.ApplicationVersionId
version := &v1alpha1.HelmApplicationVersion{
ObjectMeta: metav1.ObjectMeta{
Name: appVerName,
@@ -218,42 +283,64 @@ func (c *cachedRepos) addRepo(repo *v1alpha1.HelmRepo, builtin bool) error {
constants.ChartApplicationIdLabelKey: appName,
constants.ChartRepoIdLabelKey: repo.GetHelmRepoId(),
},
CreationTimestamp: metav1.Time{Time: ver.Created},
CreationTimestamp: metav1.Time{Time: chartVersion.Created},
},
Spec: v1alpha1.HelmApplicationVersionSpec{
Metadata: &v1alpha1.Metadata{
Name: ver.Name,
AppVersion: ver.AppVersion,
Version: ver.Version,
Name: hvw.GetName(),
AppVersion: hvw.GetAppVersion(),
Version: hvw.GetVersion(),
},
URLs: ver.URLs,
Digest: ver.Digest,
URLs: chartVersion.URLs,
Digest: chartVersion.Digest,
Data: chartData,
},
Status: v1alpha1.HelmApplicationVersionStatus{
State: v1alpha1.StateActive,
},
}
c.versions[ver.ApplicationVersionId] = version
}
//modify application category
ctgId := ""
if ctg != "" {
if c.apps[app.ApplicationId].Annotations == nil {
c.apps[app.ApplicationId].Annotations = map[string]string{constants.CategoryIdLabelKey: ctg}
} else {
c.apps[app.ApplicationId].Annotations[constants.CategoryIdLabelKey] = ctg
// There is no need to store this information unless this is a built-in repo.
if helmrepoindex.IsBuiltInRepo(repo.Name) {
version.Spec.Sources = hvw.GetRawSources()
version.Spec.Maintainers = hvw.GetRawMaintainers()
version.Spec.Home = hvw.GetHome()
}
c.versions[chartVersion.ApplicationVersionId] = version
// Find the latest version.
currSemver, err := semver.NewVersion(version.GetSemver())
if err == nil {
if latestSemver == nil {
// the first valid semver
latestSemver = currSemver
latestVersionName = version.GetVersionName()
// Use the category of the latest version as the category of the app.
ctg = chartVersion.Annotations[CategoryAnnotationKey]
} else if latestSemver.LessThan(currSemver) {
// find a newer valid semver
latestSemver = currSemver
latestVersionName = version.GetVersionName()
ctg = chartVersion.Annotations[CategoryAnnotationKey]
}
} else {
// If the semver is invalid, just ignore it.
klog.V(2).Infof("parse version failed, id: %s, err: %s", version.Name, err)
}
ctgId = ctg
} else {
ctgId = v1alpha1.UncategorizedId
}
if _, exists := c.repoCtgCounts[repoId][ctgId]; !exists {
c.repoCtgCounts[repoId][ctgId] = 1
} else {
c.repoCtgCounts[repoId][ctgId] += 1
helmApp.Status.LatestVersion = latestVersionName
if helmrepoindex.IsBuiltInRepo(repo.Name) {
// Add category id to the apps in the built-in repo
ctgId := c.translateCategoryNameToId(ctg)
if helmApp.Labels == nil {
helmApp.Labels = map[string]string{}
}
helmApp.Labels[constants.CategoryIdLabelKey] = ctgId
c.builtinCategoryCounts[ctgId] += 1
}
}
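A minimal sketch of the latest-version selection above, using the same Masterminds/semver/v3 library that this change imports:

package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	var latest *semver.Version
	for _, raw := range []string{"1.2.0", "not-a-version", "1.10.0", "1.9.9"} {
		current, err := semver.NewVersion(raw)
		if err != nil {
			continue // invalid semvers are skipped, as in the cache
		}
		if latest == nil || latest.LessThan(current) {
			latest = current
		}
	}
	fmt.Println(latest) // 1.10.0
}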
@@ -262,7 +349,7 @@ func (c *cachedRepos) addRepo(repo *v1alpha1.HelmRepo, builtin bool) error {
return nil
}
func (c *cachedRepos) ListApplicationsByRepoId(repoId string) (ret []*v1alpha1.HelmApplication, exists bool) {
func (c *cachedRepos) ListApplicationsInRepo(repoId string) (ret []*v1alpha1.HelmApplication, exists bool) {
c.RLock()
defer c.RUnlock()
@@ -279,6 +366,23 @@ func (c *cachedRepos) ListApplicationsByRepoId(repoId string) (ret []*v1alpha1.H
return ret, true
}
func (c *cachedRepos) ListApplicationsInBuiltinRepo(selector labels.Selector) (ret []*v1alpha1.HelmApplication, exists bool) {
c.RLock()
defer c.RUnlock()
ret = make([]*v1alpha1.HelmApplication, 0, 20)
for _, app := range c.apps {
if strings.HasPrefix(app.GetHelmRepoId(), v1alpha1.BuiltinRepoPrefix) {
if selector != nil && !selector.Empty() &&
(app.Labels == nil || !selector.Matches(labels.Set(app.Labels))) { // If the selector is not empty, we must check whether the labels of the app match the selector.
continue
}
ret = append(ret, app)
}
}
return ret, true
}
func (c *cachedRepos) ListAppVersionsByAppId(appId string) (ret []*v1alpha1.HelmApplicationVersion, exists bool) {
c.RLock()
defer c.RUnlock()

View File

@@ -49,6 +49,7 @@ const (
HelmApplicationAppStoreSuffix = "-store"
HelmApplicationIdPrefix = "app-"
HelmRepoIdPrefix = "repo-"
BuiltinRepoPrefix = "builtin-"
HelmApplicationVersionIdPrefix = "appv-"
HelmCategoryIdPrefix = "ctg-"
HelmAttachmentPrefix = "att-"
@@ -59,5 +60,6 @@ const (
ApplicationInstance = "app.kubesphere.io/instance"
RepoSyncPeriod = "app.kubesphere.io/sync-period"
OriginWorkspaceLabelKey = "kubesphere.io/workspace-origin"
)

View File

@@ -79,6 +79,8 @@ func init() {
&FederatedUserList{},
&FederatedGroup{},
&FederatedGroupList{},
&FederatedGroupBinding{},
&FederatedGroupBindingList{},
&FederatedWorkspace{},
&FederatedWorkspaceList{},
&FederatedWorkspaceRole{},

View File

@@ -155,6 +155,9 @@ func (f *Framework) GenericClient(userAgent string) client.Client {
Host: ctx.Host,
Username: ctx.Username,
Password: ctx.Password,
ContentConfig: rest.ContentConfig{
ContentType: runtime.ContentTypeJSON,
},
}
rest.AddUserAgent(config, userAgent)

View File

@@ -34,13 +34,10 @@ import (
func NewClient(s *runtime.Scheme, user, password string) (client.Client, error) {
ctx := framework.TestContext
token, err := getToken(ctx.Host, user, password)
if err != nil {
return nil, err
}
config := &rest.Config{
Host: ctx.Host,
BearerToken: token.AccessToken,
Host: ctx.Host,
Username: user,
Password: password,
}
return generic.New(config, client.Options{Scheme: s})
@@ -48,13 +45,10 @@ func NewClient(s *runtime.Scheme, user, passsword string) (client.Client, error)
func NewRestClient(user, password string) (*restclient.RestClient, error) {
ctx := framework.TestContext
token, err := getToken(ctx.Host, user, password)
if err != nil {
return nil, err
}
config := &rest.Config{
Host: ctx.Host,
BearerToken: token.AccessToken,
Host: ctx.Host,
Username: user,
Password: password,
}
return restclient.NewForConfig(config)

View File

@@ -27,6 +27,7 @@ import (
"kubesphere.io/api/iam/v1alpha2"
"kubesphere.io/kubesphere/pkg/utils/stringutils"
"kubesphere.io/kubesphere/test/e2e/constant"
"kubesphere.io/kubesphere/test/e2e/framework"
"kubesphere.io/kubesphere/test/e2e/framework/iam"
@@ -73,6 +74,11 @@ var _ = Describe("Groups", func() {
_, err = restClient.IamV1alpha2().Groups().CreateBinding(context.TODO(), workspace, group, UserName)
framework.ExpectNoError(err)
Eventually(func() bool {
user, err := iam.GetUser(adminClient, UserName)
return err == nil && stringutils.FindString(user.Spec.Groups, group) != -1
}, timeout, interval).Should(BeTrue())
By("Creating a new client with user authentication")
userClient, err = iam.NewClient(f.GetScheme(), u.Name, constant.DefaultPassword)
framework.ExpectNoError(err)

test/e2e/kind.yaml Normal file
View File

@@ -0,0 +1,18 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
extraMounts:
- hostPath: /etc/localtime
containerPath: /etc/localtime
extraPortMappings:
- containerPort: 30881
hostPort: 9090
- role: worker
extraMounts:
- hostPath: /etc/localtime
containerPath: /etc/localtime
- role: worker
extraMounts:
- hostPath: /etc/localtime
containerPath: /etc/localtime

vendor/modules.txt vendored
View File

@@ -272,6 +272,7 @@ github.com/emirpasic/gods/trees
github.com/emirpasic/gods/trees/binaryheap
github.com/emirpasic/gods/utils
# github.com/evanphx/json-patch v4.11.0+incompatible => github.com/evanphx/json-patch v4.9.0+incompatible
## explicit
github.com/evanphx/json-patch
# github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d => github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d
github.com/exponent-io/jsonpath
@@ -491,7 +492,7 @@ github.com/jszwec/csvutil
github.com/kevinburke/ssh_config
# github.com/konsorten/go-windows-terminal-sequences v1.0.2 => github.com/konsorten/go-windows-terminal-sequences v1.0.2
github.com/konsorten/go-windows-terminal-sequences
# github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
# github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 => github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
## explicit
github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1
github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1beta1
@@ -922,7 +923,7 @@ go.uber.org/atomic
go.uber.org/multierr
# go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee => go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee
go.uber.org/tools/update-license
# go.uber.org/zap v1.17.0 => go.uber.org/zap v1.13.0
# go.uber.org/zap v1.19.0 => go.uber.org/zap v1.13.0
go.uber.org/zap
go.uber.org/zap/buffer
go.uber.org/zap/internal/bufferpool
@@ -985,7 +986,7 @@ golang.org/x/oauth2/internal
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
golang.org/x/sync/singleflight
# golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 => golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e
# golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 => golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e
golang.org/x/sys/cpu
golang.org/x/sys/plan9
golang.org/x/sys/unix
@@ -1014,7 +1015,7 @@ golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
# golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 => golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
# golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac => golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
golang.org/x/time/rate
# golang.org/x/tools v0.1.5 => golang.org/x/tools v0.0.0-20190710153321-831012c29e42
golang.org/x/tools/go/analysis
@@ -1328,7 +1329,7 @@ istio.io/client-go/pkg/listers/security/v1beta1
## explicit
istio.io/gogo-genproto/googleapis/google/api
istio.io/gogo-genproto/googleapis/google/rpc
# k8s.io/api v0.21.3 => k8s.io/api v0.21.2
# k8s.io/api v0.21.4 => k8s.io/api v0.21.2
## explicit
k8s.io/api/admission/v1
k8s.io/api/admission/v1beta1
@@ -1376,7 +1377,7 @@ k8s.io/api/scheduling/v1beta1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
# k8s.io/apiextensions-apiserver v0.21.3 => k8s.io/apiextensions-apiserver v0.21.2
# k8s.io/apiextensions-apiserver v0.21.4 => k8s.io/apiextensions-apiserver v0.21.2
## explicit
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
@@ -1395,7 +1396,7 @@ k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensio
k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces
k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1
k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1
# k8s.io/apimachinery v0.21.3 => k8s.io/apimachinery v0.21.2
# k8s.io/apimachinery v0.21.4 => k8s.io/apimachinery v0.21.2
## explicit
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
@@ -1890,7 +1891,7 @@ k8s.io/code-generator/cmd/lister-gen/args
k8s.io/code-generator/cmd/lister-gen/generators
k8s.io/code-generator/pkg/namer
k8s.io/code-generator/pkg/util
# k8s.io/component-base v0.21.2 => k8s.io/component-base v0.21.2
# k8s.io/component-base v0.21.4 => k8s.io/component-base v0.21.2
## explicit
k8s.io/component-base/cli/flag
k8s.io/component-base/config
@@ -1957,7 +1958,7 @@ k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1
k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/fake
k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1
k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/fake
# k8s.io/utils v0.0.0-20210527160623-6fdb442a123b => k8s.io/utils v0.0.0-20200603063816-c1c6865ac451
# k8s.io/utils v0.0.0-20210802155522-efc7438f0176 => k8s.io/utils v0.0.0-20200603063816-c1c6865ac451
## explicit
k8s.io/utils/buffer
k8s.io/utils/exec
@@ -1983,6 +1984,7 @@ kubesphere.io/api/network/calicov3
kubesphere.io/api/network/crdinstall
kubesphere.io/api/network/v1alpha1
kubesphere.io/api/notification/v2beta1
kubesphere.io/api/notification/v2beta2
kubesphere.io/api/quota/v1alpha2
kubesphere.io/api/servicemesh/crdinstall
kubesphere.io/api/servicemesh/v1alpha2
@@ -2015,7 +2017,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
# sigs.k8s.io/application v0.8.4-0.20201016185654-c8e2959e57a0 => sigs.k8s.io/application v0.8.4-0.20201016185654-c8e2959e57a0
## explicit
sigs.k8s.io/application/api/v1beta1
# sigs.k8s.io/controller-runtime v0.9.3 => sigs.k8s.io/controller-runtime v0.9.3
# sigs.k8s.io/controller-runtime v0.9.8-0.20211019125639-aa2b3e68a52d => sigs.k8s.io/controller-runtime v0.9.8-0.20211019125639-aa2b3e68a52d
## explicit
sigs.k8s.io/controller-runtime
sigs.k8s.io/controller-runtime/pkg/builder
@@ -2544,6 +2546,7 @@ sigs.k8s.io/yaml
# github.com/kr/text => github.com/kr/text v0.1.0
# github.com/kshvakov/clickhouse => github.com/kshvakov/clickhouse v1.3.5
# github.com/kubernetes-csi/external-snapshotter/client/v3 => github.com/kubernetes-csi/external-snapshotter/client/v3 v3.0.0
# github.com/kubernetes-csi/external-snapshotter/client/v4 => github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
# github.com/kubesphere/sonargo => github.com/kubesphere/sonargo v0.0.2
# github.com/kylelemons/go-gypsy => github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28
# github.com/kylelemons/godebug => github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb
@@ -2854,7 +2857,7 @@ sigs.k8s.io/yaml
# rsc.io/sampler => rsc.io/sampler v1.3.0
# sigs.k8s.io/apiserver-network-proxy/konnectivity-client => sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19
# sigs.k8s.io/application => sigs.k8s.io/application v0.8.4-0.20201016185654-c8e2959e57a0
# sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.3
# sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.9.8-0.20211019125639-aa2b3e68a52d
# sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.6.2
# sigs.k8s.io/kind => sigs.k8s.io/kind v0.8.1
# sigs.k8s.io/kubebuilder/v3 => sigs.k8s.io/kubebuilder/v3 v3.0.0-alpha.0.0.20210716121009-fde793f20067

View File

@@ -40,6 +40,7 @@ TOOLS_BIN_DIR := $(TOOLS_DIR)/bin
GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/golangci-lint)
GO_APIDIFF := $(TOOLS_BIN_DIR)/go-apidiff
CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen
ENVTEST_DIR := $(abspath tools/setup-envtest)
# The help target will print out all targets with their descriptions organized below their categories. The categories are represented by `##@` and the target descriptions by `##`.
# The awk command is responsible for reading the entire set of makefiles included in this invocation, looking for lines of the form `xyz: ## something`, and then pretty-formatting the target and help. Then, if there's a line with `##@ something`, that gets pretty-printed as a category.
@@ -97,6 +98,7 @@ lint-fix: $(GOLANGCI_LINT) ## Lint the codebase and run auto-fixers if supported
modules: ## Runs go mod to ensure modules are up to date.
go mod tidy
cd $(TOOLS_DIR); go mod tidy
cd $(ENVTEST_DIR); go mod tidy
.PHONY: generate
generate: $(CONTROLLER_GEN) ## Runs controller-gen for internal types for config file

View File

@@ -11,20 +11,20 @@ require (
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/onsi/ginkgo v1.16.4
github.com/onsi/gomega v1.13.0
github.com/onsi/gomega v1.15.0
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/client_model v0.2.0
go.uber.org/goleak v1.1.10
go.uber.org/zap v1.17.0
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40
golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6
go.uber.org/zap v1.19.0
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
gomodules.xyz/jsonpatch/v2 v2.2.0
google.golang.org/appengine v1.6.7 // indirect
k8s.io/api v0.21.2
k8s.io/apiextensions-apiserver v0.21.2
k8s.io/apimachinery v0.21.2
k8s.io/client-go v0.21.2
k8s.io/component-base v0.21.2
k8s.io/utils v0.0.0-20210527160623-6fdb442a123b
k8s.io/api v0.21.4
k8s.io/apiextensions-apiserver v0.21.4
k8s.io/apimachinery v0.21.4
k8s.io/client-go v0.21.4
k8s.io/component-base v0.21.4
k8s.io/utils v0.0.0-20210802155522-efc7438f0176
sigs.k8s.io/yaml v1.2.0
)

View File

@@ -47,6 +47,8 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -293,15 +295,14 @@ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU=
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
@@ -400,8 +401,8 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -533,8 +534,9 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 h1:c8PlLMqBbOHoqtjteWm5/kbe6rNY2pbRfbIMVnepueo=
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
@@ -552,8 +554,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 h1:Vv0JUPWTyeqUq42B2WJ1FeIDjjvGKoA2Ss+Ts0lAVbs=
golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -699,18 +701,18 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y=
k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU=
k8s.io/apiextensions-apiserver v0.21.2 h1:+exKMRep4pDrphEafRvpEi79wTnCFMqKf8LBtlA3yrE=
k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA=
k8s.io/apimachinery v0.21.2 h1:vezUc/BHqWlQDnZ+XkrpXSmnANSLbpnlpwo0Lhk0gpc=
k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM=
k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw=
k8s.io/client-go v0.21.2 h1:Q1j4L/iMN4pTw6Y4DWppBoUxgKO8LbffEMVEV00MUp0=
k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA=
k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U=
k8s.io/component-base v0.21.2 h1:EsnmFFoJ86cEywC0DoIkAUiEV6fjgauNugiw1lmIjs4=
k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc=
k8s.io/api v0.21.4 h1:WtDkzTAuI31WZKDPeIYpEUA+WeUfXAmA7gwj6nzFfbc=
k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk=
k8s.io/apiextensions-apiserver v0.21.4 h1:HkajN/vmT/9HnFmUxvpXfSGkTCvH/ax4e3+j6mqWUDU=
k8s.io/apiextensions-apiserver v0.21.4/go.mod h1:OoC8LhI9LnV+wKjZkXIBbLUwtnOGJiTRE33qctH5CIk=
k8s.io/apimachinery v0.21.4 h1:KDq0lWZVslHkuE5I7iGAQHwpK0aDTlar1E7IWEc4CNw=
k8s.io/apimachinery v0.21.4/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI=
k8s.io/apiserver v0.21.4/go.mod h1:SErUuFBBPZUcD2nsUU8hItxoYheqyYr2o/pCINEPW8g=
k8s.io/client-go v0.21.4 h1:tcwj167If+v+pIGrCjaPG7hFo6SqFPFCCgMJy+Vm8Jc=
k8s.io/client-go v0.21.4/go.mod h1:t0/eMKyUAq/DoQ7vW8NVVA00/nomlwC+eInsS8PxSew=
k8s.io/code-generator v0.21.4/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo=
k8s.io/component-base v0.21.4 h1:Bc0AttSyhJFVXEIHz+VX+D11j/5z7SPPhl6whiXaRzs=
k8s.io/component-base v0.21.4/go.mod h1:ZKG0eHVX+tUDcaoIGpU3Vtk4TIjMddN9uhEWDmW6Nyg=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
@@ -720,15 +722,15 @@ k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s=
k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176 h1:Mx0aa+SUAcNRQbs5jUzV8lkDlGFU8laZsY9jrcVX5SY=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8=
sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=

View File

@@ -274,8 +274,9 @@ func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformer
ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
ip.selectors[gvk].ApplyToList(&opts)
res := listObj.DeepCopyObject()
isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot
err := client.Get().NamespaceIfScoped(ip.namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res)
namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk])
isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot
err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res)
return res, err
},
// Setup the watch function
@@ -283,8 +284,9 @@ func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformer
ip.selectors[gvk].ApplyToList(&opts)
// Watch needs to be set to true separately
opts.Watch = true
isNamespaceScoped := ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot
return client.Get().NamespaceIfScoped(ip.namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx)
namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk])
isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot
return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx)
},
}, nil
}
@@ -313,8 +315,9 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInform
return &cache.ListWatch{
ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
ip.selectors[gvk].ApplyToList(&opts)
if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
return dynamicClient.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts)
namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk])
if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
return dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts)
}
return dynamicClient.Resource(mapping.Resource).List(ctx, opts)
},
@@ -323,8 +326,9 @@ func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInform
ip.selectors[gvk].ApplyToList(&opts)
// Watch needs to be set to true separately
opts.Watch = true
if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
return dynamicClient.Resource(mapping.Resource).Namespace(ip.namespace).Watch(ctx, opts)
namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk])
if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
return dynamicClient.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts)
}
return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts)
},
@@ -358,8 +362,9 @@ func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersM
return &cache.ListWatch{
ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
ip.selectors[gvk].ApplyToList(&opts)
if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
return client.Resource(mapping.Resource).Namespace(ip.namespace).List(ctx, opts)
namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk])
if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
return client.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts)
}
return client.Resource(mapping.Resource).List(ctx, opts)
},
@@ -368,8 +373,9 @@ func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersM
ip.selectors[gvk].ApplyToList(&opts)
// Watch needs to be set to true separately
opts.Watch = true
if ip.namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
return client.Resource(mapping.Resource).Namespace(ip.namespace).Watch(ctx, opts)
namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors[gvk])
if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot {
return client.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts)
}
return client.Resource(mapping.Resource).Watch(ctx, opts)
},
@@ -386,3 +392,23 @@ func resyncPeriod(resync time.Duration) func() time.Duration {
return time.Duration(float64(resync.Nanoseconds()) * factor)
}
}
// restrictNamespaceBySelector returns either a global restriction for all ListWatches
// if not default/empty, or the namespace that a ListWatch for the specific resource
// is restricted to, based on a specified field selector for the metadata.namespace field.
func restrictNamespaceBySelector(namespaceOpt string, s Selector) string {
if namespaceOpt != "" {
// namespace is already restricted
return namespaceOpt
}
fieldSelector := s.Field
if fieldSelector == nil || fieldSelector.Empty() {
return ""
}
// check whether a selector includes the namespace field
value, found := fieldSelector.RequiresExactMatch("metadata.namespace")
if found {
return value
}
return ""
}
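A sketch of the caller side: constructing a field selector that pins metadata.namespace, which is what the RequiresExactMatch check above picks up (uses k8s.io/apimachinery's fields package):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	selector := fields.OneTermEqualSelector("metadata.namespace", "kube-system")
	// restrictNamespaceBySelector relies on this exact-match lookup to
	// narrow the ListWatch from cluster-wide to a single namespace.
	if value, found := selector.RequiresExactMatch("metadata.namespace"); found {
		fmt.Println("ListWatch restricted to namespace:", value) // kube-system
	}
}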

View File

@@ -157,14 +157,12 @@ func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConf
protobufSchemeLock.RUnlock()
}
if cfg.NegotiatedSerializer == nil {
if isUnstructured {
// If the object is unstructured, we need to preserve the GVK information.
// Use our own custom serializer.
cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}}
} else {
cfg.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: codecs}
}
if isUnstructured {
// If the object is unstructured, we need to preserve the GVK information.
// Use our own custom serializer.
cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}}
} else {
cfg.NegotiatedSerializer = serializer.WithoutConversionCodecFactory{CodecFactory: codecs}
}
return cfg

View File

@@ -207,7 +207,7 @@ func CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f M
return OperationResultCreated, nil
}
existing := obj.DeepCopyObject() //nolint:ifshort
existing := obj.DeepCopyObject() //nolint
if err := mutate(f, key, obj); err != nil {
return OperationResultNone, err
}

View File

@@ -0,0 +1,24 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flock
import "errors"
var (
// ErrAlreadyLocked is returned when the file is already locked.
ErrAlreadyLocked = errors.New("the file is already locked")
)

View File

@@ -18,18 +18,30 @@ limitations under the License.
package flock
import "golang.org/x/sys/unix"
import (
"errors"
"fmt"
"os"
"golang.org/x/sys/unix"
)
// Acquire acquires a lock on a file for the duration of the process. This method
// is reentrant.
func Acquire(path string) error {
fd, err := unix.Open(path, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0600)
if err != nil {
if errors.Is(err, os.ErrExist) {
return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked)
}
return err
}
// We don't need to close the fd since we should hold
// it until the process exits.
return unix.Flock(fd, unix.LOCK_EX)
err = unix.Flock(fd, unix.LOCK_NB|unix.LOCK_EX)
if errors.Is(err, unix.EWOULDBLOCK) { // This condition requires LOCK_NB.
return fmt.Errorf("cannot lock file %q: %w", path, ErrAlreadyLocked)
}
return err
}
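The key change is LOCK_NB: without it, Flock blocks until the current holder exits instead of failing fast so the caller can try the next port. A hedged standalone sketch of the same non-blocking pattern on unix (using golang.org/x/sys/unix directly, since this flock package is internal to controller-runtime):

//go:build unix

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sys/unix"
)

func tryLock(path string) error {
	fd, err := unix.Open(path, unix.O_CREAT|unix.O_RDWR|unix.O_CLOEXEC, 0600)
	if err != nil {
		return err
	}
	// LOCK_NB makes Flock return EWOULDBLOCK immediately when another
	// process already holds the lock, instead of blocking.
	err = unix.Flock(fd, unix.LOCK_EX|unix.LOCK_NB)
	if errors.Is(err, unix.EWOULDBLOCK) {
		return fmt.Errorf("%q is already locked", path)
	}
	return err
}

func main() {
	fmt.Println(tryLock("/tmp/demo.lock")) // <nil> when no one holds the lock
}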

View File

@@ -17,6 +17,7 @@ limitations under the License.
package addr
import (
"errors"
"fmt"
"io/fs"
"net"
@@ -31,7 +32,7 @@ import (
// TODO(directxman12): interface / release functionality for external port managers
const (
portReserveTime = 10 * time.Minute
portReserveTime = 2 * time.Minute
portConflictRetry = 100
portFilePrefix = "port-"
)
@@ -76,7 +77,8 @@ func (c *portCache) add(port int) (bool, error) {
return false, err
}
// Try allocating new port, by acquiring a file.
if err := flock.Acquire(fmt.Sprintf("%s/%s%d", cacheDir, portFilePrefix, port)); os.IsExist(err) {
path := fmt.Sprintf("%s/%s%d", cacheDir, portFilePrefix, port)
if err := flock.Acquire(path); errors.Is(err, flock.ErrAlreadyLocked) {
return false, nil
} else if err != nil {
return false, err
@@ -86,22 +88,19 @@ func (c *portCache) add(port int) (bool, error) {
var cache = &portCache{}
func suggest(listenHost string) (int, string, error) {
func suggest(listenHost string) (*net.TCPListener, int, string, error) {
if listenHost == "" {
listenHost = "localhost"
}
addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(listenHost, "0"))
if err != nil {
return -1, "", err
return nil, -1, "", err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return -1, "", err
return nil, -1, "", err
}
if err := l.Close(); err != nil {
return -1, "", err
}
return l.Addr().(*net.TCPAddr).Port,
return l, l.Addr().(*net.TCPAddr).Port,
addr.IP.String(),
nil
}
@@ -112,10 +111,11 @@ func suggest(listenHost string) (int, string, error) {
// allocated within 1 minute.
func Suggest(listenHost string) (int, string, error) {
for i := 0; i < portConflictRetry; i++ {
port, resolvedHost, err := suggest(listenHost)
listener, port, resolvedHost, err := suggest(listenHost)
if err != nil {
return -1, "", err
}
defer listener.Close()
if ok, err := cache.add(port); ok {
return port, resolvedHost, nil
} else if err != nil {

View File

@@ -157,6 +157,11 @@ func (e *Etcd) defaultArgs() map[string][]string {
args["advertise-client-urls"] = []string{e.URL.String()}
args["listen-client-urls"] = []string{e.URL.String()}
}
// Add the unsafe-no-fsync flag, available since etcd 3.5
if ok, _ := e.processState.CheckFlag("unsafe-no-fsync"); ok {
args["unsafe-no-fsync"] = []string{"true"}
}
return args
}

View File

@@ -76,7 +76,7 @@ func (p *loggerPromise) V(l *DelegatingLogger, level int) *loggerPromise {
// Fulfill instantiates the Logger with the provided logger.
func (p *loggerPromise) Fulfill(parentLogger logr.Logger) {
var logger = parentLogger
logger := logr.WithCallDepth(parentLogger, 1)
if p.name != nil {
logger = logger.WithName(*p.name)
}

View File

@@ -37,9 +37,9 @@ import (
"sigs.k8s.io/controller-runtime/pkg/config"
"sigs.k8s.io/controller-runtime/pkg/config/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/healthz"
logf "sigs.k8s.io/controller-runtime/pkg/internal/log"
intrec "sigs.k8s.io/controller-runtime/pkg/internal/recorder"
"sigs.k8s.io/controller-runtime/pkg/leaderelection"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/metrics"
"sigs.k8s.io/controller-runtime/pkg/recorder"
"sigs.k8s.io/controller-runtime/pkg/runtime/inject"
@@ -572,7 +572,7 @@ func setOptionsDefaults(options Options) Options {
}
if options.Logger == nil {
options.Logger = logf.RuntimeLog.WithName("manager")
options.Logger = log.Log
}
return options

View File

@@ -292,6 +292,9 @@ func (s *Server) Start(ctx context.Context) error {
// StartedChecker returns an healthz.Checker which is healthy after the
// server has been started.
func (s *Server) StartedChecker() healthz.Checker {
config := &tls.Config{
InsecureSkipVerify: true, // nolint:gosec // config is used to connect to our own webhook port.
}
return func(req *http.Request) error {
s.mu.Lock()
defer s.mu.Unlock()
@@ -300,11 +303,15 @@ func (s *Server) StartedChecker() healthz.Checker {
return fmt.Errorf("webhook server has not been started yet")
}
conn, err := net.DialTimeout("tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), 10*time.Second)
d := &net.Dialer{Timeout: 10 * time.Second}
conn, err := tls.DialWithDialer(d, "tcp", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)), config)
if err != nil {
return fmt.Errorf("webhook server is not reachable: %v", err)
}
conn.Close()
if err := conn.Close(); err != nil {
return fmt.Errorf("webhook server is not reachable: closing connection: %v", err)
}
return nil
}
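Since the webhook server only serves TLS, a plain TCP dial can report "reachable" before the TLS stack is actually serving; the checker above therefore completes a full handshake. A minimal standalone sketch of that probe (host and port are placeholders):

package main

import (
	"crypto/tls"
	"fmt"
	"net"
	"time"
)

func webhookReachable(host, port string) error {
	dialer := &net.Dialer{Timeout: 10 * time.Second}
	// InsecureSkipVerify is acceptable here: we probe our own endpoint
	// and only care that the TLS handshake completes.
	conn, err := tls.DialWithDialer(dialer, "tcp", net.JoinHostPort(host, port), &tls.Config{
		InsecureSkipVerify: true, // nolint:gosec
	})
	if err != nil {
		return fmt.Errorf("webhook server is not reachable: %v", err)
	}
	return conn.Close()
}

func main() {
	fmt.Println(webhookReachable("localhost", "9443"))
}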