Merge pull request #6452 from ks-ci-bot/cherry-pick-6451-to-master
Update branch to latest state
@@ -152,6 +152,9 @@ func (s *ControllerManagerOptions) Merge(conf *config.Config) {
	if conf.TerminalOptions != nil {
		s.TerminalOptions = conf.TerminalOptions
	}
	if conf.KubeconfigOptions != nil {
		s.KubeconfigOptions = conf.KubeconfigOptions
	}
	if conf.HelmExecutorOptions != nil {
		s.HelmExecutorOptions = conf.HelmExecutorOptions
	}
@@ -40,11 +40,13 @@ import (
	"kubesphere.io/kubesphere/pkg/controller/loginrecord"
	"kubesphere.io/kubesphere/pkg/controller/namespace"
	"kubesphere.io/kubesphere/pkg/controller/quota"
	"kubesphere.io/kubesphere/pkg/controller/resourceprotection"
	"kubesphere.io/kubesphere/pkg/controller/role"
	"kubesphere.io/kubesphere/pkg/controller/rolebinding"
	"kubesphere.io/kubesphere/pkg/controller/roletemplate"
	"kubesphere.io/kubesphere/pkg/controller/secret"
	"kubesphere.io/kubesphere/pkg/controller/serviceaccount"
	"kubesphere.io/kubesphere/pkg/controller/serviceaccounttoken"
	"kubesphere.io/kubesphere/pkg/controller/storageclass"
	"kubesphere.io/kubesphere/pkg/controller/telemetry"
	"kubesphere.io/kubesphere/pkg/controller/user"

@@ -115,8 +117,11 @@ func init() {
	runtime.Must(controller.Register(&application.AppVersionReconciler{}))
	// k8s application
	runtime.Must(controller.Register(&k8sapplication.Reconciler{}))
	runtime.Must(controller.Register(&application.ReleaseWebhook{}))
	// kubectl
	runtime.Must(controller.Register(&kubectl.Reconciler{}))
	runtime.Must(controller.Register(&serviceaccounttoken.Reconciler{}))
	runtime.Must(controller.Register(&resourceprotection.Webhook{}))
}

func NewControllerManagerCommand() *cobra.Command {
@@ -2,8 +2,7 @@

CRDS_PATH=$1
echo "ks-crds pre upgrade..."
# shellcheck disable=SC1060
for crd in `ls $CRDS_PATH|grep \.yaml$`; do
  echo $crd
  kubectl apply -f $CRDS_PATH/$crd
for crd in "$CRDS_PATH"/*.yaml; do
  basename "$crd"
  kubectl apply -f "$crd"
done
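The hunk above swaps an `ls | grep` pipeline for a quoted shell glob. A minimal, self-contained sketch (hypothetical paths) of why that matters:

# Filenames with spaces break the old pipeline but survive the new glob.
mkdir -p /tmp/crds-demo && touch "/tmp/crds-demo/a b.yaml" /tmp/crds-demo/c.yaml

# Old style: command substitution word-splits "a b.yaml" into two bogus items.
for crd in `ls /tmp/crds-demo | grep \.yaml$`; do echo "old: $crd"; done

# New style: the glob expands to whole paths and quoting preserves them.
for crd in "/tmp/crds-demo"/*.yaml; do echo "new: $(basename "$crd")"; done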
@@ -1,48 +1,39 @@
#!/usr/bin/env bash

# set -x

CRD_NAMES=$1
MAPPING_CONFIG=$2

for extension in `kubectl get installplan -o json | jq -r '.items[] | select(.status.state == "Installed") | .metadata.name'`
do
  namespace=$(kubectl get installplan $extension -o=jsonpath='{.status.targetNamespace}')
  version=$(kubectl get extension $extension -o=jsonpath='{.status.installedVersion}')
  extensionversion=$extension-$version
  echo "Found extension $extensionversion installed"
  helm status $extension --namespace $namespace
  if [ $? -eq 0 ]; then
    helm mapkubeapis $extension --namespace $namespace --mapfile $MAPPING_CONFIG
for extension in $(kubectl get installplan -o json | jq -r '.items[] | select(.status.state == "Installed") | .metadata.name'); do
  namespace=$(kubectl get installplan "$extension" -o=jsonpath='{.status.targetNamespace}')
  version=$(kubectl get extension "$extension" -o=jsonpath='{.status.installedVersion}')
  extensionVersion="$extension-$version"
  echo "Found extension $extensionVersion installed"
  if helm status "$extension" --namespace "$namespace" &>/dev/null; then
    helm mapkubeapis "$extension" --namespace "$namespace" --mapfile "$MAPPING_CONFIG"
  fi
  helm status $extension-agent --namespace $namespace
  if [ $? -eq 0 ]; then
    helm mapkubeapis $extension-agent --namespace $namespace --mapfile $MAPPING_CONFIG
  if helm status "$extension-agent" --namespace "$namespace" &>/dev/null; then
    helm mapkubeapis "$extension-agent" --namespace "$namespace" --mapfile "$MAPPING_CONFIG"
  fi
done


# remove namespace's finalizers && ownerReferences
kubectl patch workspaces.tenant.kubesphere.io system-workspace -p '{"metadata":{"finalizers":[]}}' --type=merge
kubectl patch workspacetemplates.tenant.kubesphere.io system-workspace -p '{"metadata":{"finalizers":[]}}' --type=merge
for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}' -l 'kubesphere.io/managed=true')
do
  kubectl label ns $ns kubesphere.io/workspace- && \
  kubectl patch ns $ns -p '{"metadata":{"ownerReferences":[]}}' --type=merge && \
  echo "{\"kind\":\"Namespace\",\"apiVersion\":\"v1\",\"metadata\":{\"name\":\"$ns\",\"finalizers\":null}}" | kubectl replace --raw "/api/v1/namespaces/$ns/finalize" -f -

for ns in $(kubectl get ns -o jsonpath='{.items[*].metadata.name}' -l 'kubesphere.io/managed=true'); do
  kubectl label ns "$ns" kubesphere.io/workspace- && \
  kubectl patch ns "$ns" -p '{"metadata":{"ownerReferences":[]}}' --type=merge && \
  echo '{"kind":"Namespace","apiVersion":"v1","metadata":{"name":"'"$ns"'","finalizers":null}}' | kubectl replace --raw "/api/v1/namespaces/$ns/finalize" -f -
done


# delete crds
for crd in `kubectl get crds -o jsonpath="{.items[*].metadata.name}"`
do
  if [[ ${CRD_NAMES[@]/${crd}/} != ${CRD_NAMES[@]} ]]; then
    scop=$(eval echo $(kubectl get crd ${crd} -o jsonpath="{.spec.scope}"))
    if [[ $scop =~ "Namespaced" ]] ; then
      kubectl get $crd -A --no-headers | awk '{print $1" "$2" ""'$crd'"}' | xargs -n 3 sh -c 'kubectl patch $2 -n $0 $1 -p "{\"metadata\":{\"finalizers\":null}}" --type=merge 2>/dev/null && kubectl delete $2 -n $0 $1 2>/dev/null'
    else
      kubectl get $crd -A --no-headers | awk '{print $1" ""'$crd'"}' | xargs -n 2 sh -c 'kubectl patch $1 $0 -p "{\"metadata\":{\"finalizers\":null}}" --type=merge 2>/dev/null && kubectl delete $1 $0 2>/dev/null'
    fi
    kubectl delete crd $crd 2>/dev/null;
for crd in $(kubectl get crds -o jsonpath='{.items[*].metadata.name}'); do
  if [[ " ${CRD_NAMES[*]} " =~ ${crd} ]]; then
    echo "Deleting CRD $crd"
    scope=$(kubectl get crd "$crd" -o jsonpath='{.spec.scope}')
    if [[ $scope == "Namespaced" ]]; then
      kubectl get "$crd" -A --no-headers | awk '{print $1" "$2" ""'"$crd"'"}' | xargs -n 3 sh -c 'kubectl patch $2 -n $0 $1 -p "{\"metadata\":{\"finalizers\":null}}" --type=merge 2>/dev/null && kubectl delete $2 -n $0 $1 2>/dev/null'
    else
      kubectl get "$crd" -A --no-headers | awk '{print $1" ""'"$crd"'"}' | xargs -n 2 sh -c 'kubectl patch $1 $0 -p "{\"metadata\":{\"finalizers\":null}}" --type=merge 2>/dev/null && kubectl delete $1 $0 2>/dev/null'
    fi
    kubectl delete crd "$crd" 2>/dev/null
  fi
done
done
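A recurring idiom in this refactor is testing a command directly in `if` instead of inspecting `$?` afterwards. A small standalone sketch (release and namespace names are hypothetical):

release=my-extension
namespace=my-namespace

# Old pattern: the $? check silently tests the wrong command if anything is
# ever inserted between the helm call and the test, and helm's output leaks
# into the script's own logging.
helm status "$release" --namespace "$namespace"
if [ $? -eq 0 ]; then
  echo "installed"
fi

# New pattern: the exit status is consumed where it is produced, output silenced.
if helm status "$release" --namespace "$namespace" &>/dev/null; then
  echo "installed"
fi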
@@ -1,21 +0,0 @@
{{- define "kubectl.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.kubectl.image "global" (default .Values.global (dict "imageRegistry" "docker.io"))) }}
{{- end -}}

{{- define "common.images.image" -}}
{{- $registryName := .global.imageRegistry -}}
{{- $repositoryName := .imageRoot.repository -}}
{{- $separator := ":" -}}
{{- $termination := .global.tag | toString -}}
{{- if .imageRoot.registry }}
{{- $registryName = .imageRoot.registry -}}
{{- end -}}
{{- if .imageRoot.tag }}
{{- $termination = .imageRoot.tag | toString -}}
{{- end -}}
{{- if .imageRoot.digest }}
{{- $separator = "@" -}}
{{- $termination = .imageRoot.digest | toString -}}
{{- end -}}
{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
{{- end -}}
@@ -20,7 +20,6 @@ data:
{{ (.Files.Glob "scripts/post-delete.sh").AsConfig | indent 2 }}

---

apiVersion: v1
kind: ServiceAccount
metadata:
@@ -31,7 +30,6 @@ metadata:
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@@ -49,14 +47,13 @@ subjects:
name: "{{ .Release.Name }}-post-delete-crd"
namespace: {{ .Release.Namespace }}

---

{{- $crdNameList := list }}
{{- range $path, $_ := .Files.Glob "crds/**" }}
{{- $crd := $.Files.Get $path | fromYaml }}
{{- $crdNameList = append $crdNameList $crd.metadata.name }}
{{- end }}

---
apiVersion: batch/v1
kind: Job
metadata:
@@ -70,6 +67,9 @@ spec:
spec:
restartPolicy: Never
serviceAccountName: "{{ .Release.Name }}-post-delete-crd"
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: post-delete-job
image: {{ template "kubectl.image" . }}
@@ -81,7 +81,6 @@ spec:
volumeMounts:
- mountPath: /scripts
name: scripts
resources: {{- toYaml .Values.kubectl.resources | nindent 12 }}
volumes:
- name: scripts
configMap:
@@ -11,7 +11,6 @@ data:
{{ (.Files.Glob "crds/*").AsConfig | indent 2 }}

---

apiVersion: v1
kind: ServiceAccount
metadata:
@@ -22,7 +21,6 @@ metadata:
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@@ -54,6 +52,9 @@ spec:
spec:
restartPolicy: Never
serviceAccountName: "{{ .Release.Name }}-pre-upgrade-crd"
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: crd-install
image: {{ template "kubectl.image" . }}
@@ -64,7 +65,6 @@ spec:
volumeMounts:
- mountPath: /scripts
name: scripts
resources: {{- toYaml .Values.kubectl.resources | nindent 12 }}
volumes:
- name: scripts
configMap:
@@ -1,6 +1,8 @@
# Default values for ks-crds.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
imageRegistry: ""
imagePullSecrets: []
# - name: "image-pull-secret"

kubectl:
image:
registry: ""
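With pull secrets now sourced from .Values.global, a private registry can be wired up once at install time. A sketch, assuming the chart lives at config/ks-crds and using hypothetical credentials:

kubectl create secret docker-registry image-pull-secret \
  --docker-server=registry.example.com \
  --docker-username=deploy --docker-password=s3cret \
  -n kubesphere-system

helm upgrade --install ks-crds config/ks-crds -n kubesphere-system \
  --set global.imageRegistry=registry.example.com \
  --set 'global.imagePullSecrets[0].name=image-pull-secret'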
@@ -102,8 +102,8 @@ spec:
release: {{ .Release.Name }}
{{ template "redis-ha.fullname" . }}: replica
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
securityContext: {{ toYaml .Values.securityContext | nindent 8 }}
serviceAccountName: {{ template "redis-ha.serviceAccountName" . }}

@@ -109,8 +109,8 @@ spec:
readOnly: true
- name: data
mountPath: /data
{{- if .Values.haproxy.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.haproxy.imagePullSecrets | nindent 8 }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: haproxy

@@ -4,6 +4,8 @@

global:
imageRegistry: ""
imagePullSecrets: []
# - name: "image-pull-secret"

image:
registry: ""
@@ -12,13 +14,6 @@ image:
digest: ""
pullPolicy: IfNotPresent

## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## This imagePullSecrets is only for redis images
##
imagePullSecrets: []
# - name: "image-pull-secret"

## replicas number for each component
replicas: 3

@@ -102,13 +97,6 @@ haproxy:

## Custom labels for the haproxy pod
labels: {}

## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"

annotations: {}
resources: {}
emptyDir: {}
@@ -1,15 +1,15 @@
{{/*
Return the proper image name
*/}}
{{- define "ks-apiserver.image" -}}
{{- define "apiserver.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.apiserver.image "global" .Values.global) }}
{{- end -}}

{{- define "ks-console.image" -}}
{{- define "console.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.console.image "global" .Values.global) }}
{{- end -}}

{{- define "ks-controller-manager.image" -}}
{{- define "controller.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.controller.image "global" .Values.global) }}
{{- end -}}

@@ -33,7 +33,7 @@ Return the proper image name
{{ include "common.images.image" (dict "imageRoot" .Values.redis.image "global" .Values.global) }}
{{- end -}}

{{- define "extensions_museum.image" -}}
{{- define "extensionRepo.image" -}}
{{ include "common.images.image" (dict "imageRoot" .Values.ksExtensionRepository.image "global" .Values.global) }}
{{- end -}}

@@ -53,46 +53,4 @@ Return the proper image name
{{- $termination = .imageRoot.digest | toString -}}
{{- end -}}
{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
{{- end -}}

{{/*
Return the proper Docker Image Registry Secret Names
*/}}
{{- define "apiserver.imagePullSecrets" -}}
{{- include "common.images.pullSecrets" (dict "images" (list .Values.apiserver.image) "global" .Values.global) -}}
{{- end -}}

{{- define "console.imagePullSecrets" -}}
{{- include "common.images.pullSecrets" (dict "images" (list .Values.console.image) "global" .Values.global) -}}
{{- end -}}

{{- define "controller.imagePullSecrets" -}}
{{- include "common.images.pullSecrets" (dict "images" (list .Values.controller.image) "global" .Values.global) -}}
{{- end -}}

{{- define "extensions_museum.imagePullSecrets" -}}
{{- include "common.images.pullSecrets" (dict "images" (list .Values.ksExtensionRepository.image) "global" .Values.global) -}}
{{- end -}}

{{- define "common.images.pullSecrets" -}}
{{- $pullSecrets := list }}

{{- if .global }}
{{- range .global.imagePullSecrets -}}
{{- $pullSecrets = append $pullSecrets . -}}
{{- end -}}
{{- end -}}

{{- range .images -}}
{{- range .pullSecrets -}}
{{- $pullSecrets = append $pullSecrets . -}}
{{- end -}}
{{- end -}}

{{- if (not (empty $pullSecrets)) }}
imagePullSecrets:
{{- range $pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
{{- end -}}
{{- end -}}
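One way to spot-check the renamed image helpers and the switch to global pull secrets is to render the chart and grep the output; the chart path and values here are assumptions:

helm template ks-core config/ks-core \
  --set global.imageRegistry=registry.example.com \
  --set 'global.imagePullSecrets[0].name=image-pull-secret' \
  | grep -E 'image:|imagePullSecrets:' | sort | uniq -c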
@@ -15,7 +15,6 @@ apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: letsencrypt
namespace: {{ .Release.Namespace }}
spec:
acme:
email: {{ .Values.letsEncrypt.email }}
@@ -27,7 +26,7 @@ spec:
privateKeySecretRef:
name: letsencrypt-{{ .Values.letsEncrypt.environment }}
{{- if or (.Capabilities.APIVersions.Has "certmanager.k8s.io/v1alpha1") }}
http01: { }
http01: {}
{{- else }}
solvers:
- http01:
@@ -55,7 +54,6 @@ apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: self-signed
namespace: {{ .Release.Namespace }}
spec:
selfSigned: {}
{{- end }}
@@ -79,7 +77,6 @@ apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ks-apiserver-certificate
namespace: {{ .Release.Namespace }}
spec:
# Secret names are always required.
secretName: ks-apiserver-tls-certs
@@ -124,7 +121,6 @@ apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: ks-console-certificate
namespace: {{ .Release.Namespace }}
spec:
# Secret names are always required.
secretName: ks-console-tls-certs

@@ -10,5 +10,4 @@ stringData:
kind: Secret
metadata:
name: extensions.customresourcefilters
namespace: kubesphere-system
type: config.kubesphere.io/custom-resource-filter
config/ks-core/templates/dynamic-upgrade-job.yaml (new file, 57 lines)
@@ -0,0 +1,57 @@
{{- if .Values.upgrade.enabled }}
{{- if .Values.upgrade.dynamic }}
{{- if .Values.upgrade.config }}
apiVersion: v1
kind: ConfigMap
metadata:
name: ks-upgrade-dynamic-config
data:
config-patch.yaml: |
{{- toYaml .Values.upgrade.config | nindent 4 }}
{{- end }}

---
apiVersion: batch/v1
kind: Job
metadata:
name: dynamic-upgrade
spec:
template:
spec:
restartPolicy: Never
serviceAccountName: {{ include "ks-core.serviceAccountName" . }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: dynamic-upgrade-job
image: {{ template "upgrade.image" . }}
imagePullPolicy: {{ .Values.upgrade.image.pullPolicy }}
command:
- ks-upgrade
- dynamic-upgrade
- --logtostderr=true
- --config=/etc/kubesphere/config.yaml
{{- if .Values.upgrade.config }}
- --config=/etc/kubesphere/config-patch.yaml
{{- end }}
volumeMounts:
- mountPath: /tmp/ks-upgrade
name: data
{{- if .Values.upgrade.config }}
- mountPath: /etc/kubesphere/config-patch.yaml
name: config
subPath: config-patch.yaml
{{- end }}
volumes:
- name: data
persistentVolumeClaim:
claimName: {{ .Values.upgrade.persistenceVolume.name }}
{{- if .Values.upgrade.config }}
- name: config
configMap:
name: ks-upgrade-dynamic-config
defaultMode: 420
{{- end }}
{{- end }}
{{- end }}
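The new dynamic-upgrade Job can be rendered in isolation and, once applied, followed like any batch Job. A sketch that assumes the chart sits at config/ks-core, that default values define upgrade.persistenceVolume, and that the release lives in kubesphere-system:

helm template ks-core config/ks-core \
  --set upgrade.enabled=true --set upgrade.dynamic=true \
  --show-only templates/dynamic-upgrade-job.yaml

kubectl -n kubesphere-system get job dynamic-upgrade
kubectl -n kubesphere-system logs -f job/dynamic-upgrade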
@@ -10,7 +10,6 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: extensions-museum
namespace: {{ .Release.Namespace }}
labels:
app: extensions-museum
spec:
@@ -26,10 +25,12 @@ spec:
# force restart ks-apiserver after the upgrade is complete if kubesphere-config changes
checksum/cert: {{ sha256sum $cert.Cert }}
spec:
{{- include "extensions_museum.imagePullSecrets" . | nindent 6 }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: extensions-museum
image: {{ template "extensions_museum.image" . }}
image: {{ template "extensionRepo.image" . }}
command:
- "/chartmuseum"
- "--storage-local-rootdir"
@@ -55,7 +56,6 @@ apiVersion: v1
kind: Secret
metadata:
name: extensions-museum-certs
namespace: {{ .Release.Namespace }}
type: kubernetes.io/tls
data:
ca.crt: {{ b64enc $ca.Cert }}
@@ -67,7 +67,6 @@ apiVersion: v1
kind: Service
metadata:
name: extensions-museum
namespace: {{ .Release.Namespace }}
spec:
selector:
app: extensions-museum

@@ -10,14 +10,6 @@ rules:
verbs:
- get
- list
- apiGroups:
- extensions.kubesphere.io
resources:
- '*'
verbs:
- get
- list
- watch
- nonResourceURLs:
- '/static/images/*'
verbs:
@@ -76,6 +68,7 @@ rules:
- config.kubesphere.io
resources:
- configs
- platformconfigs
verbs:
- get
- list
@@ -256,4 +249,20 @@ rules:
- users
verbs:
- create
- list
- list

---
apiVersion: iam.kubesphere.io/v1beta1
kind: GlobalRole
metadata:
name: ks-console
rules:
- apiGroups:
- extensions.kubesphere.io
- config.kubesphere.io
resources:
- '*'
verbs:
- get
- list
- watch
@@ -3,7 +3,6 @@ apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ks-console
namespace: {{ .Release.Namespace }}
annotations:
{{- if .Values.internalTLS }}
{{- if eq .Values.ingress.ingressClassName "nginx" }}

@@ -4,10 +4,8 @@ kind: Deployment
metadata:
labels:
app: ks-agent
tier: backend
version: {{ .Chart.AppVersion }}
name: ks-agent
namespace: kubesphere-system
spec:
strategy:
rollingUpdate:
@@ -31,7 +29,9 @@ spec:
tier: backend
spec:
serviceAccountName: {{ template "ks-core.serviceAccountName" . }}
{{- include "controller.imagePullSecrets" . | nindent 6 }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.nodeSelector "context" $) | nindent 8 }}
{{- end }}
@@ -48,7 +48,7 @@ spec:
terminationGracePeriodSeconds: 30
containers:
- name: ks-apiserver
image: {{ template "ks-apiserver.image" . }}
image: {{ template "apiserver.image" . }}
imagePullPolicy: {{ .Values.apiserver.image.pullPolicy }}
{{- if .Values.apiserver.containerPorts }}
ports: {{- include "common.tplvalues.render" (dict "value" .Values.apiserver.containerPorts "context" $) | nindent 12 }}
@@ -81,7 +81,7 @@ spec:
{{- include "common.tplvalues.render" (dict "value" .Values.apiserver.extraVolumeMounts "context" $) | nindent 12 }}
{{- end }}
- name: ks-controller-manager
image: {{ template "ks-controller-manager.image" . }}
image: {{ template "controller.image" . }}
imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
{{- if .Values.controller.containerPorts }}
ports: {{- include "common.tplvalues.render" (dict "value" .Values.controller.containerPorts "context" $) | nindent 12 }}
@@ -100,7 +100,7 @@ spec:
- mountPath: /etc/kubesphere/
name: kubesphere-config
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: webhook-secret
name: webhook-cert
- mountPath: /etc/localtime
name: host-time
readOnly: true
@@ -112,7 +112,7 @@ spec:
configMap:
name: kubesphere-config
defaultMode: 420
- name: webhook-secret
- name: webhook-cert
secret:
defaultMode: 420
secretName: ks-controller-manager-webhook-cert
@@ -4,10 +4,8 @@ kind: Deployment
metadata:
labels:
app: ks-apiserver
tier: backend
version: {{ .Chart.AppVersion }}
name: ks-apiserver
namespace: kubesphere-system
spec:
strategy:
rollingUpdate:
@@ -29,7 +27,9 @@ spec:
checksum/config: {{ include (print $.Template.BasePath "/kubesphere-config.yaml") . | sha256sum }}
spec:
serviceAccountName: {{ template "ks-core.serviceAccountName" . }}
{{- include "apiserver.imagePullSecrets" . | nindent 6 }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
affinity:
{{- with .Values.affinity }}
{{- toYaml . | nindent 8 }}
@@ -62,7 +62,7 @@ spec:
{{- end }}
containers:
- name: ks-apiserver
image: {{ template "ks-apiserver.image" . }}
image: {{ template "apiserver.image" . }}
imagePullPolicy: {{ .Values.apiserver.image.pullPolicy }}
{{- if .Values.apiserver.containerPorts }}
ports: {{- include "common.tplvalues.render" (dict "value" .Values.apiserver.containerPorts "context" $) | nindent 12 }}
@@ -85,7 +85,7 @@ spec:
livenessProbe:
failureThreshold: 8
httpGet:
path: /version
path: /livez
port: 9090
{{- if .Values.internalTLS }}
scheme: HTTPS
@@ -107,14 +107,14 @@ spec:
- name: tls-cert
mountPath: /etc/kubesphere/pki/
{{- end }}
{{ if .Values.ha.enabled }}
{{- if .Values.ha.enabled }}
env:
- name: KUBESPHERE_CACHE_OPTIONS_PASSWORD
valueFrom:
secretKeyRef:
name: redis-secret
key: auth
{{ end }}
{{- end }}
volumes:
- configMap:
defaultMode: 420
@@ -133,4 +133,4 @@ spec:
secretName: ks-apiserver-tls-certs
defaultMode: 420
{{- end }}
{{ end }}
{{ end }}
@@ -31,5 +31,4 @@ data:
enableNodeListTerminal: {{ .Values.console.config.enableNodeListTerminal }}
kind: ConfigMap
metadata:
name: ks-console-config
namespace: kubesphere-system
name: ks-console-config
@@ -1,13 +1,36 @@
{{ if eq (include "multicluster.role" .) "host" }}
apiVersion: kubesphere.io/v1alpha1
kind: ServiceAccount
metadata:
name: ks-console
namespace: kubesphere-system
secrets: []

---
apiVersion: iam.kubesphere.io/v1beta1
kind: GlobalRoleBinding
metadata:
labels:
iam.kubesphere.io/role-ref: ks-console
name: ks-console
roleRef:
apiGroup: iam.kubesphere.io
kind: GlobalRole
name: ks-console
subjects:
- apiGroup: kubesphere.io
kind: ServiceAccount
name: ks-console
namespace: kubesphere-system

---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ks-console
tier: frontend
version: {{ .Chart.AppVersion }}
name: ks-console
namespace: kubesphere-system
spec:
strategy:
rollingUpdate:
@@ -27,10 +50,11 @@ spec:
annotations:
# force restart ks-console after the upgrade is complete if ks-console-config changes
checksum/config: {{ include (print $.Template.BasePath "/ks-console-config.yaml") . | sha256sum }}
kubesphere.io/serviceaccount-name: ks-console
spec:
serviceAccount: {{ template "ks-core.serviceAccountName" . }}
serviceAccountName: {{ template "ks-core.serviceAccountName" . }}
{{- include "console.imagePullSecrets" . | nindent 6 }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.nodeSelector "context" $) | nindent 8 }}
{{- end }}
@@ -46,7 +70,7 @@ spec:
app: ks-console
topologyKey: kubernetes.io/hostname
namespaces:
- {{ .Release.Namespace | quote }}
- {{ .Release.Namespace | quote }}
{{- else }}
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
@@ -58,28 +82,28 @@ spec:
namespaces:
- {{ .Release.Namespace | quote }}
{{- end }}
{{- if .Values.tolerations }}
{{- if .Values.tolerations }}
tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" .) | nindent 8 }}
{{- end }}
{{- if .Values.internalTLS }}
initContainers:
- name: init-ca
image: {{ template "kubectl.image" . }}
command: [ "/bin/sh", "-c" ]
args: [ "cp /tmp/ca.crt /usr/local/share/ca-certificates/ \
command: ["/bin/sh", "-c"]
args: ["cp /tmp/ca.crt /usr/local/share/ca-certificates/ \
&& update-ca-certificates && cp /etc/ssl/certs/ca-certificates.crt /certs/"]
volumeMounts:
- name: tls-cert
mountPath: /tmp/ca.crt
subPath: ca.crt
readOnly: false
- name: sys-cert
mountPath: /certs
readOnly: false
- name: tls-cert
mountPath: /tmp/ca.crt
subPath: ca.crt
readOnly: false
- name: sys-cert
mountPath: /certs
readOnly: false
{{- end }}
containers:
- name: ks-console
image: {{ template "ks-console.image" . }}
image: {{ template "console.image" . }}
imagePullPolicy: {{ .Values.console.image.pullPolicy }}
{{- if .Values.console.containerPorts }}
ports: {{- include "common.tplvalues.render" (dict "value" .Values.console.containerPorts "context" $) | nindent 12 }}
@@ -147,14 +171,13 @@ spec:
- name: sys-cert
emptyDir: {}
{{- end }}
---

---
apiVersion: v1
kind: Service
metadata:
labels:
app: ks-console
tier: frontend
version: {{ .Chart.AppVersion }}
annotations:
{{- if .Values.internalTLS }}
@@ -165,21 +188,19 @@ metadata:
name: ks-console
spec:
ports:
- name: nginx
{{- if .Values.internalTLS }}
port: 443
{{- else }}
port: 80
- name: nginx
{{- if .Values.internalTLS }}
port: 443
{{- else }}
port: 80
{{- end }}
protocol: TCP
targetPort: 8000
{{- with .Values.console.nodePort }}
nodePort: {{ . }}
{{- end }}
protocol: TCP
targetPort: 8000
{{- with .Values.console.nodePort }}
nodePort:
{{- toYaml . | nindent 6 }}
{{- end }}
selector:
app: ks-console
tier: frontend
{{- if .Values.console.nodePort }}
type: NodePort
{{- else}}
@@ -4,15 +4,13 @@ kind: Deployment
metadata:
labels:
app: ks-controller-manager
tier: backend
version: {{ .Chart.AppVersion }}
name: ks-controller-manager
namespace: kubesphere-system
spec:
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
progressDeadlineSeconds: 600
replicas: {{ if .Values.ha.enabled }}3{{ else }}1{{ end }}
@@ -31,7 +29,9 @@ spec:
checksum/config: {{ include (print $.Template.BasePath "/kubesphere-config.yaml") . | sha256sum }}
spec:
serviceAccountName: {{ template "ks-core.serviceAccountName" . }}
{{- include "controller.imagePullSecrets" . | nindent 6 }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.nodeSelector "context" $) | nindent 8 }}
{{- end }}
@@ -68,7 +68,7 @@ spec:
terminationGracePeriodSeconds: 30
containers:
- name: ks-controller-manager
image: {{ template "ks-controller-manager.image" . }}
image: {{ template "controller.image" . }}
imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
{{- if .Values.controller.containerPorts }}
ports: {{- include "common.tplvalues.render" (dict "value" .Values.controller.containerPorts "context" $) | nindent 12 }}
@@ -87,7 +87,7 @@ spec:
- mountPath: /etc/kubesphere/
name: kubesphere-config
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: webhook-secret
name: webhook-cert
- mountPath: /etc/localtime
name: host-time
readOnly: true
@@ -99,7 +99,7 @@ spec:
configMap:
name: kubesphere-config
defaultMode: 420
- name: webhook-secret
- name: webhook-cert
secret:
defaultMode: 420
secretName: ks-controller-manager-webhook-cert
@@ -37,14 +37,17 @@ data:
multicluster:
clusterRole: {{ include "multicluster.role" . | quote }}
hostClusterName: {{ include "multicluster.hostClusterName" . | include "validateHostClusterName" | quote }}
kubeconfig:
# service-account-token client-certificate oidc-token webhook-token
authMode: {{ (.Values.kubeconfig).authMode | default "client-certificate" }}
terminal:
kubectl:
image: {{ template "kubectl.image" . }}
image: {{ include "kubectl.image" . | quote }}
node:
image: {{ template "nodeShell.image" . }}
image: {{ include "nodeShell.image" . | quote }}
uploadFileLimit: 100Mi
helmExecutor:
image: {{ template "helm.image" . }}
image: {{ include "helm.image" . | quote }}
timeout: {{ .Values.helmExecutor.timeout }}
historyMax: {{ .Values.helmExecutor.historyMax }}
jobTTLAfterFinished: {{ .Values.helmExecutor.jobTTLAfterFinished }}
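The change from {{ template ... }} to {{ include ... | quote }} makes the rendered image references explicit YAML strings. A quick check, assuming the same chart path used elsewhere in this PR:

# The terminal/helmExecutor image fields should now render with quotes.
helm template ks-core config/ks-core \
  --show-only templates/kubesphere-config.yaml | grep -E 'image: "'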
@@ -13,7 +13,6 @@ stringData:
kind: Secret
metadata:
name: oauthclient-kubesphere
namespace: kubesphere-system
labels:
config.kubesphere.io/type: oauthclient
type: config.kubesphere.io/oauthclient

@@ -10,7 +10,6 @@ data:
{{ (.Files.Glob "scripts/post-delete.sh").AsConfig | indent 2 }}

---

apiVersion: v1
kind: ServiceAccount
metadata:
@@ -21,7 +20,6 @@ metadata:
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@@ -40,7 +38,6 @@ subjects:
namespace: {{ .Release.Namespace }}

---

apiVersion: batch/v1
kind: Job
metadata:
@@ -54,6 +51,9 @@ spec:
spec:
restartPolicy: Never
serviceAccountName: "{{ .Release.Name }}-post-delete"
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: post-delete-job
image: {{ template "kubectl.image" . }}
@@ -3,7 +3,7 @@ kind: Job
metadata:
name: "{{ .Release.Name }}-post-patch-system-ns"
annotations:
"helm.sh/hook": post-install
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded,hook-failed
spec:
@@ -11,9 +11,12 @@ spec:
spec:
restartPolicy: Never
serviceAccountName: {{ include "ks-core.serviceAccountName" . }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: post-patch-system-ns
image: {{ template "kubectl.image" . }}
image: {{ template "kubectl.image" . }}
command:
- /bin/bash
- -c
@@ -23,6 +26,5 @@ spec:
do
kubectl label ns $ns kubesphere.io/workspace=system-workspace
kubectl label ns $ns kubesphere.io/managed=true
done
kubectl get ns -l 'kubesphere.io/workspace,!kubesphere.io/managed' --no-headers -o custom-columns=NAME:.metadata.name | \
xargs -I {} kubectl label ns {} kubesphere.io/managed=true
kubectl label ns $ns kubesphere.io/protected-resource=true
done
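The selector added in this hook combines a label-existence test with a negation; it can be exercised on its own:

# Namespaces that HAVE kubesphere.io/workspace but do NOT yet have
# kubesphere.io/managed, i.e. exactly the set the job still needs to label.
kubectl get ns -l 'kubesphere.io/workspace,!kubesphere.io/managed' \
  --no-headers -o custom-columns=NAME:.metadata.name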
config/ks-core/templates/post-patch-user-job.yaml (new file, 40 lines)
@@ -0,0 +1,40 @@
apiVersion: batch/v1
kind: Job
metadata:
name: "{{ .Release.Name }}-post-patch-user"
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "-4"
"helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded,hook-failed
spec:
template:
spec:
restartPolicy: Never
serviceAccountName: {{ include "ks-core.serviceAccountName" . }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: post-patch-user
image: {{ template "kubectl.image" . }}
command:
- /bin/bash
- -c
- |
#!/bin/bash
# Get all users with the specified label
kubectl get users -l iam.kubesphere.io/identify-provider \
-o custom-columns=\
NAME:.metadata.name,\
IDP:".metadata.labels['iam\.kubesphere\.io/identify-provider']",\
UID:".metadata.labels['iam\.kubesphere\.io/origin-uid']" \
--no-headers | while read -r username idp uid; do
# Check if variables are not empty and not <none>
if [ ! -z "$username" ] && [ ! -z "$idp" ] && [ ! -z "$uid" ] && \
[ "$idp" != "<none>" ] && [ "$uid" != "<none>" ]; then
# Set annotation
annotation_key="iam.kubesphere.io/identity-provider.${idp}"
kubectl annotate --overwrite user "${username}" "${annotation_key}=${uid}"
echo "Updated user ${username} with annotation ${annotation_key}=${uid}"
fi
done
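A spot-check for the migration above, with a hypothetical user name: the identify-provider/origin-uid label pair should now be mirrored into one annotation.

kubectl get user some-idp-user -o yaml | grep -A5 'annotations:'
# Expect a key like iam.kubesphere.io/identity-provider.<idp> carrying the
# value of the old iam.kubesphere.io/origin-uid label.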
config/ks-core/templates/post-upgrade-job.yaml (new file, 48 lines)
@@ -0,0 +1,48 @@
{{- if .Values.upgrade.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Release.Name }}-post-upgrade
annotations:
"helm.sh/hook": post-upgrade
"helm.sh/hook-weight": "0"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
template:
spec:
restartPolicy: Never
serviceAccountName: {{ include "ks-core.serviceAccountName" . }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: post-upgrade-job
image: {{ template "upgrade.image" . }}
imagePullPolicy: {{ .Values.upgrade.image.pullPolicy }}
command:
- ks-upgrade
- post-upgrade
- --logtostderr=true
- --config=/etc/kubesphere/config.yaml
{{- if .Values.upgrade.config }}
- --config=/etc/kubesphere/config-patch.yaml
{{- end }}
volumeMounts:
{{- if .Values.upgrade.config }}
- mountPath: /etc/kubesphere/config-patch.yaml
name: config
subPath: config-patch.yaml
{{- end }}
- mountPath: /tmp/ks-upgrade
name: data
volumes:
{{- if .Values.upgrade.config }}
- name: config
configMap:
name: {{ .Release.Name }}-upgrade-config
defaultMode: 420
{{- end }}
- name: data
persistentVolumeClaim:
claimName: {{ .Values.upgrade.persistenceVolume.name }}
{{- end }}
config/ks-core/templates/pre-upgrade-job.yaml (new file, 83 lines)
@@ -0,0 +1,83 @@
{{- if .Values.upgrade.enabled }}
{{- if .Values.upgrade.config }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-upgrade-config
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-1"
data:
config-patch.yaml: |
{{- toYaml .Values.upgrade.config | nindent 4 }}
{{- end }}

---
{{- if not (lookup "v1" "PersistentVolumeClaim" .Release.Namespace .Values.upgrade.persistenceVolume.name) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ .Values.upgrade.persistenceVolume.name }}
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "-1"
labels:
app: ks-upgrade
version: {{ .Chart.AppVersion }}
spec:
accessModes:
- {{ .Values.upgrade.persistenceVolume.accessMode | quote }}
resources:
requests:
storage: {{ .Values.upgrade.persistenceVolume.size | quote }}
storageClassName: {{ .Values.upgrade.persistenceVolume.storageClassName }}
{{- end }}

---
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Release.Name }}-pre-upgrade
annotations:
"helm.sh/hook": pre-upgrade
"helm.sh/hook-weight": "0"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
template:
spec:
restartPolicy: Never
serviceAccountName: {{ include "ks-core.serviceAccountName" . }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: pre-upgrade-job
image: {{ template "upgrade.image" . }}
imagePullPolicy: {{ .Values.upgrade.image.pullPolicy }}
command:
- ks-upgrade
- pre-upgrade
- --logtostderr=true
- --config=/etc/kubesphere/config.yaml
{{- if .Values.upgrade.config }}
- --config=/etc/kubesphere/config-patch.yaml
{{- end }}
volumeMounts:
{{- if .Values.upgrade.config }}
- mountPath: /etc/kubesphere/config-patch.yaml
name: config
subPath: config-patch.yaml
{{- end }}
- mountPath: /tmp/ks-upgrade
name: data
volumes:
{{- if .Values.upgrade.config }}
- name: config
configMap:
name: {{ .Release.Name }}-upgrade-config
defaultMode: 420
{{- end }}
- name: data
persistentVolumeClaim:
claimName: {{ .Values.upgrade.persistenceVolume.name }}
{{- end }}
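How these hooks are expected to fire on an upgrade (release name, chart path, and storage values are assumptions): the weight -1 ConfigMap and PVC are created first, the weight 0 pre-upgrade Job runs next, and the post-upgrade Job runs after the release is rolled out.

helm upgrade ks-core config/ks-core -n kubesphere-system \
  --set upgrade.enabled=true \
  --set upgrade.persistenceVolume.name=ks-upgrade \
  --set upgrade.persistenceVolume.storageClassName=default \
  --set upgrade.persistenceVolume.size=5Gi

kubectl -n kubesphere-system get jobs | grep -E 'pre-upgrade|post-upgrade'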
config/ks-core/templates/prepare-upgrade-job.yaml (new file, 52 lines)
@@ -0,0 +1,52 @@
{{- if .Values.upgrade.enabled }}
{{- if .Values.upgrade.prepare }}
{{- if .Values.upgrade.config }}
apiVersion: v1
kind: ConfigMap
metadata:
name: ks-upgrade-prepare-config
data:
config-patch.yaml: |
{{- toYaml .Values.upgrade.config | nindent 4 }}
{{- end }}

---
apiVersion: batch/v1
kind: Job
metadata:
name: prepare-upgrade
spec:
template:
spec:
restartPolicy: Never
serviceAccountName: {{ include "ks-core.serviceAccountName" . }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
{{- end }}
containers:
- name: prepare-upgrade-job
image: {{ template "upgrade.image" . }}
imagePullPolicy: {{ .Values.upgrade.image.pullPolicy }}
command:
- ks-upgrade
- prepare-upgrade
- --logtostderr=true
- --config=/etc/kubesphere/config.yaml
{{- if .Values.upgrade.config }}
- --config=/etc/kubesphere/config-patch.yaml
{{- end }}
{{- if .Values.upgrade.config }}
volumeMounts:
- mountPath: /etc/kubesphere/config-patch.yaml
name: config
subPath: config-patch.yaml
{{- end }}
{{- if .Values.upgrade.config }}
volumes:
- name: config
configMap:
name: ks-upgrade-prepare-config
defaultMode: 420
{{- end }}
{{- end }}
{{- end }}
@@ -234,8 +234,6 @@ spec:
en: Project Settings
zh: '项目设置'

---
# category
---
apiVersion: iam.kubesphere.io/v1beta1
kind: Category

@@ -1569,64 +1569,6 @@ spec:
---
apiVersion: iam.kubesphere.io/v1beta1
kind: RoleTemplate
metadata:
annotations:
iam.kubesphere.io/role-template-rules: '{"workloadtemplates": "view"}'
labels:
iam.kubesphere.io/aggregate-to-operator: ""
iam.kubesphere.io/aggregate-to-viewer: ""
iam.kubesphere.io/aggregate-to-regular: ""
iam.kubesphere.io/category: namespace-configuration-management
iam.kubesphere.io/scope: "namespace"
kubesphere.io/managed: "true"
name: namespace-view-workloadtemplates
spec:
description:
en: 'View workloadtemplates in the project.'
zh: '查看项目中的工作负载模板。'
displayName:
en: WorkloadTemplate Viewing
zh: '工作负载模板查看'
rules:
- apiGroups:
- 'workloadtemplate.kubesphere.io'
resources:
- "*"
verbs:
- get
- list
- watch

---
apiVersion: iam.kubesphere.io/v1beta1
kind: RoleTemplate
metadata:
annotations:
iam.kubesphere.io/dependencies: '["namespace-view-workloadtemplates"]'
iam.kubesphere.io/role-template-rules: '{"workloadtemplates": "manage"}'
labels:
iam.kubesphere.io/aggregate-to-operator: ""
iam.kubesphere.io/category: namespace-configuration-management
iam.kubesphere.io/scope: "namespace"
kubesphere.io/managed: "true"
name: namespace-manage-workloadtemplates
spec:
description:
en: 'Create, edit, and delete workloadtemplates in the project.'
zh: '创建、编辑和删除项目中的工作负载模板。'
displayName:
en: WorkloadTemplate Management
zh: '工作负载模板管理'
rules:
- apiGroups:
- 'workloadtemplate.kubesphere.io'
resources:
- "*"
verbs:
- '*'
---
apiVersion: iam.kubesphere.io/v1beta1
kind: RoleTemplate
metadata:
annotations:
iam.kubesphere.io/role-template-rules: '{"secrets": "view"}'
@@ -1721,7 +1663,6 @@ metadata:
iam.kubesphere.io/dependencies: '["namespace-view-serviceaccount"]'
iam.kubesphere.io/role-template-rules: '{"serviceaccounts": "manage"}'
labels:
iam.kubesphere.io/aggregate-to-operator: ""
iam.kubesphere.io/category: namespace-configuration-management
iam.kubesphere.io/scope: "namespace"
kubesphere.io/managed: "true"
@@ -3,7 +3,6 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "ks-core.serviceAccountName" . }}
namespace: kubesphere-system
labels:
{{- include "ks-core.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
@@ -45,5 +44,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ include "ks-core.serviceAccountName" . }}
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
{{- end }}
@@ -1,14 +1,10 @@
apiVersion: v1
kind: Service
metadata:
annotations:
kubernetes.io/created-by: kubesphere.io/ks-apiserver
labels:
app: ks-apiserver
tier: backend
version: {{ .Chart.AppVersion }}
name: ks-apiserver
namespace: kubesphere-system
spec:
ports:
- protocol: TCP
@@ -19,8 +15,7 @@ spec:
{{- end }}
targetPort: 9090
{{- with .Values.apiserver.nodePort }}
nodePort:
{{- toYaml . | nindent 8 }}
nodePort: {{ . }}
{{- end }}
selector:
{{- if eq (include "multicluster.role" .) "host" }}
@@ -28,24 +23,20 @@ spec:
{{- else }}
app: ks-agent
{{- end }}
tier: backend
# version: {{ .Chart.AppVersion }}
{{- if .Values.apiserver.nodePort }}
type: NodePort
{{- else}}
type: ClusterIP
{{- end}}
---

---
apiVersion: v1
kind: Service
metadata:
labels:
app: ks-controller-manager
tier: backend
version: {{ .Chart.AppVersion }}
name: ks-controller-manager
namespace: kubesphere-system
spec:
ports:
- port: 443
@@ -57,7 +48,5 @@ spec:
{{- else }}
app: ks-agent
{{- end }}
tier: backend
# version: {{ .Chart.AppVersion }}
sessionAffinity: None
type: ClusterIP
@@ -7,7 +7,6 @@ apiVersion: v1
kind: Secret
metadata:
name: ks-apiserver-tls-certs
namespace: {{ .Release.Namespace }}
type: Opaque
data:
ca.crt: {{ b64enc $ca.Cert }}
@@ -22,7 +21,6 @@ apiVersion: v1
kind: Secret
metadata:
name: ks-console-tls-certs
namespace: {{ .Release.Namespace }}
type: Opaque
data:
ca.crt: {{ b64enc $ca.Cert }}
@@ -37,7 +35,6 @@ apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.ingress.tls.secretName }}
namespace: {{ .Release.Namespace }}
type: Opaque
data:
ca.crt: {{ b64enc $ca.Cert }}

@@ -4,7 +4,6 @@ apiVersion: traefik.containo.us/v1alpha1
kind: ServersTransport
metadata:
name: ks-console-transport
namespace: {{ .Release.Namespace }}
spec:
serverName: ks-console
insecureSkipVerify: false
@@ -19,7 +19,7 @@ type: Opaque
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: users.iam.kubesphere.io
name: validator.user.iam.kubesphere.io
webhooks:
- admissionReviewVersions:
- v1
@@ -27,7 +27,7 @@ webhooks:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
path: /validate-iam-kubesphere-io-v1beta1-user
port: 443
failurePolicy: Ignore
@@ -54,6 +54,45 @@ webhooks:
sideEffects: None
timeoutSeconds: 30

---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: defaulter.user.iam.kubesphere.io
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: {{ .Release.Namespace }}
path: /mutate-iam-kubesphere-io-v1beta1-user
port: 443
failurePolicy: Ignore
matchPolicy: Exact
name: users.iam.kubesphere.io
namespaceSelector: {}
objectSelector:
matchExpressions:
- key: app.kubernetes.io/managed-by
operator: NotIn
values:
- Helm
rules:
- apiGroups:
- iam.kubesphere.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- users
scope: '*'
sideEffects: None
timeoutSeconds: 30

---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
@@ -66,7 +105,7 @@ webhooks:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
path: /mutate-kubesphere-io-v1alpha1-installplan
port: 443
failurePolicy: Fail
@@ -105,7 +144,7 @@ webhooks:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
path: /validate-kubesphere-io-v1alpha1-installplan
port: 443
failurePolicy: Fail
@@ -145,7 +184,7 @@ webhooks:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
path: /validate-quota-kubesphere-io-v1alpha2
port: 443
failurePolicy: Ignore
@@ -180,7 +219,7 @@ webhooks:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
path: /validate-extensions-kubesphere-io-v1alpha1-jsbundle
port: 443
failurePolicy: Fail
@@ -208,7 +247,7 @@ webhooks:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
path: /validate-extensions-kubesphere-io-v1alpha1-apiservice
port: 443
failurePolicy: Fail
@@ -235,7 +274,7 @@ webhooks:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
path: /validate-extensions-kubesphere-io-v1alpha1-reverseproxy
port: 443
failurePolicy: Fail
@@ -262,7 +301,7 @@ webhooks:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
path: /validate-extensions-kubesphere-io-v1alpha1-extensionentry
port: 443
failurePolicy: Fail
@@ -296,7 +335,7 @@ webhooks:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
path: /validate--v1-secret
port: 443
failurePolicy: Ignore
@@ -321,6 +360,51 @@ webhooks:
sideEffects: None
timeoutSeconds: 30

---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: protector.kubesphere.io
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: {{ .Release.Namespace }}
path: /resource-protector
port: 443
failurePolicy: Ignore
matchPolicy: Exact
name: protector.kubesphere.io
namespaceSelector: {}
objectSelector:
matchExpressions:
- key: kubesphere.io/protected-resource
operator: Exists
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- DELETE
resources:
- namespaces
scope: '*'
- apiGroups:
- "tenant.kubesphere.io"
apiVersions:
- v1beta1
operations:
- DELETE
resources:
- workspacetemplates
scope: '*'
sideEffects: None
timeoutSeconds: 30

{{- if eq (include "multicluster.role" .) "host" }}
---
apiVersion: admissionregistration.k8s.io/v1
@@ -334,7 +418,7 @@ webhooks:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
path: /mutate-extensions-kubesphere-io-v1alpha1-jsbundle
port: 443
failurePolicy: Fail
@@ -358,7 +442,6 @@ webhooks:
{{- end }}

---

apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
@@ -370,7 +453,7 @@ webhooks:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
path: /serviceaccount-pod-injector
port: 443
failurePolicy: Ignore
@@ -403,7 +486,7 @@ webhooks:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: kubesphere-system
namespace: {{ .Release.Namespace }}
path: /mutate--v1-secret
port: 443
failurePolicy: Ignore
@@ -427,3 +510,39 @@ webhooks:
scope: '*'
sideEffects: None
timeoutSeconds: 30

---
{{- if eq (include "multicluster.role" .) "host" }}
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: applications.kubesphere.io
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
caBundle: {{ b64enc $ca.Cert | quote }}
service:
name: ks-controller-manager
namespace: {{ .Release.Namespace }}
path: /validate-application-kubesphere-io-v2-applicationrelease
port: 443
failurePolicy: Fail
matchPolicy: Exact
name: applicationrelease.extensions.kubesphere.io
namespaceSelector: {}
objectSelector: {}
rules:
- apiGroups:
- application.kubesphere.io
apiVersions:
- v2
operations:
- CREATE
- UPDATE
resources:
- applicationreleases
scope: '*'
sideEffects: None
timeoutSeconds: 30
{{- end }}
@@ -2,6 +2,8 @@
apiVersion: tenant.kubesphere.io/v1beta1
kind: WorkspaceTemplate
metadata:
labels:
kubesphere.io/protected-resource: 'true'
annotations:
kubesphere.io/creator: admin
kubesphere.io/description: "system-workspace is a built-in workspace automatically created by KubeSphere. It contains all system components to run KubeSphere."

@@ -4,6 +4,8 @@ global:
imageRegistry: docker.io
tag: v4.1.1
imagePullSecrets: []
# - name: "image-pull-secret"


## @param nameOverride String to partially override common.names.fullname
##
@@ -113,14 +115,6 @@ apiserver:
tag: ""
digest: ""
pullPolicy: IfNotPresent
## Optionally, specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g.:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param containerPorts [array] List of container ports to enable in the ks-apiserver container
##
containerPorts:
@@ -166,14 +160,6 @@ console:
tag: ""
digest: ""
pullPolicy: IfNotPresent
## Optionally, specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g.:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
config:
enableNodeListTerminal: true
## @param containerPorts [array] List of container ports to enable in the ks-console container
@@ -213,14 +199,6 @@ controller:
tag: ""
digest: ""
pullPolicy: IfNotPresent
## Optionally, specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## e.g.:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## @param containerPorts [array] List of container ports to enable in the ks-controller-manager container
##
containerPorts:
@@ -348,14 +326,34 @@ extension:
httpPort: 80
httpsPort: 443

hook:
resources:
limits:
cpu: 1
memory: 1024Mi
requests:
cpu: 20m
memory: 100Mi
upgrade:
enabled: false
image:
registry: ""
repository: kubesphere/ks-upgrade
tag: ""
pullPolicy: IfNotPresent
persistenceVolume:
name: ks-upgrade
storageClassName: ""
accessMode: ReadWriteOnce
size: 5Gi
config: {}
# storage:
# local:
# path: /tmp/ks-upgrade
# download:
# globalRegistryUrl: "https://extensions-museum.kubesphere-system.svc/charts"
# file: {}
# http:
# timeout: 20
# oci: {}
# skipValidator: false
# jobs:
# core:
# disabled: false
# priority: 10000


ha:
enabled: false
@@ -438,13 +436,6 @@ ksCRDs:
repository: kubesphere/kubectl
tag: "v1.27.16"
pullPolicy: IfNotPresent
resources:
limits:
cpu: 1
memory: 1024Mi
requests:
cpu: 20m
memory: 100Mi

# add museum for all ks-extensions
ksExtensionRepository:
@@ -453,4 +444,4 @@ ksExtensionRepository:
registry: ""
repository: kubesphere/ks-extensions-museum
tag: "latest"
pullPolicy: Always
pullPolicy: IfNotPresent

@@ -120,6 +120,7 @@ func (s *APIServer) PrepareRun(stopCh <-chan struct{}) error {
s.installKubeSphereAPIs()
s.installMetricsAPI()
s.installHealthz()
s.installLivez()
if err := s.installOpenAPI(); err != nil {
return err
}
@@ -198,7 +199,12 @@ func (s *APIServer) installKubeSphereAPIs() {

// installHealthz creates the healthz endpoint for this server
func (s *APIServer) installHealthz() {
urlruntime.Must(healthz.InstallHandler(s.container, []healthz.HealthChecker{}...))
urlruntime.Must(healthz.InstallHandler(s.container))
}

// installLivez creates the livez endpoint for this server
func (s *APIServer) installLivez() {
urlruntime.Must(healthz.InstallLivezHandler(s.container))
}

func (s *APIServer) Run(ctx context.Context) (err error) {
@@ -262,7 +268,7 @@ func (s *APIServer) buildHandlerChain(handler http.Handler, stopCh <-chan struct
default:
fallthrough
case authorization.RBAC:
excludedPaths := []string{"/oauth/*", "/dist/*", "/.well-known/openid-configuration", "/kapis/version", "/version", "/metrics", "/healthz", "/openapi/v2", "/openapi/v3"}
excludedPaths := []string{"/oauth/*", "/dist/*", "/.well-known/openid-configuration", "/version", "/metrics", "/livez", "/healthz", "/openapi/v2", "/openapi/v3"}
pathAuthorizer, _ := path.NewAuthorizer(excludedPaths)
amOperator := am.NewReadOnlyOperator(s.ResourceManager)
authorizers = unionauthorizer.New(pathAuthorizer, rbac.NewRBACAuthorizer(amOperator))

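The updated exclude list drops /kapis/version and adds /livez alongside the new liveness endpoint. A rough sketch of the prefix-wildcard semantics such an excluded-paths authorizer implies (the real path.NewAuthorizer may differ in details):

package main

import (
	"fmt"
	"strings"
)

// excluded reports whether a request path matches one of the exclude patterns:
// a trailing "/*" means prefix match, anything else is an exact match.
func excluded(patterns []string, requestPath string) bool {
	for _, p := range patterns {
		if strings.HasSuffix(p, "/*") {
			if strings.HasPrefix(requestPath, strings.TrimSuffix(p, "*")) {
				return true
			}
		} else if p == requestPath {
			return true
		}
	}
	return false
}

func main() {
	patterns := []string{"/oauth/*", "/livez", "/healthz", "/version"}
	fmt.Println(excluded(patterns, "/oauth/token"))   // true: wildcard prefix
	fmt.Println(excluded(patterns, "/livez"))         // true: exact match, newly excluded
	fmt.Println(excluded(patterns, "/kapis/version")) // false: dropped from the exclude list
}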
@@ -10,6 +10,7 @@ type Value string

const (
FieldName = "name"
FieldNameAndAlias = "nameAndAlias"
FieldNames = "names"
FieldUID = "uid"
FieldCreationTimeStamp = "creationTimestamp"

@@ -21,6 +21,7 @@ import (
"kubesphere.io/kubesphere/pkg/constants"
"kubesphere.io/kubesphere/pkg/controller/options"
"kubesphere.io/kubesphere/pkg/models/composedapp"
"kubesphere.io/kubesphere/pkg/models/kubeconfig"
"kubesphere.io/kubesphere/pkg/models/terminal"
"kubesphere.io/kubesphere/pkg/multicluster"
"kubesphere.io/kubesphere/pkg/simple/client/cache"
@@ -111,6 +112,7 @@ type Config struct {
AuthorizationOptions *authorization.Options `json:"authorization,omitempty" yaml:"authorization,omitempty" mapstructure:"authorization"`
MultiClusterOptions *multicluster.Options `json:"multicluster,omitempty" yaml:"multicluster,omitempty" mapstructure:"multicluster"`
AuditingOptions *auditing.Options `json:"auditing,omitempty" yaml:"auditing,omitempty" mapstructure:"auditing"`
KubeconfigOptions *kubeconfig.Options `json:"kubeconfig,omitempty" yaml:"kubeconfig,omitempty" mapstructure:"kubeconfig"`
TerminalOptions *terminal.Options `json:"terminal,omitempty" yaml:"terminal,omitempty" mapstructure:"terminal"`
HelmExecutorOptions *options.HelmExecutorOptions `json:"helmExecutor,omitempty" yaml:"helmExecutor,omitempty" mapstructure:"helmExecutor"`
ExtensionOptions *options.ExtensionOptions `json:"extension,omitempty" yaml:"extension,omitempty" mapstructure:"extension"`
@@ -129,6 +131,7 @@ func New() *Config {
AuthorizationOptions: authorization.NewOptions(),
MultiClusterOptions: multicluster.NewOptions(),
TerminalOptions: terminal.NewOptions(),
KubeconfigOptions: kubeconfig.NewOptions(),
AuditingOptions: auditing.NewAuditingOptions(),
HelmExecutorOptions: options.NewHelmExecutorOptions(),
ExtensionOptions: options.NewExtensionOptions(),

@@ -19,6 +19,7 @@ import (
"kubesphere.io/kubesphere/pkg/apiserver/authorization"
"kubesphere.io/kubesphere/pkg/controller/options"
"kubesphere.io/kubesphere/pkg/models/composedapp"
"kubesphere.io/kubesphere/pkg/models/kubeconfig"
"kubesphere.io/kubesphere/pkg/models/terminal"
"kubesphere.io/kubesphere/pkg/multicluster"
"kubesphere.io/kubesphere/pkg/simple/client/cache"
@@ -33,6 +34,7 @@ func newTestConfig() (*Config, error) {
AuthenticationOptions: authentication.NewOptions(),
MultiClusterOptions: multicluster.NewOptions(),
AuditingOptions: auditing.NewAuditingOptions(),
KubeconfigOptions: kubeconfig.NewOptions(),
TerminalOptions: terminal.NewOptions(),
HelmExecutorOptions: options.NewHelmExecutorOptions(),
ExtensionOptions: options.NewExtensionOptions(),

@@ -8,6 +8,7 @@ package constants
import corev1 "k8s.io/api/core/v1"

const (
SystemWorkspace = "system-workspace"
KubeSystemNamespace = "kube-system"
KubeSphereNamespace = "kubesphere-system"
KubeSphereAPIServerName = "ks-apiserver"
@@ -15,6 +16,7 @@ const (
KubeSphereConfigMapDataKey = "kubesphere.yaml"
KubectlPodNamePrefix = "ks-managed-kubectl"

ProtectedResourceLabel = "kubesphere.io/protected-resource"
WorkspaceLabelKey = "kubesphere.io/workspace"
DisplayNameAnnotationKey = "kubesphere.io/alias-name"
DescriptionAnnotationKey = "kubesphere.io/description"

@@ -9,13 +9,16 @@ import (
"context"
"strings"

"github.com/go-logr/logr"

clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"

kscontroller "kubesphere.io/kubesphere/pkg/controller"

erro "errors"

"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -36,6 +39,7 @@ var _ kscontroller.Controller = &AppCategoryReconciler{}

type AppCategoryReconciler struct {
client.Client
logger logr.Logger
}

func (r *AppCategoryReconciler) Name() string {
@@ -48,6 +52,7 @@ func (r *AppCategoryReconciler) Enabled(clusterRole string) bool {

func (r *AppCategoryReconciler) SetupWithManager(mgr *kscontroller.Manager) error {
r.Client = mgr.GetClient()
r.logger = ctrl.Log.WithName("controllers").WithName(categoryController)
return ctrl.NewControllerManagedBy(mgr).
Named(categoryController).
For(&appv2.Category{}).
@@ -69,8 +74,8 @@ func (r *AppCategoryReconciler) SetupWithManager(mgr *kscontroller.Manager) erro
}

func (r *AppCategoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
klog.V(4).Info("reconcile", "app category", req.String())

r.logger.V(4).Info("reconcile app category", "app category", req.String())
logger := r.logger.WithValues("app category", req.String())
category := &appv2.Category{}
if err := r.Client.Get(ctx, req.NamespacedName, category); err != nil {
if errors.IsNotFound(err) {
@@ -78,7 +83,7 @@ func (r *AppCategoryReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return reconcile.Result{}, r.ensureUncategorizedCategory()
}
// ignore exceptions caused by incorrectly adding app labels.
klog.Errorf("not found %s, check if you added the correct app category", req.String())
logger.Error(err, "not found, check if you added the correct app category")
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
@@ -93,7 +98,7 @@ func (r *AppCategoryReconciler) Reconcile(ctx context.Context, req ctrl.Request)
// our finalizer is present, so lets handle our external dependency
// remove our finalizer from the list and update it.
if category.Status.Total > 0 {
klog.Errorf("can not delete helm category: %s which owns applications", req.String())
logger.Error(erro.New("category is in use"), "cannot delete helm category which still owns applications")
return reconcile.Result{}, nil
}

@@ -107,13 +112,13 @@ func (r *AppCategoryReconciler) Reconcile(ctx context.Context, req ctrl.Request)
appv2.RepoIDLabelKey: appv2.UploadRepoKey,
}
if err := r.List(ctx, apps, opts); err != nil {
klog.Errorf("failed to list apps: %v", err)
r.logger.Error(err, "failed to list apps")
return ctrl.Result{}, err
}
if category.Status.Total != len(apps.Items) {
category.Status.Total = len(apps.Items)
if err := r.Status().Update(ctx, category); err != nil {
klog.Errorf("failed to update category status: %v", err)
r.logger.Error(err, "failed to update category status")
return ctrl.Result{}, err
}
}
@@ -125,7 +130,7 @@ func (r *AppCategoryReconciler) ensureUncategorizedCategory() error {
ctg := &appv2.Category{}
err := r.Get(context.TODO(), types.NamespacedName{Name: appv2.UncategorizedCategoryID}, ctg)
if err != nil && !errors.IsNotFound(err) {
klog.Errorf("failed to get uncategorized category: %v", err)
r.logger.Error(err, "failed to get uncategorized category")
return err
}
ctg.Name = appv2.UncategorizedCategoryID

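These appcategory hunks are representative of a change that runs through the whole commit: printf-style klog calls are replaced by a per-reconciler logr.Logger carrying structured key/value context. A minimal runnable sketch of the before/after pattern, with the reconciler name and messages taken from the hunks above:

package main

import (
	"errors"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	err := errors.New("boom")

	// Before: printf-style klog interpolates message and data into one string.
	klog.Errorf("failed to list apps: %v", err)

	// After: structured logr keeps the error separate and attaches context as
	// key/value pairs that log sinks can index; V(n) controls verbosity.
	logger := ctrl.Log.WithName("controllers").WithName("appcategory")
	logger.Error(err, "failed to list apps")
	logger.V(4).Info("reconcile app category", "app category", "ns/name")
}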
@@ -13,40 +13,32 @@ import (
"strings"
"time"

"k8s.io/client-go/rest"

batchv1 "k8s.io/api/batch/v1"

"kubesphere.io/utils/helm"

apierrors "k8s.io/apimachinery/pkg/api/errors"
"kubesphere.io/api/constants"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/handler"

"kubesphere.io/utils/s3"

clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"

"kubesphere.io/kubesphere/pkg/controller"
kscontroller "kubesphere.io/kubesphere/pkg/controller/options"

"github.com/go-logr/logr"
helmrelease "helm.sh/helm/v3/pkg/release"
batchv1 "k8s.io/api/batch/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"

"kubesphere.io/kubesphere/pkg/simple/client/application"

"kubesphere.io/kubesphere/pkg/utils/clusterclient"

clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/api/constants"
"kubesphere.io/utils/helm"
"kubesphere.io/utils/s3"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

"kubesphere.io/kubesphere/pkg/controller"
kscontroller "kubesphere.io/kubesphere/pkg/controller/options"
"kubesphere.io/kubesphere/pkg/simple/client/application"
"kubesphere.io/kubesphere/pkg/utils/clusterclient"
)

const (
@@ -76,9 +68,10 @@ func (r *AppReleaseReconciler) SetupWithManager(mgr *controller.Manager) error {
r.Client = mgr.GetClient()
clusterClientSet, err := clusterclient.NewClusterClientSet(mgr.GetCache())
if err != nil {
return fmt.Errorf("failed to create cluster client set: %v", err)
return fmt.Errorf("failed to create cluster client set")
}
r.clusterClientSet = clusterClientSet
r.logger = ctrl.Log.WithName("controllers").WithName(helminstallerController)

if r.HelmExecutorOptions == nil || r.HelmExecutorOptions.Image == "" {
return fmt.Errorf("helm executor options is nil or image is empty")
@@ -86,7 +79,7 @@ func (r *AppReleaseReconciler) SetupWithManager(mgr *controller.Manager) error {

r.cmStore, r.ossStore, err = application.InitStore(mgr.Options.S3Options, r.Client)
if err != nil {
klog.Errorf("failed to init store: %v", err)
r.logger.Error(err, "failed to init store")
return err
}

@@ -105,11 +98,11 @@ func (r *AppReleaseReconciler) SetupWithManager(mgr *controller.Manager) error {
func (r *AppReleaseReconciler) mapper(ctx context.Context, o client.Object) (requests []reconcile.Request) {
cluster := o.(*clusterv1alpha1.Cluster)

klog.Infof("cluster %s has been deleted", cluster.Name)
r.logger.V(4).Info("cluster has been deleted", "cluster", cluster)
apprlsList := &appv2.ApplicationReleaseList{}
opts := &client.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{constants.ClusterNameLabelKey: cluster.Name})}
if err := r.List(ctx, apprlsList, opts); err != nil {
klog.Errorf("failed to list application releases: %v", err)
r.logger.Error(err, "failed to list application releases")
return requests
}
for _, apprls := range apprlsList.Items {
@@ -124,15 +117,15 @@ type AppReleaseReconciler struct {
HelmExecutorOptions *kscontroller.HelmExecutorOptions
ossStore s3.Interface
cmStore s3.Interface
logger logr.Logger
}

func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {

apprls := &appv2.ApplicationRelease{}
if err := r.Client.Get(ctx, req.NamespacedName, apprls); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}

logger := r.logger.WithValues("application release", apprls.Name).WithValues("namespace", apprls.Namespace)
timeoutRecheck := apprls.Annotations[appv2.TimeoutRecheck]
var reCheck int
if timeoutRecheck == "" {
@@ -143,12 +136,12 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)

dstKubeConfig, runClient, err := r.getClusterInfo(apprls.GetRlsCluster())
if err != nil {
klog.Errorf("failed to get cluster info: %v", err)
logger.Error(err, "failed to get cluster info")
return ctrl.Result{}, err
}
executor, err := r.getExecutor(apprls, dstKubeConfig, runClient)
if err != nil {
klog.Errorf("failed to get executor: %v", err)
logger.Error(err, "failed to get executor")
return ctrl.Result{}, err
}

@@ -160,18 +153,18 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)

helmKubeConfig, err := application.GetHelmKubeConfig(ctx, cluster, runClient)
if err != nil {
klog.Errorf("failed to get helm kubeconfig: %v", err)
logger.Error(err, "failed to get helm kubeconfig")
return ctrl.Result{}, err
}

if apierrors.IsNotFound(err) || (err == nil && !cluster.DeletionTimestamp.IsZero()) {
klog.Errorf("cluster not found or deleting %s: %v", apprls.GetRlsCluster(), err)
logger.Error(err, "cluster not found or deleting", "cluster", apprls.GetRlsCluster())
apprls.Status.State = appv2.StatusClusterDeleted
apprls.Status.Message = fmt.Sprintf("cluster %s has been deleted", cluster.Name)
patch, _ := json.Marshal(apprls)
err = r.Status().Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("failed to update apprelease %s: %v", apprls.Name, err)
logger.Error(err, "failed to update application release")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
@@ -180,7 +173,7 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
if !controllerutil.ContainsFinalizer(apprls, HelmReleaseFinalizer) && apprls.ObjectMeta.DeletionTimestamp.IsZero() {
expected := apprls.DeepCopy()
controllerutil.AddFinalizer(expected, HelmReleaseFinalizer)
klog.Infof("add finalizer for apprelease %s", apprls.Name)
logger.V(6).Info("add finalizer for application release")
return ctrl.Result{}, r.Patch(ctx, expected, client.MergeFrom(apprls))
}

@@ -194,14 +187,14 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}
wait, err := r.cleanJob(ctx, apprls, runClient)
if err != nil {
klog.Errorf("failed to clean job: %v", err)
logger.Error(err, "failed to clean job")
return ctrl.Result{}, err
}
if wait {
klog.Infof("job wait, job for %s is still active", apprls.Name)
logger.V(6).Info("job wait, job is still active")
return ctrl.Result{RequeueAfter: verificationAgain * time.Second}, nil
}
klog.Infof("job for %s has been cleaned", apprls.Name)
logger.V(4).Info("job has been cleaned")

if err = r.Client.Get(ctx, req.NamespacedName, apprls); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
@@ -209,10 +202,10 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
apprls.Finalizers = nil
err = r.Update(ctx, apprls)
if err != nil {
klog.Errorf("failed to remove finalizer for apprelease %s: %v", apprls.Name, err)
logger.Error(err, "failed to remove finalizer for application release")
return ctrl.Result{}, err
}
klog.Infof("remove finalizer for apprelease %s", apprls.Name)
logger.V(6).Info("remove finalizer for application release")
return ctrl.Result{}, nil
}

@@ -262,10 +255,10 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
if apprls.Status.State != appv2.StatusTimeout {
err = r.updateStatus(ctx, apprls, appv2.StatusTimeout, "Installation timeout")
if err != nil {
klog.Errorf("failed to update apprelease %s status : %v", apprls.Name, err)
logger.Error(err, "failed to update application release status")
return ctrl.Result{}, err
}
klog.Infof("Installation timeout, will check status again after %d second", timeoutVerificationAgain)
logger.V(2).Info("installation timeout, will check status again after seconds", "timeout verification again", timeoutVerificationAgain)
return ctrl.Result{RequeueAfter: timeoutVerificationAgain * time.Second}, nil
}

@@ -278,15 +271,15 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
patch, _ := json.Marshal(apprls)
err = r.Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("failed to update apprelease %s: %v", apprls.Name, err)
logger.Error(err, "failed to update application release")
return ctrl.Result{}, err
}
klog.Infof("update recheck times %s for %s", strconv.Itoa(reCheck+1), apprls.Name)
logger.V(2).Info("update recheck times", "recheck times", strconv.Itoa(reCheck+1))

if deployed {
err = r.updateStatus(ctx, apprls, appv2.StatusActive, "StatusActive")
if err != nil {
klog.Errorf("failed to update apprelease %s %v", apprls.Name, err)
logger.Error(err, "failed to update application release")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
@@ -299,7 +292,7 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
err = r.updateStatus(ctx, apprls, appv2.StatusActive, release.Info.Description)
return ctrl.Result{}, err
default:
klog.V(5).Infof("helm release %s/%s status %s, check again after %d second", apprls.GetRlsNamespace(), apprls.Name, release.Info.Status, verificationAgain)
r.logger.V(5).Info(fmt.Sprintf("helm release %s/%s status %s, check again after %d seconds", apprls.GetRlsNamespace(), apprls.Name, release.Info.Status, verificationAgain))
return ctrl.Result{RequeueAfter: verificationAgain * time.Second}, nil
}
}
@@ -313,12 +306,13 @@ func (r *AppReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}

func (r *AppReleaseReconciler) checkJob(ctx context.Context, apprls *appv2.ApplicationRelease, runClient client.Client, release *helmrelease.Release) (ct ctrl.Result, todo bool, err error) {
klog.Infof("helm release %s/%s ready to create or upgrade yet,check job %s", apprls.GetRlsNamespace(), apprls.Name, apprls.Status.InstallJobName)
logger := r.logger.WithValues("application release", apprls).WithValues("namespace", apprls.Namespace)
logger.V(4).Info(fmt.Sprintf("helm release %s/%s not ready to create or upgrade yet, check job %s", apprls.GetRlsNamespace(), apprls.Name, apprls.Status.InstallJobName))

job := &batchv1.Job{}
if err := runClient.Get(ctx, types.NamespacedName{Namespace: apprls.GetRlsNamespace(), Name: apprls.Status.InstallJobName}, job); err != nil {
if apierrors.IsNotFound(err) {
klog.Errorf("job %s not found", apprls.Status.InstallJobName)
logger.Error(err, "job not found", "install job", apprls.Status.InstallJobName)
msg := "deploy failed, job not found"
return ctrl.Result{}, false, r.updateStatus(ctx, apprls, appv2.StatusDeployFailed, msg)
}
@@ -329,7 +323,7 @@ func (r *AppReleaseReconciler) checkJob(ctx context.Context, apprls *appv2.Appli
return ctrl.Result{}, false, r.updateStatus(ctx, apprls, appv2.StatusActive, "Upgrade successful")
}
if job.Status.Failed > 0 {
klog.Infof("install apprls %s job %s , failed times %d/%d", apprls.Name, job.Name, job.Status.Failed, *job.Spec.BackoffLimit+1)
logger.V(2).Info(fmt.Sprintf("install job failed, failed times %d/%d", job.Status.Failed, *job.Spec.BackoffLimit+1), "job", job.Name)
}
if job.Spec.BackoffLimit != nil && job.Status.Failed > *job.Spec.BackoffLimit {
// When in the upgrade state, if job execution fails while the HelmRelease status remains deployed, directly mark the AppRelease as StatusDeployFailed.
@@ -344,45 +338,47 @@ func (r *AppReleaseReconciler) checkJob(ctx context.Context, apprls *appv2.Appli
}

func (r *AppReleaseReconciler) removeAll(ctx context.Context, apprls *appv2.ApplicationRelease, executor helm.Executor, kubeconfig []byte) (ct ctrl.Result, err error) {
logger := r.logger.WithValues("application release", apprls).WithValues("namespace", apprls.Namespace)
err = r.updateStatus(ctx, apprls, appv2.StatusDeleting, "Uninstalling")
if err != nil {
klog.Errorf("failed to update apprelease %s status : %v", apprls.Name, err)
logger.Error(err, "failed to update application release status")
return ctrl.Result{}, err
}

uninstallJobName, err := r.uninstall(ctx, apprls, executor, kubeconfig)
if err != nil {
klog.Errorf("failed to uninstall helm release %s: %v", apprls.Name, err)
logger.Error(err, "failed to uninstall application release")
return ctrl.Result{}, err
}

err = r.cleanStore(ctx, apprls)
if err != nil {
klog.Errorf("failed to clean store: %v", err)
logger.Error(err, "failed to clean store")
return ctrl.Result{}, err
}
klog.Infof("remove apprelease %s success", apprls.Name)
logger.V(4).Info("remove application release success")

if uninstallJobName != "" {
klog.Infof("try to update uninstall apprls job name %s to apprelease %s", uninstallJobName, apprls.Name)
logger.V(4).Info("try to update application release uninstall job", "job", uninstallJobName)
apprls.Status.UninstallJobName = uninstallJobName
apprls.Status.LastUpdate = metav1.Now()
patch, _ := json.Marshal(apprls)
err = r.Status().Patch(ctx, apprls, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("failed to update apprelease %s: %v", apprls.Name, err)
logger.Error(err, "failed to update application release")
return ctrl.Result{}, err
}
klog.Infof("update uninstall apprls job name %s to apprelease %s success", uninstallJobName, apprls.Name)
logger.V(4).Info("update application release uninstall job success", "job", uninstallJobName)
}

return ctrl.Result{}, nil
}

func (r *AppReleaseReconciler) getClusterDynamicClient(clusterName string, apprls *appv2.ApplicationRelease) (*dynamic.DynamicClient, error) {
logger := r.logger.WithValues("application release", apprls).WithValues("namespace", apprls.Namespace)
clusterClient, err := r.clusterClientSet.GetClusterClient(clusterName)
if err != nil {
klog.Errorf("failed to get cluster client: %v", err)
logger.Error(err, "failed to get cluster client", "cluster", clusterName)
return nil, err
}
creator := apprls.Annotations[constants.CreatorAnnotationKey]
@@ -392,7 +388,7 @@ func (r *AppReleaseReconciler) getClusterDynamicClient(clusterName string, apprl
UserName: creator,
}
}
klog.Infof("DynamicClient impersonate kubeAsUser: %s", creator)
logger.V(4).Info("DynamicClient impersonate kubeAsUser", "creator", creator)
dynamicClient, err := dynamic.NewForConfig(&conf)
return dynamicClient, err
}

pkg/controller/application/apprelease_webhook.go (new file, 73 lines)
@@ -0,0 +1,73 @@
/*
* Please refer to the LICENSE file in the root directory of the project.
* https://github.com/kubesphere/kubesphere/blob/master/LICENSE
*/

package application

import (
"context"
"fmt"
"strings"

ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"

kscontroller "kubesphere.io/kubesphere/pkg/controller"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
appv2 "kubesphere.io/api/application/v2"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"

"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

var _ admission.CustomValidator = &ReleaseWebhook{}
var _ kscontroller.ClusterSelector = &ReleaseWebhook{}
var _ kscontroller.Controller = &ReleaseWebhook{}

type ReleaseWebhook struct {
cache.Cache
}

func (a *ReleaseWebhook) Name() string {
return "applicationrelease-webhook"
}

func (a *ReleaseWebhook) SetupWithManager(mgr *kscontroller.Manager) error {
a.Cache = mgr.GetCache()
return ctrl.NewWebhookManagedBy(mgr).WithValidator(a).For(&appv2.ApplicationRelease{}).Complete()

}

func (a *ReleaseWebhook) Enabled(clusterRole string) bool {
return strings.EqualFold(clusterRole, string(clusterv1alpha1.ClusterRoleHost))
}

func (a *ReleaseWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
return a.validateAppVersionState(ctx, obj.(*appv2.ApplicationRelease))
}

func (a *ReleaseWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (warnings admission.Warnings, err error) {
return a.validateAppVersionState(ctx, newObj.(*appv2.ApplicationRelease))
}

func (a *ReleaseWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
return nil, nil
}

func (a *ReleaseWebhook) validateAppVersionState(ctx context.Context, release *appv2.ApplicationRelease) (warnings admission.Warnings, err error) {
versionID := release.Spec.AppVersionID
appVersion := &appv2.ApplicationVersion{}
err = a.Get(ctx, types.NamespacedName{Name: versionID}, appVersion)
if err != nil {
return nil, err
}
if appVersion.Status.State != appv2.ReviewStatusActive && release.Status.State != appv2.ReviewStatusPassed {

return nil, fmt.Errorf("invalid application version: %s, state: %s, for release: %s",
versionID, appVersion.Status.State, release.Name)
}
return nil, nil
}

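The validator admits an ApplicationRelease only when the referenced ApplicationVersion is active, or when the release itself has already passed review (so existing releases keep working after a version is suspended). A standalone sketch of that acceptance rule with the state fields reduced to plain strings; the constant values here are assumed for illustration, not taken from appv2:

package main

import "fmt"

// The names mirror appv2.ReviewStatusActive / ReviewStatusPassed, but the
// string values are assumptions.
const (
	reviewStatusActive = "active"
	reviewStatusPassed = "passed"
)

// releaseAllowed is the webhook's rejection condition inverted: admit when the
// version is active OR the release has already passed review.
func releaseAllowed(appVersionState, releaseState string) bool {
	return appVersionState == reviewStatusActive || releaseState == reviewStatusPassed
}

func main() {
	fmt.Println(releaseAllowed(reviewStatusActive, ""))          // true: version is active
	fmt.Println(releaseAllowed("suspended", reviewStatusPassed)) // true: release already passed review
	fmt.Println(releaseAllowed("suspended", ""))                 // false: the webhook rejects this release
}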
@@ -9,9 +9,10 @@ import (
"context"
"strings"

"github.com/go-logr/logr"

"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

"k8s.io/klog/v2"
"kubesphere.io/utils/s3"

"kubesphere.io/kubesphere/pkg/simple/client/application"
@@ -37,6 +38,7 @@ type AppVersionReconciler struct {
client.Client
ossStore s3.Interface
cmStore s3.Interface
logger logr.Logger
}

func (r *AppVersionReconciler) Name() string {
@@ -49,6 +51,7 @@ func (r *AppVersionReconciler) Enabled(clusterRole string) bool {

func (r *AppVersionReconciler) SetupWithManager(mgr *kscontroller.Manager) (err error) {
r.Client = mgr.GetClient()
r.logger = ctrl.Log.WithName("controllers").WithName(appVersionController)
r.cmStore, r.ossStore, err = application.InitStore(mgr.Options.S3Options, r.Client)
if err != nil {
klog.Errorf("failed to init store: %v", err)
r.logger.Error(err, "failed to init store")
return err
}
return ctrl.NewControllerManagedBy(mgr).
@@ -66,12 +69,18 @@ func (r *AppVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request)
if err := r.Client.Get(ctx, req.NamespacedName, appVersion); err != nil {
return reconcile.Result{}, client.IgnoreNotFound(err)
}
logger := r.logger.WithValues("application version", appVersion.Name)
if !controllerutil.ContainsFinalizer(appVersion, appv2.CleanupFinalizer) {
controllerutil.RemoveFinalizer(appVersion, appv2.StoreCleanFinalizer)
controllerutil.AddFinalizer(appVersion, appv2.CleanupFinalizer)
return ctrl.Result{}, r.Update(ctx, appVersion)
}

// Delete app files; this is non-critical logic, errors will not affect the main process
if !appVersion.ObjectMeta.DeletionTimestamp.IsZero() {
err := r.deleteFile(ctx, appVersion)
if err != nil {
klog.Errorf("Failed to clean file for appversion %s: %v", appVersion.Name, err)
logger.Error(err, "Failed to clean file")
}
}

@@ -79,32 +88,33 @@ func (r *AppVersionReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}

func (r *AppVersionReconciler) deleteFile(ctx context.Context, appVersion *appv2.ApplicationVersion) error {
logger := r.logger.WithValues("application version", appVersion.Name)
defer func() {
controllerutil.RemoveFinalizer(appVersion, appv2.StoreCleanFinalizer)
controllerutil.RemoveFinalizer(appVersion, appv2.CleanupFinalizer)
err := r.Update(ctx, appVersion)
if err != nil {
klog.Errorf("Failed to remove finalizer from appversion %s: %v", appVersion.Name, err)
logger.Error(err, "Failed to remove finalizer from application version")
}
klog.Infof("Remove finalizer from appversion %s successfully", appVersion.Name)
logger.V(4).Info("Removed finalizer from application version successfully")
}()

klog.Infof("ApplicationVersion %s has been deleted, try to clean file", appVersion.Name)
logger.V(4).Info("ApplicationVersion has been deleted, try to clean file")
id := []string{appVersion.Name}
apprls := &appv2.ApplicationReleaseList{}
err := r.Client.List(ctx, apprls, client.MatchingLabels{appv2.AppVersionIDLabelKey: appVersion.Name})
if err != nil {
klog.Errorf("Failed to list ApplicationRelease: %v", err)
logger.Error(err, "Failed to list ApplicationRelease")
return err
}
if len(apprls.Items) > 0 {
klog.Infof("ApplicationVersion %s is still in use, keep file in store", appVersion.Name)
logger.V(4).Info("ApplicationVersion is still in use, keep file in store")
return nil
}
err = application.FailOverDelete(r.cmStore, r.ossStore, id)
if err != nil {
klog.Errorf("Fail to delete appversion %s from store: %v", appVersion.Name, err)
logger.Error(err, "Fail to delete application version from store")
return err
}
klog.Infof("Delete file %s from store successfully", appVersion.Name)
logger.V(4).Info("Delete file from store successfully")
return nil
}

@@ -12,7 +12,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog/v2"
appv2 "kubesphere.io/api/application/v2"
"kubesphere.io/utils/helm"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -22,21 +21,21 @@ import (
)

func (r *AppReleaseReconciler) uninstall(ctx context.Context, rls *appv2.ApplicationRelease, executor helm.Executor, kubeconfig []byte) (jobName string, err error) {

klog.Infof("uninstall helm release %s", rls.Name)
logger := r.logger.WithValues("application release", rls).WithValues("namespace", rls.Namespace)
logger.V(4).Info("uninstall helm release")

creator := rls.Annotations[constants.CreatorAnnotationKey]
klog.Infof("helm impersonate kubeAsUser: %s", creator)
logger.V(4).Info("helm impersonate kubeAsUser", "creator", creator)
options := []helm.HelmOption{
helm.SetNamespace(rls.GetRlsNamespace()),
helm.SetKubeconfig(kubeconfig),
}

if jobName, err = executor.Uninstall(ctx, rls.Name, options...); err != nil {
klog.Error(err, "failed to force delete helm release")
logger.Error(err, "failed to force delete helm release")
return jobName, err
}
klog.Infof("uninstall helm release %s success,job name: %s", rls.Name, jobName)
logger.Info("uninstall helm release success", "job", jobName)

return jobName, nil
}
@@ -49,12 +48,13 @@ func (r *AppReleaseReconciler) jobStatus(job *batchv1.Job) (active, completed, f
}

func (r *AppReleaseReconciler) createOrUpgradeAppRelease(ctx context.Context, rls *appv2.ApplicationRelease, executor helm.Executor, kubeconfig []byte) error {
logger := r.logger.WithValues("application release", rls).WithValues("namespace", rls.Namespace)
clusterName := rls.GetRlsCluster()
namespace := rls.GetRlsNamespace()
klog.Infof("begin to create or upgrade %s app release %s in cluster %s ns: %s", rls.Spec.AppType, rls.Name, clusterName, namespace)
logger.V(6).Info("begin to create or upgrade app release", "cluster", clusterName)

creator := rls.Annotations[constants.CreatorAnnotationKey]
klog.Infof("helm impersonate kubeAsUser: %s", creator)
logger.V(6).Info("helm impersonate kubeAsUser", "creator", creator)
options := []helm.HelmOption{
helm.SetInstall(true),
helm.SetNamespace(namespace),
@@ -66,23 +66,23 @@ func (r *AppReleaseReconciler) createOrUpgradeAppRelease(ctx context.Context, rl
if rls.Spec.AppType == appv2.AppTypeHelm {
_, err := executor.Get(ctx, rls.Name, options...)
if err != nil && err.Error() == "release: not found" {
klog.Infof("release %s not found, begin to create", rls.Name)
logger.V(4).Info("release not found, begin to create")
}
if err == nil {
klog.Infof("release %s found, begin to upgrade", rls.Name)
logger.V(6).Info("release found, begin to upgrade")
state = appv2.StatusUpgraded
}
}

data, err := application.FailOverGet(r.cmStore, r.ossStore, rls.Spec.AppVersionID, r.Client, true)
if err != nil {
klog.Errorf("failed to get app version data, err: %v", err)
logger.Error(err, "failed to get app version data")
return err
}
options = append(options, helm.SetChartData(data))

if rls.Status.InstallJobName, err = executor.Upgrade(ctx, rls.Name, "", rls.Spec.Values, options...); err != nil {
klog.Errorf("failed to create executor job, err: %v", err)
logger.Error(err, "failed to create executor job")
return r.updateStatus(ctx, rls, appv2.StatusFailed, err.Error())
}

@@ -99,22 +99,23 @@ func (r *AppReleaseReconciler) getExecutor(apprls *appv2.ApplicationRelease, kub
}

func (r *AppReleaseReconciler) getYamlInstaller(runClient client.Client, apprls *appv2.ApplicationRelease) (executor helm.Executor, err error) {
logger := r.logger.WithValues("application release", apprls).WithValues("namespace", apprls.Namespace)
dynamicClient, err := r.getClusterDynamicClient(apprls.GetRlsCluster(), apprls)
if err != nil {
klog.Errorf("failed to get dynamic client: %v", err)
logger.Error(err, "failed to get dynamic client")
return nil, err
}

jsonList, err := application.ReadYaml(apprls.Spec.Values)
if err != nil {
klog.Errorf("failed to read yaml: %v", err)
logger.Error(err, "failed to read yaml")
return nil, err
}
var gvrListInfo []application.InsInfo
for _, i := range jsonList {
gvr, utd, err := application.GetInfoFromBytes(i, runClient.RESTMapper())
if err != nil {
klog.Errorf("failed to get info from bytes: %v", err)
logger.Error(err, "failed to get info from bytes")
return nil, err
}
ins := application.InsInfo{
@@ -134,6 +135,7 @@ func (r *AppReleaseReconciler) getYamlInstaller(runClient client.Client, apprls
}

func (r *AppReleaseReconciler) getHelmExecutor(apprls *appv2.ApplicationRelease, kubeconfig []byte) (executor helm.Executor, err error) {
logger := r.logger.WithValues("application release", apprls).WithValues("namespace", apprls.Namespace)
executorOptions := []helm.ExecutorOption{
helm.SetExecutorKubeConfig(kubeconfig),
helm.SetExecutorNamespace(apprls.GetRlsNamespace()),
@@ -148,7 +150,7 @@ func (r *AppReleaseReconciler) getHelmExecutor(apprls *appv2.ApplicationRelease,

executor, err = helm.NewExecutor(executorOptions...)
if err != nil {
klog.Errorf("failed to create helm executor: %v", err)
logger.Error(err, "failed to create helm executor")
return nil, err
}

@@ -156,25 +158,25 @@ func (r *AppReleaseReconciler) getHelmExecutor(apprls *appv2.ApplicationRelease,
}

func (r *AppReleaseReconciler) cleanJob(ctx context.Context, apprls *appv2.ApplicationRelease, runClient client.Client) (wait bool, err error) {

logger := r.logger.WithValues("application release", apprls).WithValues("namespace", apprls.Namespace)
jobs := &batchv1.JobList{}

opts := []client.ListOption{client.InNamespace(apprls.GetRlsNamespace()), client.MatchingLabels{appv2.AppReleaseReferenceLabelKey: apprls.Name}}
err = runClient.List(ctx, jobs, opts...)
if err != nil {
klog.Errorf("failed to list job for %s: %v", apprls.Name, err)
logger.Error(err, "failed to list job")
return false, err
}
if len(jobs.Items) == 0 {
klog.Infof("cluster: %s namespace: %s no job found for %s", apprls.GetRlsCluster(), apprls.GetRlsNamespace(), apprls.Name)
logger.V(6).Info("no job found", "cluster", apprls.GetRlsCluster())
return false, nil
}
klog.Infof("found %d jobs for %s", len(jobs.Items), apprls.Name)
logger.V(6).Info("found jobs", "job number", len(jobs.Items))
for _, job := range jobs.Items {
klog.Infof("begin to clean job %s/%s", job.Namespace, job.Name)
logger.V(6).Info("begin to clean job", "namespace", job.Namespace, "job", job.Name)
jobActive, jobCompleted, failed := r.jobStatus(&job)
if jobActive {
klog.Infof("job %s is still active", job.Name)
logger.V(6).Info("job is still active", "job", job.Name)
return true, nil
}
if jobCompleted || failed {
@@ -182,18 +184,18 @@ func (r *AppReleaseReconciler) cleanJob(ctx context.Context, apprls *appv2.Appli
opt := client.DeleteOptions{PropagationPolicy: &deletePolicy}
err = runClient.Delete(ctx, &job, &opt)
if err != nil {
klog.Errorf("failed to delete job %s: %v", job.Name, err)
logger.Error(err, "failed to delete job", "job", job.Name)
return false, err
}
klog.Infof("job %s has been deleted", job.Name)
logger.V(4).Info("job has been deleted", "job", job.Name)
} else {
klog.Infof("job:%s status unknown, wait for next reconcile: %v", job.Name, job.Status)
logger.V(4).Info("job status unknown, wait for next reconcile", "job", job.Name, "status", job.Status)
return true, nil
}

}

klog.Infof("all job has been deleted")
logger.Info("all jobs have been deleted")
return false, nil
}

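cleanJob treats the three counters returned by jobStatus as a small phase machine: active jobs force a requeue, completed or failed jobs are deleted with background propagation, and anything else waits for the next reconcile. A compact sketch of deriving such a phase from batchv1.JobStatus; the actual jobStatus helper may weigh job conditions differently:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

// jobPhase collapses the status counters into the active/completed/failed triple.
func jobPhase(job *batchv1.Job) string {
	switch {
	case job.Status.Active > 0:
		return "active" // still running: requeue and check again later
	case job.Status.Succeeded > 0:
		return "completed" // safe to delete with background propagation
	case job.Status.Failed > 0:
		return "failed" // also deleted; the failure is already reflected in the release status
	default:
		return "unknown" // freshly created or status not yet reported: wait for next reconcile
	}
}

func main() {
	job := &batchv1.Job{}
	job.Status.Succeeded = 1
	fmt.Println(jobPhase(job)) // completed
}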
@@ -202,13 +204,13 @@ func (r *AppReleaseReconciler) cleanStore(ctx context.Context, apprls *appv2.App
appVersion := &appv2.ApplicationVersion{}
err = r.Get(ctx, client.ObjectKey{Name: name}, appVersion)
if apierrors.IsNotFound(err) {
klog.Infof("appVersion %s has been deleted, cleanup file in oss", name)
r.logger.Info("application version has been deleted, cleanup file in oss", "application version", name)
err = application.FailOverDelete(r.cmStore, r.ossStore, []string{appVersion.Name})
if err != nil {
klog.Warningf("failed to cleanup file in oss: %v", err)
r.logger.Error(err, "failed to cleanup file in oss")
return nil
}
}
klog.Infof("appVersion %s still exists, no need to cleanup file in oss", name)
r.logger.V(6).Info("application version still exists, no need to cleanup file in oss", "application version", name)
return nil
}

@@ -13,36 +13,31 @@ import (
"strings"
"time"

"k8s.io/utils/ptr"

apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"kubesphere.io/api/constants"
tenantv1beta1 "kubesphere.io/api/tenant/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/handler"

"kubesphere.io/utils/s3"

clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"

kscontroller "kubesphere.io/kubesphere/pkg/controller"

"github.com/go-logr/logr"
helmrepo "helm.sh/helm/v3/pkg/repo"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
appv2 "kubesphere.io/api/application/v2"
clusterv1alpha1 "kubesphere.io/api/cluster/v1alpha1"
"kubesphere.io/api/constants"
tenantv1beta1 "kubesphere.io/api/tenant/v1beta1"
"kubesphere.io/utils/s3"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"

kscontroller "kubesphere.io/kubesphere/pkg/controller"
"kubesphere.io/kubesphere/pkg/simple/client/application"
)

const helmRepoController = "helmrepo"
const helmRepoController = "helmrepo-controller"

var _ reconcile.Reconciler = &RepoReconciler{}
var _ kscontroller.Controller = &RepoReconciler{}
@@ -52,6 +47,7 @@ type RepoReconciler struct {
client.Client
ossStore s3.Interface
cmStore s3.Interface
logger logr.Logger
}

func (r *RepoReconciler) Name() string {
@@ -65,11 +61,11 @@ func (r *RepoReconciler) Enabled(clusterRole string) bool {
func (r *RepoReconciler) mapper(ctx context.Context, o client.Object) (requests []reconcile.Request) {
workspace := o.(*tenantv1beta1.WorkspaceTemplate)

klog.Infof("workspace %s has been deleted", workspace.Name)
r.logger.V(4).Info("workspace has been deleted", "workspace", workspace.Name)
repoList := &appv2.RepoList{}
opts := &client.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{constants.WorkspaceLabelKey: workspace.Name})}
if err := r.List(ctx, repoList, opts); err != nil {
klog.Errorf("failed to list repo: %v", err)
r.logger.Error(err, "failed to list repo")
return requests
}
for _, repo := range repoList.Items {
@@ -81,10 +77,10 @@ func (r *RepoReconciler) mapper(ctx context.Context, o client.Object) (requests
func (r *RepoReconciler) SetupWithManager(mgr *kscontroller.Manager) (err error) {
r.Client = mgr.GetClient()
r.recorder = mgr.GetEventRecorderFor(helmRepoController)

r.logger = ctrl.Log.WithName("controllers").WithName(helmRepoController)
r.cmStore, r.ossStore, err = application.InitStore(mgr.Options.S3Options, r.Client)
if err != nil {
klog.Errorf("failed to init store: %v", err)
r.logger.Error(err, "failed to init store")
return err
}

@@ -104,30 +100,32 @@ func (r *RepoReconciler) UpdateStatus(ctx context.Context, helmRepo *appv2.Repo)
newRepo.Name = helmRepo.Name
newRepo.Status.State = helmRepo.Status.State
newRepo.Status.LastUpdateTime = metav1.Now()
logger := r.logger.WithValues("repo", helmRepo.Name)

patch, _ := json.Marshal(newRepo)
err := r.Status().Patch(ctx, newRepo, client.RawPatch(client.Merge.Type(), patch))
if err != nil {
klog.Errorf("update status failed, error: %s", err)
logger.Error(err, "update status failed")
return err
}
klog.Infof("update repo %s status: %s", helmRepo.GetName(), helmRepo.Status.State)
logger.V(4).Info("update repo status", "status", helmRepo.Status.State)
return nil
}

func (r *RepoReconciler) skipSync(helmRepo *appv2.Repo) (bool, error) {
logger := r.logger.WithValues("repo", helmRepo.Name)
if helmRepo.Status.State == appv2.StatusManualTrigger || helmRepo.Status.State == appv2.StatusSyncing {
klog.Infof("repo: %s state: %s", helmRepo.GetName(), helmRepo.Status.State)
logger.V(4).Info(fmt.Sprintf("repo state: %s", helmRepo.Status.State))
return false, nil
}

if helmRepo.Spec.SyncPeriod == nil || *helmRepo.Spec.SyncPeriod == 0 {
klog.Infof("repo: %s no sync SyncPeriod=0", helmRepo.GetName())
logger.V(4).Info("repo no sync SyncPeriod=0")
return true, nil
}
passed := time.Since(helmRepo.Status.LastUpdateTime.Time).Seconds()
if helmRepo.Status.State == appv2.StatusSuccessful && passed < float64(*helmRepo.Spec.SyncPeriod) {
klog.Infof("last sync time is %s, passed %f, no need to sync, repo: %s", helmRepo.Status.LastUpdateTime, passed, helmRepo.GetName())
logger.V(4).Info(fmt.Sprintf("last sync time is %s, passed %f, no need to sync", helmRepo.Status.LastUpdateTime, passed))
return true, nil
}
return false, nil
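skipSync gates repo synchronization on two things: a configured SyncPeriod and the freshness of the last successful sync. A condensed sketch of that decision; the "successful" state string and the concrete numeric type of the period are assumptions here, appv2 defines the real ones:

package main

import (
	"fmt"
	"time"
)

// skipSync mirrors the gating above: no configured period disables periodic
// sync, and a still-fresh successful sync is skipped.
func skipSync(syncPeriod *int, lastUpdate time.Time, lastState string) bool {
	if syncPeriod == nil || *syncPeriod == 0 {
		return true // periodic sync disabled for this repo
	}
	passed := time.Since(lastUpdate).Seconds()
	return lastState == "successful" && passed < float64(*syncPeriod)
}

func main() {
	period := 600 // seconds
	fmt.Println(skipSync(&period, time.Now().Add(-5*time.Minute), "successful"))  // true: still fresh
	fmt.Println(skipSync(&period, time.Now().Add(-20*time.Minute), "successful")) // false: due for another sync
	fmt.Println(skipSync(nil, time.Now(), "successful"))                          // true: no period configured
}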
@@ -155,10 +153,10 @@ func filterVersions(versions []*helmrepo.ChartVersion) []*helmrepo.ChartVersion
}

func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {

logger := r.logger.WithValues("repo", request.Name)
helmRepo := &appv2.Repo{}
if err := r.Client.Get(ctx, request.NamespacedName, helmRepo); err != nil {
klog.Errorf("get helm repo failed, error: %s", err)
logger.Error(err, "get helm repo failed")
return reconcile.Result{}, client.IgnoreNotFound(err)
}
if helmRepo.Status.State == "" {
@@ -178,10 +176,10 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
if workspaceName != "" {
err := r.Get(ctx, types.NamespacedName{Name: workspaceName}, workspaceTemplate)
if apierrors.IsNotFound(err) || (err == nil && !workspaceTemplate.DeletionTimestamp.IsZero()) {
klog.Infof("workspace not found or deleting %s %s", workspaceName, err)
logger.V(4).Error(err, "workspace not found or deleting", "workspace", workspaceName)
err = r.Delete(ctx, helmRepo)
if err != nil {
klog.Errorf("delete helm repo failed, error: %s", err)
logger.Error(err, "delete helm repo failed")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
@@ -201,13 +199,13 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
err = r.UpdateStatus(ctx, helmRepo)

if err != nil {
klog.Errorf("update status failed, error: %s", err)
logger.Error(err, "update status failed")
return reconcile.Result{}, err
}

index, err := application.LoadRepoIndex(helmRepo.Spec.Url, helmRepo.Spec.Credential)
if err != nil {
klog.Errorf("load index failed, repo: %s, url: %s, err: %s", helmRepo.GetName(), helmRepo.Spec.Url, err)
logger.Error(err, "load index failed", "url", helmRepo.Spec.Url)
return reconcile.Result{}, err
}

@@ -217,7 +215,7 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
}
err = r.Client.List(ctx, appList, &opts)
if err != nil {
klog.Errorf("list appversion failed, error: %s", err)
logger.Error(err, "list application failed")
return reconcile.Result{}, err
}
indexMap := make(map[string]struct{})
@@ -228,10 +226,10 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
}
for _, i := range appList.Items {
if _, exists := indexMap[i.Name]; !exists {
klog.Infof("app %s has been removed from the repo", i.Name)
logger.V(4).Info("application has been removed from the repo", "application", i.Name)
err = r.Client.Delete(ctx, &i)
if err != nil {
klog.Errorf("delete app %s failed, error: %s", i.Name, err)
logger.Error(err, "delete application failed", "application", i.Name)
return reconcile.Result{}, err
}
}
@@ -239,22 +237,22 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques

for appName, versions := range index.Entries {
if len(versions) == 0 {
klog.Infof("no version found for %s", appName)
logger.V(4).Info("no version found for application", "application", appName)
continue
}

versions = filterVersions(versions)

vRequests, err := repoParseRequest(r.Client, versions, helmRepo, appName, appList)
vRequests, err := r.repoParseRequest(ctx, versions, helmRepo, appName, appList)
if err != nil {
klog.Errorf("parse request failed, error: %s", err)
logger.Error(err, "parse request failed")
return reconcile.Result{}, err
}
if len(vRequests) == 0 {
continue
}

klog.Infof("found %d/%d versions for %s need to upgrade or create", len(vRequests), len(versions), appName)
logger.V(6).Info(fmt.Sprintf("found %d/%d versions for application %s need to upgrade or create", len(vRequests), len(versions), appName))

own := metav1.OwnerReference{
APIVersion: appv2.SchemeGroupVersion.String(),
@@ -263,7 +261,7 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
UID: helmRepo.UID,
}
if err = application.CreateOrUpdateApp(r.Client, vRequests, r.cmStore, r.ossStore, own); err != nil {
klog.Errorf("create or update app failed, error: %s", err)
logger.Error(err, "create or update app failed")
return reconcile.Result{}, err
}
}
@@ -271,7 +269,7 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
helmRepo.Status.State = appv2.StatusSuccessful
err = r.UpdateStatus(ctx, helmRepo)
if err != nil {
klog.Errorf("update status failed, error: %s", err)
logger.Error(err, "update status failed")
return reconcile.Result{}, err
}

@@ -280,9 +278,10 @@ func (r *RepoReconciler) Reconcile(ctx context.Context, request reconcile.Reques
return reconcile.Result{RequeueAfter: requeueAfter}, nil
}

func repoParseRequest(cli client.Client, versions helmrepo.ChartVersions, helmRepo *appv2.Repo, appName string, appList *appv2.ApplicationList) (createOrUpdateList []application.AppRequest, err error) {
func (r *RepoReconciler) repoParseRequest(ctx context.Context, versions helmrepo.ChartVersions, helmRepo *appv2.Repo, appName string, appList *appv2.ApplicationList) (createOrUpdateList []application.AppRequest, err error) {
appVersionList := &appv2.ApplicationVersionList{}

logger := r.logger.WithValues("repo", helmRepo.Name)
appID := fmt.Sprintf("%s-%s", helmRepo.Name, application.GenerateShortNameMD5Hash(appName))
opts := client.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{
@@ -290,9 +289,9 @@ func repoParseRequest(cli client.Client, versions helmrepo.ChartVersions, helmRe
appv2.AppIDLabelKey: appID,
}),
}
err = cli.List(context.Background(), appVersionList, &opts)
err = r.Client.List(ctx, appVersionList, &opts)
if err != nil {
klog.Errorf("list appversion failed, error: %s", err)
logger.Error(err, "list application version failed")
return nil, err
}

@@ -310,10 +309,10 @@ func repoParseRequest(cli client.Client, versions helmrepo.ChartVersions, helmRe
key := fmt.Sprintf("%s-%s", i.GetLabels()[appv2.AppIDLabelKey], LegalVersion)
_, exists := versionMap[key]
if !exists {
klog.Infof("delete appversion %s", i.GetName())
err = cli.Delete(context.Background(), &i)
logger.V(4).Info("delete application version", "application version", i.GetName())
err = r.Client.Delete(ctx, &i)
if err != nil {
klog.Errorf("delete appversion failed, error: %s", err)
logger.Error(err, "delete application version failed")
return nil, err
}
} else {
@@ -330,7 +329,7 @@ func repoParseRequest(cli client.Client, versions helmrepo.ChartVersions, helmRe
|
||||
continue
|
||||
}
|
||||
if dig != "" {
|
||||
klog.Infof("digest not match, key: %s, digest: %s, ver.Digest: %s", key, dig, ver.Digest)
|
||||
logger.V(4).Info(fmt.Sprintf("digest not match, key: %s, digest: %s, ver.Digest: %s", key, dig, ver.Digest))
|
||||
}
|
||||
vRequest := generateVRequest(helmRepo, ver, shortName, appName)
|
||||
createOrUpdateList = append(createOrUpdateList, vRequest)
|
||||
|
||||
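The refactor above swaps package-level klog calls for a logr.Logger scoped with WithValues, so every record emitted during one reconcile carries the repo name. A minimal, self-contained sketch of the same pattern; zap.New, the repo name, and the URL here are illustrative, not part of this change:

package main

import (
    "errors"

    "github.com/go-logr/logr"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"
)

// syncRepo mimics the logging style adopted above: one scoped logger,
// verbosity levels instead of klog.V, and structured key/value context.
func syncRepo(logger logr.Logger, repo string) {
    logger = logger.WithValues("repo", repo)
    logger.V(4).Info("loading index")
    if err := errors.New("connection refused"); err != nil {
        logger.Error(err, "load index failed", "url", "https://charts.example.com")
    }
}

func main() {
    syncRepo(zap.New(), "sample-repo")
}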
@@ -27,6 +27,7 @@ import (

    "kubesphere.io/kubesphere/pkg/constants"
    kscontroller "kubesphere.io/kubesphere/pkg/controller"
    "kubesphere.io/kubesphere/pkg/models/kubeconfig"
)

const (
@@ -49,6 +50,11 @@ func (r *Reconciler) Name() string {
}

func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
    if mgr.KubeconfigOptions.AuthMode != kubeconfig.AuthModeClientCertificate {
        klog.Infof("Skip %s controller as the auth mode is not client certificate", controllerName)
        return nil
    }

    r.recorder = mgr.GetEventRecorderFor(controllerName)
    r.Client = mgr.GetClient()
    return builder.
@@ -459,7 +459,7 @@ func (r *Reconciler) tryFetchKubeSphereVersion(ctx context.Context, cluster *clu
        port = "443"
    }
    response, err := clusterClient.KubernetesClient.CoreV1().Services(constants.KubeSphereNamespace).
        ProxyGet(scheme, constants.KubeSphereAPIServerName, port, "/kapis/version", nil).
        ProxyGet(scheme, constants.KubeSphereAPIServerName, port, "/version", nil).
        DoRaw(ctx)
    if err != nil {
        return "", err
@@ -28,9 +28,11 @@ import (
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    "kubesphere.io/kubesphere/pkg/constants"
    kscontroller "kubesphere.io/kubesphere/pkg/controller"
    "kubesphere.io/kubesphere/pkg/controller/cluster/predicate"
    clusterutils "kubesphere.io/kubesphere/pkg/controller/cluster/utils"
    "kubesphere.io/kubesphere/pkg/models/kubeconfig"
    "kubesphere.io/kubesphere/pkg/utils/clusterclient"
)

@@ -178,6 +180,11 @@ func (r *Reconciler) assignClusterAdminRole(ctx context.Context, clusterName str
            APIGroup: rbacv1.GroupName,
            Name:     username,
        },
        {
            Kind:      rbacv1.ServiceAccountKind,
            Name:      fmt.Sprintf(kubeconfig.UserKubeConfigServiceAccountNameFormat, username),
            Namespace: constants.KubeSphereNamespace,
        },
    }
    clusterRoleBinding.RoleRef = rbacv1.RoleRef{
        APIGroup: rbacv1.GroupName,
@@ -9,6 +9,10 @@ import (
    "context"
    "fmt"

    appsv1 "k8s.io/api/apps/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/klog/v2"

    "github.com/go-logr/logr"
    corev1 "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
@@ -117,6 +121,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (reconcile
        return ctrl.Result{}, err
    }

    if err := r.checkServiceAccountRefPod(ctx, sa); err != nil {
        logger.Error(err, "failed to check service account ref pod")
        return ctrl.Result{}, err
    }

    return ctrl.Result{}, nil
}

@@ -180,3 +189,102 @@ func (r *Reconciler) checkSecretToken(secret *v1.Secret, subjectName string) err
    }
    return nil
}

func (r *Reconciler) checkServiceAccountRefPod(ctx context.Context, sa *corev1alpha1.ServiceAccount) error {
    if len(sa.Secrets) == 0 {
        klog.Warningf("service account %s has no secrets", sa.Name)
        return nil
    }
    pods := &v1.PodList{}
    if err := r.Client.List(ctx, pods, client.InNamespace(sa.Namespace)); err != nil {
        return err
    }

    saSecrets := sa.Secrets[0].Name
    for _, pod := range pods.Items {
        if pod.Annotations[AnnotationServiceAccountName] != sa.Name {
            continue
        }
        for _, volume := range pod.Spec.Volumes {
            if volume.Name == ServiceAccountVolumeName &&
                len(volume.Projected.Sources) > 0 &&
                saSecrets == volume.Projected.Sources[0].Secret.Name {
                continue
            }
        }
        if err := r.rolloutRestartPod(ctx, &pod); err != nil {
            return err
        }
    }
    return nil
}

func (r *Reconciler) rolloutRestartPod(ctx context.Context, pod *v1.Pod) error {
    // check ownerReferences
    if len(pod.OwnerReferences) == 0 {
        klog.Infof("Pod has no owner references")
        return nil
    }

    owner := pod.OwnerReferences[0]
    switch owner.Kind {
    case "ReplicaSet":
        rs := &appsv1.ReplicaSet{}
        if err := r.Client.Get(ctx, types.NamespacedName{
            Namespace: pod.Namespace,
            Name:      owner.Name,
        }, rs); err != nil {
            return err
        }
        if len(rs.OwnerReferences) > 0 && rs.OwnerReferences[0].Kind == "Deployment" {
            deployName := rs.OwnerReferences[0].Name
            deploy := &appsv1.Deployment{}
            if err := r.Client.Get(ctx, types.NamespacedName{
                Namespace: pod.Namespace,
                Name:      deployName,
            }, deploy); err != nil {
                return err
            }
            if deploy.Spec.Template.ObjectMeta.Annotations == nil {
                deploy.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
            }
            deploy.Spec.Template.ObjectMeta.Annotations["kubesphere.io/restartedAt"] = metav1.Now().String()
            if err := r.Client.Update(ctx, deploy); err != nil {
                return err
            }
        }
    case "StatefulSet":
        sts := &appsv1.StatefulSet{}
        if err := r.Client.Get(ctx, types.NamespacedName{
            Namespace: pod.Namespace,
            Name:      owner.Name,
        }, sts); err != nil {
            return err
        }
        if sts.Spec.Template.ObjectMeta.Annotations == nil {
            sts.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
        }
        sts.Spec.Template.ObjectMeta.Annotations["kubesphere.io/restartedAt"] = metav1.Now().String()
        if err := r.Client.Update(ctx, sts); err != nil {
            return err
        }
    case "DaemonSet":
        ds := &appsv1.DaemonSet{}
        if err := r.Client.Get(ctx, types.NamespacedName{
            Namespace: pod.Namespace,
            Name:      owner.Name,
        }, ds); err != nil {
            return err
        }
        if ds.Spec.Template.ObjectMeta.Annotations == nil {
            ds.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
        }
        ds.Spec.Template.ObjectMeta.Annotations["kubesphere.io/restartedAt"] = metav1.Now().String()
        if err := r.Client.Update(ctx, ds); err != nil {
            return err
        }
    default:
        klog.Warningf("Unsupported owner kind %s", owner.Kind)
    }
    return nil
}
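rolloutRestartPod follows the same convention kubectl uses for kubectl rollout restart: bumping a pod-template annotation changes the template hash, so the owning workload controller rolls out fresh pods. kubectl writes kubectl.kubernetes.io/restartedAt; the controller above uses its own kubesphere.io/restartedAt key. The core move in isolation, as a sketch against any controller-runtime client:

package restart

import (
    "context"
    "time"

    appsv1 "k8s.io/api/apps/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// restartDeployment triggers a rolling restart by touching a pod-template
// annotation; the changed template forces a new ReplicaSet rollout.
func restartDeployment(ctx context.Context, c client.Client, namespace, name string) error {
    deploy := &appsv1.Deployment{}
    if err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, deploy); err != nil {
        return err
    }
    if deploy.Spec.Template.Annotations == nil {
        deploy.Spec.Template.Annotations = map[string]string{}
    }
    deploy.Spec.Template.Annotations["kubesphere.io/restartedAt"] = time.Now().Format(time.RFC3339)
    return c.Update(ctx, deploy)
}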
@@ -17,6 +17,7 @@ import (

    certificatesv1 "k8s.io/api/certificates/v1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apiserver/pkg/authentication/user"
    "k8s.io/client-go/rest"
@@ -24,19 +25,17 @@ import (
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    certutil "k8s.io/client-go/util/cert"
    "k8s.io/klog/v2"
    "sigs.k8s.io/controller-runtime/pkg/builder"
    "sigs.k8s.io/controller-runtime/pkg/predicate"

    "kubesphere.io/kubesphere/pkg/constants"
    "kubesphere.io/kubesphere/pkg/models/kubeconfig"
    "kubesphere.io/kubesphere/pkg/utils/pkiutil"

    kscontroller "kubesphere.io/kubesphere/pkg/controller"

    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/builder"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/predicate"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    "kubesphere.io/kubesphere/pkg/constants"
    kscontroller "kubesphere.io/kubesphere/pkg/controller"
    "kubesphere.io/kubesphere/pkg/models/kubeconfig"
    "kubesphere.io/kubesphere/pkg/utils/pkiutil"
)

const (
@@ -50,7 +49,8 @@ var _ reconcile.Reconciler = &Reconciler{}
// Reconciler reconciles a User object
type Reconciler struct {
    client.Client
    config *rest.Config
    config  *rest.Config
    options *kubeconfig.Options
}

func (r *Reconciler) Name() string {
@@ -60,6 +60,7 @@ func (r *Reconciler) Name() string {
func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
    r.Client = mgr.GetClient()
    r.config = mgr.K8sClient.Config()
    r.options = mgr.KubeconfigOptions
    return ctrl.NewControllerManagedBy(mgr).
        Named(controllerName).
        WithOptions(controller.Options{MaxConcurrentReconciles: 1}).
@@ -92,7 +93,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco

func (r *Reconciler) UpdateSecret(ctx context.Context, secret *corev1.Secret) error {
    // already exists and the cert will not expire within 3 days
    if isValid(secret) {
    if r.isValid(secret) {
        return nil
    }
@@ -146,15 +147,24 @@ func (r *Reconciler) UpdateSecret(ctx context.Context, secret *corev1.Secret) er
        return err
    }

    if err = r.createCSR(ctx, username); err != nil {
        klog.Errorf("Failed to create CSR for user %s: %v", username, err)
        return err
    if r.options.AuthMode == kubeconfig.AuthModeClientCertificate {
        if err = r.createCSR(ctx, username); err != nil {
            klog.Errorf("Failed to create CSR for user %s: %v", username, err)
            return err
        }
    }

    if r.options.AuthMode == kubeconfig.AuthModeServiceAccountToken {
        if err = r.createServiceAccount(ctx, username); err != nil {
            klog.Errorf("Failed to create sa for user %s: %v", username, err)
            return err
        }
    }

    return nil
}

func isValid(secret *corev1.Secret) bool {
func (r *Reconciler) isValid(secret *corev1.Secret) bool {
    username := secret.Labels[constants.UsernameLabelKey]

    data := secret.Data[kubeconfig.FileName]
@@ -169,14 +179,19 @@ func isValid(secret *corev1.Secret) bool {
    }

    if authInfo, ok := config.AuthInfos[username]; ok {
        clientCert, err := certutil.ParseCertsPEM(authInfo.ClientCertificateData)
        if err != nil {
            klog.Warningf("Failed to parse client certificate for user %s: %v", username, err)
            return false
        if r.options.AuthMode == kubeconfig.AuthModeServiceAccountToken && authInfo.Token != "" {
            return true
        }
        for _, cert := range clientCert {
            if cert.NotAfter.After(time.Now().Add(residual)) {
                return true
        if r.options.AuthMode == kubeconfig.AuthModeClientCertificate {
            clientCert, err := certutil.ParseCertsPEM(authInfo.ClientCertificateData)
            if err != nil {
                klog.Warningf("Failed to parse client certificate for user %s: %v", username, err)
                return false
            }
            for _, cert := range clientCert {
                if cert.NotAfter.After(time.Now().Add(residual)) {
                    return true
                }
            }
        }
    } else {
@@ -247,3 +262,43 @@ func (r *Reconciler) createCSR(ctx context.Context, username string) error {

    return nil
}

func (r *Reconciler) createServiceAccount(ctx context.Context, username string) error {
    saName := fmt.Sprintf("kubesphere.users.%s", username)
    sa := &corev1.ServiceAccount{
        ObjectMeta: metav1.ObjectMeta{
            Name:      saName,
            Namespace: constants.KubeSphereNamespace,
            Labels:    map[string]string{constants.UsernameLabelKey: username},
        },
    }

    if err := r.Create(ctx, sa); err != nil {
        if !errors.IsAlreadyExists(err) {
            klog.Errorf("Failed to create service account for user %s: %v", username, err)
            return err
        }
    }

    secret := &corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Name:      fmt.Sprintf("%s.token", saName),
            Namespace: constants.KubeSphereNamespace,
            Annotations: map[string]string{
                corev1.ServiceAccountNameKey: saName,
            },
            Labels: map[string]string{
                constants.UsernameLabelKey: username,
            },
        },
        Type: corev1.SecretTypeServiceAccountToken,
    }

    if err := r.Create(ctx, secret); err != nil {
        if !errors.IsAlreadyExists(err) {
            klog.Errorf("Failed to create service account token secret for user %s: %v", username, err)
            return err
        }
    }
    return nil
}
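isValid now branches on the auth mode: a token entry is enough under service-account-token mode, while client-certificate mode still parses the PEM data and checks expiry against the residual window (the comment above uses three days). The certificate half in isolation, as a small sketch:

package kubeconfigcheck

import (
    "fmt"
    "time"

    certutil "k8s.io/client-go/util/cert"
)

// certStillValid parses the client certificate from a kubeconfig AuthInfo and
// reports whether any certificate outlives the residual window.
func certStillValid(pemData []byte, residual time.Duration) (bool, error) {
    certs, err := certutil.ParseCertsPEM(pemData)
    if err != nil {
        return false, fmt.Errorf("parse client certificate: %w", err)
    }
    for _, cert := range certs {
        if cert.NotAfter.After(time.Now().Add(residual)) {
            return true, nil
        }
    }
    return false, nil
}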
@@ -20,6 +20,7 @@ import (
    "k8s.io/client-go/tools/record"
    "k8s.io/klog/v2"
    iamv1beta1 "kubesphere.io/api/iam/v1beta1"
    tenantv1alpha1 "kubesphere.io/api/tenant/v1beta1"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller"
@@ -200,5 +201,22 @@ func (r *Reconciler) cleanUp(ctx context.Context, namespace *corev1.Namespace) e
    if err := r.DeleteAllOf(ctx, roleBinding, client.InNamespace(namespace.Name)); err != nil {
        return errors.Wrapf(err, "failed to delete role bindings")
    }
    updated := namespace.DeepCopy()
    modified := false
    newOwnerReferences := make([]metav1.OwnerReference, 0, len(updated.OwnerReferences))
    for _, owner := range updated.OwnerReferences {
        if owner.Kind != tenantv1alpha1.ResourceKindWorkspace {
            newOwnerReferences = append(newOwnerReferences, owner)
        } else {
            modified = true
        }
    }

    if modified {
        updated.OwnerReferences = newOwnerReferences
        if err := r.Patch(ctx, updated, client.MergeFrom(namespace)); err != nil {
            return errors.Wrapf(err, "failed to cleanup ownerReferences for namespace %s", namespace.Name)
        }
    }
    return nil
}
@@ -15,6 +15,7 @@ import (

    "kubesphere.io/kubesphere/pkg/apiserver/authentication"
    "kubesphere.io/kubesphere/pkg/models/composedapp"
    "kubesphere.io/kubesphere/pkg/models/kubeconfig"
    "kubesphere.io/kubesphere/pkg/models/terminal"
    "kubesphere.io/kubesphere/pkg/multicluster"
    "kubesphere.io/kubesphere/pkg/simple/client/k8s"
@@ -24,6 +25,7 @@ type Options struct {
    KubernetesOptions     *k8s.Options
    AuthenticationOptions *authentication.Options
    MultiClusterOptions   *multicluster.Options
    KubeconfigOptions     *kubeconfig.Options
    TerminalOptions       *terminal.Options
    ComposedAppOptions    *composedapp.Options
    HelmExecutorOptions   *HelmExecutorOptions
@@ -0,0 +1,61 @@
/*
 * Please refer to the LICENSE file in the root directory of the project.
 * https://github.com/kubesphere/kubesphere/blob/master/LICENSE
 */

package resourceprotection

import (
    "context"
    "net/http"

    admissionv1 "k8s.io/api/admission/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/webhook"
    "sigs.k8s.io/controller-runtime/pkg/webhook/admission"

    "kubesphere.io/kubesphere/pkg/constants"
    kscontroller "kubesphere.io/kubesphere/pkg/controller"
)

const webhookName = "resource-protection-webhook"

func (w *Webhook) Name() string {
    return webhookName
}

type Webhook struct {
    client.Client
}

func (w *Webhook) SetupWithManager(mgr *kscontroller.Manager) error {
    w.Client = mgr.GetClient()
    mgr.GetWebhookServer().Register("/resource-protector", &webhook.Admission{Handler: w})
    return nil
}

func (w *Webhook) Handle(ctx context.Context, req admission.Request) admission.Response {
    if req.Operation == admissionv1.Delete {
        gvr := req.RequestResource
        gvk, err := w.RESTMapper().KindFor(schema.GroupVersionResource{
            Group:    gvr.Group,
            Version:  gvr.Version,
            Resource: gvr.Resource,
        })
        if err != nil {
            return webhook.Errored(http.StatusInternalServerError, err)
        }
        obj := &unstructured.Unstructured{}
        obj.SetGroupVersionKind(gvk)
        if err = w.Get(ctx, client.ObjectKey{Namespace: req.Namespace, Name: req.Name}, obj); err != nil {
            return webhook.Errored(http.StatusInternalServerError, err)
        }

        if obj.GetLabels()[constants.ProtectedResourceLabel] == "true" {
            return webhook.Denied("this resource may not be deleted")
        }
    }
    return admission.Allowed("")
}
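The webhook only inspects DELETE requests: it maps the incoming resource to its kind via the RESTMapper, fetches the live object as unstructured, and denies the request when the protection label reads "true". Opting a resource into this protection is then just a label write; a sketch using an arbitrary ConfigMap, assuming a ValidatingWebhookConfiguration that routes DELETE operations to /resource-protector is installed separately:

package protectdemo

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "kubesphere.io/kubesphere/pkg/constants"
)

// markProtected labels an existing ConfigMap so the resource-protection
// webhook will deny DELETE requests against it.
func markProtected(ctx context.Context, c client.Client, namespace, name string) error {
    cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}
    if err := c.Get(ctx, client.ObjectKeyFromObject(cm), cm); err != nil {
        return err
    }
    if cm.Labels == nil {
        cm.Labels = map[string]string{}
    }
    cm.Labels[constants.ProtectedResourceLabel] = "true"
    return c.Update(ctx, cm)
}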
@@ -86,10 +86,12 @@ func (r *Reconciler) syncToKubernetes(ctx context.Context, roleBinding *iamv1bet
    for _, subject := range roleBinding.Subjects {
        newSubject := rbacv1.Subject{
            Kind:      subject.Kind,
            APIGroup:  rbacv1.GroupName,
            Name:      subject.Name,
            Namespace: subject.Namespace,
        }
        if subject.APIGroup != "" {
            newSubject.APIGroup = rbacv1.GroupName
        }
        subjects = append(subjects, newSubject)
    }
    k8sRolBinding.Subjects = subjects
@@ -9,19 +9,21 @@ import (
    "context"
    "fmt"

    kscontroller "kubesphere.io/kubesphere/pkg/controller"

    "github.com/go-logr/logr"
    "github.com/pkg/errors"
    corev1 "k8s.io/api/core/v1"
    rbacv1 "k8s.io/api/rbac/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/tools/record"
    iamv1beta1 "kubesphere.io/api/iam/v1beta1"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

    iamv1beta1 "kubesphere.io/api/iam/v1beta1"
    kscontroller "kubesphere.io/kubesphere/pkg/controller"
    rbacutils "kubesphere.io/kubesphere/pkg/utils/rbac"
)

const (
@@ -56,8 +58,7 @@ func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete

func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    logger := r.logger.WithValues("serivceaccount", req.NamespacedName)
    // ctx := context.Background()
    logger := r.logger.WithValues("serviceaccount", req.NamespacedName)
    sa := &corev1.ServiceAccount{}
    if err := r.Get(ctx, req.NamespacedName, sa); err != nil {
        return ctrl.Result{}, client.IgnoreNotFound(err)
@@ -73,19 +74,38 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
    return ctrl.Result{}, nil
}

func (r *Reconciler) getReferenceRole(ctx context.Context, roleName, namespace string) (*rbacv1.Role, error) {
    refRole := &rbacv1.Role{}
    refRoleName := rbacutils.RelatedK8sResourceName(roleName)
    if err := r.Get(ctx, types.NamespacedName{Name: refRoleName, Namespace: namespace}, refRole); err != nil {
        return nil, err
    }
    if refRole.Labels[iamv1beta1.RoleReferenceLabel] != roleName {
        return nil, apierrors.NewNotFound(rbacv1.Resource("roles"), refRoleName)
    }
    return refRole, nil
}

func (r *Reconciler) CreateOrUpdateRoleBinding(ctx context.Context, logger logr.Logger, sa *corev1.ServiceAccount) error {
    roleName := sa.Annotations[iamv1beta1.RoleAnnotation]
    if roleName == "" {
        return nil
    }
    var role rbacv1.Role
    if err := r.Get(ctx, types.NamespacedName{Name: roleName, Namespace: sa.Namespace}, &role); err != nil {
        return err

    role, err := r.getReferenceRole(ctx, roleName, sa.Namespace)
    if err != nil {
        if apierrors.IsNotFound(err) {
            logger.V(4).Info("related role not found", "namespace", sa.Namespace, "role", roleName)
            return nil
        }
        return errors.Wrapf(err, "cannot get reference role %s/%s", sa.Namespace, roleName)
    }

    // Delete existing rolebindings.
    saRoleBinding := &rbacv1.RoleBinding{}
    _ = r.Client.DeleteAllOf(ctx, saRoleBinding, client.InNamespace(sa.Namespace), client.MatchingLabels{iamv1beta1.ServiceAccountReferenceLabel: sa.Name})
    if err = r.DeleteAllOf(ctx, saRoleBinding, client.InNamespace(sa.Namespace), client.MatchingLabels{iamv1beta1.ServiceAccountReferenceLabel: sa.Name}); err != nil {
        return errors.Wrapf(err, "failed to delete RoleBindings for %s/%s", sa.Namespace, sa.Name)
    }

    saRoleBinding = &rbacv1.RoleBinding{
        ObjectMeta: metav1.ObjectMeta{
@@ -95,8 +115,8 @@ func (r *Reconciler) CreateOrUpdateRoleBinding(ctx context.Context, logger logr.
        },
        RoleRef: rbacv1.RoleRef{
            APIGroup: rbacv1.GroupName,
            Kind:     iamv1beta1.ResourceKindRole,
            Name:     roleName,
            Kind:     "Role",
            Name:     role.Name,
        },
        Subjects: []rbacv1.Subject{
            {
@@ -108,14 +128,12 @@ func (r *Reconciler) CreateOrUpdateRoleBinding(ctx context.Context, logger logr.
    }

    if err := controllerutil.SetControllerReference(sa, saRoleBinding, r.Scheme()); err != nil {
        logger.Error(err, "set controller reference failed")
        return err
        return errors.Wrapf(err, "failed to set controller reference for RoleBinding %s/%s", sa.Namespace, saRoleBinding.Name)
    }

    logger.V(4).Info("create ServiceAccount rolebinding", "ServiceAccount", sa.Name)
    if err := r.Client.Create(ctx, saRoleBinding); err != nil {
        logger.Error(err, "create rolebinding failed")
        return err
    if err := r.Create(ctx, saRoleBinding); err != nil {
        return errors.Wrapf(err, "failed to create RoleBinding %s/%s", sa.Namespace, saRoleBinding.Name)
    }
    return nil
}
@@ -13,17 +13,15 @@ import (
    . "github.com/onsi/gomega"
    corev1 "k8s.io/api/core/v1"
    rbacv1 "k8s.io/api/rbac/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/tools/record"
    iamv1beta1 "kubesphere.io/api/iam/v1beta1"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"

    iamv1beta1 "kubesphere.io/api/iam/v1beta1"

    "kubesphere.io/kubesphere/pkg/utils/k8sutil"
)

@@ -37,6 +35,7 @@ var _ = Describe("ServiceAccount", func() {
        saName      = "test-serviceaccount"
        saNamespace = "default"
        saRole      = "test-role"
        refRole     = "kubesphere:iam:test-role"
    )
    var role *rbacv1.Role
    var sa *corev1.ServiceAccount
@@ -45,8 +44,11 @@ var _ = Describe("ServiceAccount", func() {
    BeforeEach(func() {
        role = &rbacv1.Role{
            ObjectMeta: metav1.ObjectMeta{
                Name:      saRole,
                Name:      refRole,
                Namespace: saNamespace,
                Labels: map[string]string{
                    iamv1beta1.RoleReferenceLabel: saRole,
                },
            },
        }

@@ -74,7 +76,6 @@ var _ = Describe("ServiceAccount", func() {
        ctx := context.Background()

        reconciler := &Reconciler{
            //nolint:staticcheck
            Client:   fake.NewClientBuilder().WithScheme(scheme.Scheme).Build(),
            logger:   ctrl.Log.WithName("controllers").WithName("serviceaccount"),
            recorder: record.NewFakeRecorder(5),
@@ -89,16 +90,15 @@ var _ = Describe("ServiceAccount", func() {
        By("Expecting to bind role successfully")
        rolebindings := &rbacv1.RoleBindingList{}
        Expect(func() bool {
            reconciler.List(ctx, rolebindings, client.InNamespace(sa.Namespace), client.MatchingLabels{iamv1beta1.ServiceAccountReferenceLabel: sa.Name})
            _ = reconciler.List(ctx, rolebindings, client.InNamespace(sa.Namespace), client.MatchingLabels{iamv1beta1.ServiceAccountReferenceLabel: sa.Name})
            return len(rolebindings.Items) == 1 && k8sutil.IsControlledBy(rolebindings.Items[0].OwnerReferences, "ServiceAccount", saName)
        }()).Should(BeTrue())
    })

    It("Should report NotFound error when role doesn't exist", func() {
    It("Should not report NotFound error when role doesn't exist", func() {
        ctx := context.Background()

        reconciler := &Reconciler{
            //nolint:staticcheck
            Client:   fake.NewClientBuilder().WithScheme(scheme.Scheme).Build(),
            logger:   ctrl.Log.WithName("controllers").WithName("serviceaccount"),
            recorder: record.NewFakeRecorder(5),
@@ -106,7 +106,7 @@ var _ = Describe("ServiceAccount", func() {

        Expect(reconciler.Create(ctx, sa)).Should(Succeed())
        _, err := reconciler.Reconcile(ctx, req)
        Expect(apierrors.IsNotFound(err)).To(BeTrue())
        Expect(err).Should(Succeed())
    })
})
})
@@ -0,0 +1,135 @@
/*
 * Please refer to the LICENSE file in the root directory of the project.
 * https://github.com/kubesphere/kubesphere/blob/master/LICENSE
 */

package serviceaccounttoken

import (
    "context"
    "fmt"

    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
    "k8s.io/klog/v2"

    "kubesphere.io/kubesphere/pkg/models/kubeconfig"

    kscontroller "kubesphere.io/kubesphere/pkg/controller"

    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/tools/record"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/builder"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/predicate"

    "kubesphere.io/kubesphere/pkg/constants"
)

const (
    controllerName                 = "service-account-token"
    userKubeConfigSecretNameFormat = "kubeconfig-%s"
    kubeconfigFileName             = "config"
)

var _ kscontroller.Controller = &Reconciler{}
var _ reconcile.Reconciler = &Reconciler{}

type Reconciler struct {
    client.Client
    recorder record.EventRecorder
}

func (r *Reconciler) Name() string {
    return controllerName
}

func (r *Reconciler) SetupWithManager(mgr *kscontroller.Manager) error {
    if mgr.KubeconfigOptions.AuthMode != kubeconfig.AuthModeServiceAccountToken {
        klog.Infof("Skip %s controller as the auth mode is not service account token", controllerName)
        return nil
    }

    r.recorder = mgr.GetEventRecorderFor(controllerName)
    r.Client = mgr.GetClient()
    return builder.
        ControllerManagedBy(mgr).
        For(&corev1.Secret{},
            builder.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool {
                if object.GetNamespace() == constants.KubeSphereNamespace && object.GetLabels()[constants.UsernameLabelKey] != "" {
                    return true
                }
                return false
            })),
        ).
        Named(controllerName).
        Complete(r)
}

func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    secret := &corev1.Secret{}
    if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    if username := secret.Labels[constants.UsernameLabelKey]; username != "" {
        if secret.Data == nil {
            return ctrl.Result{}, nil
        }
        token := secret.Data["token"]

        if len(token) > 0 {
            if err := r.UpdateKubeConfigServiceAccountToken(ctx, username, string(token)); err != nil {
                // kubeconfig not generated
                return ctrl.Result{}, err
            }
        }
    }

    r.recorder.Event(secret, corev1.EventTypeNormal, kscontroller.Synced, kscontroller.MessageResourceSynced)
    return ctrl.Result{}, nil
}

func (r *Reconciler) UpdateKubeConfigServiceAccountToken(ctx context.Context, username string, token string) error {
    secretName := fmt.Sprintf(userKubeConfigSecretNameFormat, username)
    kubeconfigSecret := &corev1.Secret{}
    if err := r.Get(ctx, types.NamespacedName{Namespace: constants.KubeSphereNamespace, Name: secretName}, kubeconfigSecret); err != nil {
        return client.IgnoreNotFound(err)
    }

    kubeconfigSecret = applyToken(kubeconfigSecret, token)

    if err := r.Update(ctx, kubeconfigSecret); err != nil {
        klog.Errorf("Failed to update secret %s: %v", secretName, err)
        return err
    }
    return nil
}

func applyToken(secret *corev1.Secret, token string) *corev1.Secret {
    data := secret.Data[kubeconfigFileName]
    kubeconfig, err := clientcmd.Load(data)
    if err != nil {
        klog.Error(err)
        return secret
    }

    username := secret.Labels[constants.UsernameLabelKey]
    kubeconfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{
        username: {
            Token: token,
        },
    }

    data, err = clientcmd.Write(*kubeconfig)
    if err != nil {
        return secret
    }

    secret.StringData = map[string]string{kubeconfigFileName: string(data)}
    return secret
}
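applyToken's core is a clientcmd round trip: deserialize the stored kubeconfig, replace the AuthInfos map with a single bearer-token entry keyed by the username, and serialize it back into the secret. The same round trip in isolation:

package kubeconfigutil

import (
    "fmt"

    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// swapToken loads a serialized kubeconfig, replaces every AuthInfo with a
// single bearer-token entry for the given user, and serializes it back --
// the same clientcmd.Load/Write round trip applyToken relies on.
func swapToken(raw []byte, username, token string) ([]byte, error) {
    cfg, err := clientcmd.Load(raw)
    if err != nil {
        return nil, fmt.Errorf("parse kubeconfig: %w", err)
    }
    cfg.AuthInfos = map[string]*clientcmdapi.AuthInfo{
        username: {Token: token},
    }
    return clientcmd.Write(*cfg)
}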
@@ -121,14 +121,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }
    // Cloning and volumeSnapshot support only available for CSI drivers.
    // However, the CSIDriver object is optional and may not be present for some CSI storage systems.
    isCSIStorage := r.hasCSIDriver(ctx, storageClass)
    // Annotate storageClass
    storageClassUpdated := storageClass.DeepCopy()
    if isCSIStorage {
        r.updateSnapshotAnnotation(storageClassUpdated, isCSIStorage)
        r.updateCloneVolumeAnnotation(storageClassUpdated, isCSIStorage)
    } else {
        r.removeAnnotations(storageClassUpdated)
    r.updateSnapshotAnnotation(storageClassUpdated, true)
    r.updateCloneVolumeAnnotation(storageClassUpdated, true)
    }

    pvcCount, err := r.countPersistentVolumeClaims(ctx, storageClass)
@@ -174,11 +173,6 @@ func (r *Reconciler) updateCloneVolumeAnnotation(storageClass *storagev1.Storage
    }
}

func (r *Reconciler) removeAnnotations(storageClass *storagev1.StorageClass) {
    delete(storageClass.Annotations, annotationAllowClone)
    delete(storageClass.Annotations, annotationAllowSnapshot)
}

func (r *Reconciler) countPersistentVolumeClaims(ctx context.Context, storageClass *storagev1.StorageClass) (int, error) {
    pvcs := &corev1.PersistentVolumeClaimList{}
    if err := r.List(ctx, pvcs); err != nil {
@@ -37,7 +37,7 @@ func (h *appHandler) exampleCr(req *restful.Request, resp *restful.Response) {
func convertCRDToCR(crd v1.CustomResourceDefinition) (dstCr unstructured.Unstructured, err error) {

    cr := unstructured.Unstructured{}
    cr.SetName(fmt.Sprintf("%s-Instance", crd.Spec.Names.Singular))
    cr.SetName(fmt.Sprintf("%s-set-your-own-name", crd.Spec.Names.Singular))
    cr.SetGroupVersionKind(schema.GroupVersionKind{
        Group: crd.Spec.Group,
        Kind:  crd.Spec.Names.Kind,
@@ -38,7 +38,7 @@ func (h *handler) AddToContainer(container *restful.Container) error {
    versionFunc := func(request *restful.Request, response *restful.Response) {
        ksVersion := version.Get()
        ksVersion.Kubernetes = h.k8sVersionInfo
        response.WriteAsJson(ksVersion)
        _ = response.WriteAsJson(ksVersion)
    }
    legacy.Route(legacy.GET("/version").
        To(versionFunc).
@@ -23,6 +23,8 @@ import (

    "kubesphere.io/kubesphere/pkg/api"
    "kubesphere.io/kubesphere/pkg/apiserver/query"
    "kubesphere.io/kubesphere/pkg/constants"
    "kubesphere.io/kubesphere/pkg/models/kubeconfig"
    resourcev1beta1 "kubesphere.io/kubesphere/pkg/models/resources/v1beta1"
    "kubesphere.io/kubesphere/pkg/utils/sliceutil"
)
@@ -588,6 +590,11 @@ func (am *amOperator) CreateOrUpdateNamespaceRoleBinding(username string, namesp
            APIGroup: iamv1beta1.SchemeGroupVersion.Group,
            Name:     username,
        },
        {
            Kind:      rbacv1.ServiceAccountKind,
            Name:      fmt.Sprintf(kubeconfig.UserKubeConfigServiceAccountNameFormat, username),
            Namespace: constants.KubeSphereNamespace,
        },
    },
    RoleRef: rbacv1.RoleRef{
        APIGroup: iamv1beta1.SchemeGroupVersion.Group,
@@ -637,6 +644,11 @@ func (am *amOperator) CreateOrUpdateClusterRoleBinding(username string, role str
            APIGroup: iamv1beta1.SchemeGroupVersion.Group,
            Name:     username,
        },
        {
            Kind:      rbacv1.ServiceAccountKind,
            Name:      fmt.Sprintf(kubeconfig.UserKubeConfigServiceAccountNameFormat, username),
            Namespace: constants.KubeSphereNamespace,
        },
    },
    RoleRef: rbacv1.RoleRef{
        APIGroup: iamv1beta1.SchemeGroupVersion.Group,
@@ -18,14 +18,15 @@ import (
)

const (
    ConfigTypeKubeConfig = "kubeconfig"
    SecretTypeKubeConfig = "config.kubesphere.io/" + ConfigTypeKubeConfig
    FileName = "config"
    DefaultClusterName = "local"
    DefaultNamespace = "default"
    InClusterCAFilePath = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
    PrivateKeyAnnotation = "kubesphere.io/private-key"
    UserKubeConfigSecretNameFormat = "kubeconfig-%s"
    ConfigTypeKubeConfig                   = "kubeconfig"
    SecretTypeKubeConfig                   = "config.kubesphere.io/" + ConfigTypeKubeConfig
    FileName                               = "config"
    DefaultClusterName                     = "local"
    DefaultNamespace                       = "default"
    InClusterCAFilePath                    = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
    PrivateKeyAnnotation                   = "kubesphere.io/private-key"
    UserKubeConfigSecretNameFormat         = "kubeconfig-%s"
    UserKubeConfigServiceAccountNameFormat = "kubesphere.users.%s"
)

type Interface interface {
pkg/models/kubeconfig/options.go (new file, 23 lines)
@@ -0,0 +1,23 @@
/*
 * Please refer to the LICENSE file in the root directory of the project.
 * https://github.com/kubesphere/kubesphere/blob/master/LICENSE
 */

package kubeconfig

const (
    AuthModeServiceAccountToken AuthMode = "service-account-token"
    AuthModeClientCertificate   AuthMode = "client-certificate"
    AuthModeOIDCToken           AuthMode = "oidc-token"
    AuthModeWebhookToken        AuthMode = "webhook-token"
)

type AuthMode string

type Options struct {
    AuthMode AuthMode `json:"authMode" yaml:"authMode" mapstructure:"authMode"`
}

func NewOptions() *Options {
    return &Options{AuthMode: AuthModeClientCertificate}
}
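NewOptions defaults to client-certificate, so clusters that never set authMode keep the old CSR-based behavior. Each per-user credential controller then gates itself in SetupWithManager, which suggests that exactly one of the two controllers runs for these modes and neither runs for the token-only modes. A sketch of that selection, inferred only from the gates shown in this diff:

package main

import (
    "fmt"

    "kubesphere.io/kubesphere/pkg/models/kubeconfig"
)

func main() {
    opts := kubeconfig.NewOptions() // defaults to AuthModeClientCertificate
    switch opts.AuthMode {
    case kubeconfig.AuthModeClientCertificate:
        fmt.Println("kubeconfig controller issues CSRs; service-account-token controller is skipped")
    case kubeconfig.AuthModeServiceAccountToken:
        fmt.Println("service-account-token controller syncs tokens; CSR issuance is skipped")
    default:
        fmt.Println("oidc-token / webhook-token: no per-user credential controller runs")
    }
}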
@@ -109,7 +109,7 @@ func DefaultObjectMetaFilter(item metav1.ObjectMeta, filter query.Filter) bool {
        }
        return false
    // /namespaces?page=1&limit=10&name=default
    case query.FieldName:
    case query.FieldName, query.FieldNameAndAlias:
        displayName := item.GetAnnotations()[constants.DisplayNameAnnotationKey]
        if displayName != "" && strings.Contains(displayName, string(filter.Value)) {
            return true
@@ -137,7 +137,7 @@ func DefaultObjectMetaFilter(item metav1.Object, filter query.Filter) bool {
        }
        return false
    // /namespaces?page=1&limit=10&name=default
    case query.FieldName:
    case query.FieldName, query.FieldNameAndAlias:
        displayName := item.GetAnnotations()[constants.DisplayNameAnnotationKey]
        if displayName != "" && strings.Contains(strings.ToLower(displayName), strings.ToLower(string(filter.Value))) {
            return true
@@ -21,29 +21,24 @@ import (
)

func AddToContainer(container *restful.Container, path string, checks ...HealthChecker) error {
    if len(checks) == 0 {
        klog.V(4).Info("No default health checks specified. Installing the ping handler.")
        checks = []HealthChecker{PingHealthz}
    }
    name := strings.Split(strings.TrimPrefix(path, "/"), "/")[0]
    container.Handle(path, handleRootHealth(name, nil, checks...))

    for _, check := range checks {
        container.Handle(fmt.Sprintf("%s/%v", path, check.Name()), adaptCheckToHandler(check))
    }

    return nil
}

func InstallHandler(container *restful.Container, checks ...HealthChecker) error {
    if len(checks) == 0 {
        klog.V(4).Info("No default health checks specified. Installing the ping handler.")
        checks = []HealthChecker{PingHealthz}
    }
    return AddToContainer(container, "/healthz", checks...)
}

func InstallLivezHandler(container *restful.Container, checks ...HealthChecker) error {
    if len(checks) == 0 {
        klog.V(4).Info("No default health checks specified. Installing the ping handler.")
        checks = []HealthChecker{PingHealthz}
    }
    return AddToContainer(container, "/livez", checks...)
}
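After this refactor AddToContainer no longer injects a default check; each Install*Handler applies the ping fallback itself before delegating, so callers that use AddToContainer directly must now pass their own checks. Registering both endpoints might look like the sketch below; the healthz import path is assumed for illustration:

package main

import (
    restful "github.com/emicklei/go-restful/v3"

    "kubesphere.io/kubesphere/pkg/server/healthz" // assumed import path
)

func main() {
    container := restful.NewContainer()
    _ = healthz.InstallHandler(container)      // serves /healthz plus /healthz/<check name>
    _ = healthz.InstallLivezHandler(container) // serves /livez plus /livez/<check name>
}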
@@ -219,7 +219,7 @@ func CreateOrUpdateAppVersion(ctx context.Context, client runtimeclient.Client,
        Maintainer: vRequest.Maintainers,
        PullUrl:    vRequest.PullUrl,
    }
    appVersion.Finalizers = []string{appv2.StoreCleanFinalizer}
    appVersion.Finalizers = []string{appv2.CleanupFinalizer}

    labels := appVersion.GetLabels()
    if labels == nil {
@@ -38,6 +38,7 @@ const (
    MaxImageWidth        = 128
    ApplicationNamespace = "extension-openpitrix"
    StoreCleanFinalizer  = "storeCleanFinalizer.application.kubesphere.io"
    CleanupFinalizer     = "application.kubesphere.io/cleanup"
    SystemWorkspace      = "system-workspace"
    // App review status: draft, submitted, passed, rejected, suspended, active
    ReviewStatusDraft = "draft"