diff --git a/config/ks-core/templates/globalroles.yaml b/config/ks-core/templates/globalroles.yaml
index 49b393ecf..728955d5c 100644
--- a/config/ks-core/templates/globalroles.yaml
+++ b/config/ks-core/templates/globalroles.yaml
@@ -10,14 +10,6 @@ rules:
     verbs:
       - get
       - list
-  - apiGroups:
-      - extensions.kubesphere.io
-    resources:
-      - '*'
-    verbs:
-      - get
-      - list
-      - watch
   - nonResourceURLs:
       - '/static/images/*'
     verbs:
@@ -256,4 +248,20 @@ rules:
       - users
     verbs:
       - create
-      - list
\ No newline at end of file
+      - list
+
+---
+apiVersion: iam.kubesphere.io/v1beta1
+kind: GlobalRole
+metadata:
+  name: ks-console
+rules:
+  - apiGroups:
+      - extensions.kubesphere.io
+      - config.kubesphere.io
+    resources:
+      - '*'
+    verbs:
+      - get
+      - list
+      - watch
\ No newline at end of file
diff --git a/config/ks-core/templates/ks-console.yaml b/config/ks-core/templates/ks-console.yaml
index 91705f6cf..655d67d24 100644
--- a/config/ks-core/templates/ks-console.yaml
+++ b/config/ks-core/templates/ks-console.yaml
@@ -1,4 +1,29 @@
 {{ if eq (include "multicluster.role" .) "host" }}
+apiVersion: kubesphere.io/v1alpha1
+kind: ServiceAccount
+metadata:
+  name: ks-console
+  namespace: kubesphere-system
+secrets: []
+
+---
+apiVersion: iam.kubesphere.io/v1beta1
+kind: GlobalRoleBinding
+metadata:
+  labels:
+    iam.kubesphere.io/role-ref: ks-console
+  name: ks-console
+roleRef:
+  apiGroup: iam.kubesphere.io
+  kind: GlobalRole
+  name: ks-console
+subjects:
+  - apiGroup: kubesphere.io
+    kind: ServiceAccount
+    name: ks-console
+    namespace: kubesphere-system
+
+---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
@@ -25,6 +50,7 @@ spec:
       annotations:
         # force restart ks-console after the upgrade is complete if ks-console-config changes
         checksum/config: {{ include (print $.Template.BasePath "/ks-console-config.yaml") . | sha256sum }}
+        kubesphere.io/serviceaccount-name: ks-console
     spec:
       {{- if .Values.global.imagePullSecrets }}
       imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 8 }}
diff --git a/pkg/controller/ksserviceaccount/serviceaccount_controller.go b/pkg/controller/ksserviceaccount/serviceaccount_controller.go
index 8942edfac..4a83789d9 100644
--- a/pkg/controller/ksserviceaccount/serviceaccount_controller.go
+++ b/pkg/controller/ksserviceaccount/serviceaccount_controller.go
@@ -9,6 +9,10 @@ import (
 	"context"
 	"fmt"
 
+	appsv1 "k8s.io/api/apps/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+
 	"github.com/go-logr/logr"
 	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/core/v1"
@@ -117,6 +121,11 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (reconcile
 		return ctrl.Result{}, err
 	}
 
+	if err := r.checkServiceAccountRefPod(ctx, sa); err != nil {
+		logger.Error(err, "failed check service account ref pod")
+		return ctrl.Result{}, err
+	}
+
 	return ctrl.Result{}, nil
 }
 
@@ -180,3 +189,99 @@ func (r *Reconciler) checkSecretToken(secret *v1.Secret, subjectName string) err
 	}
 	return nil
 }
+
+// checkServiceAccountRefPod restarts workloads whose pods reference the
+// ServiceAccount via the kubesphere.io/serviceaccount-name annotation but do
+// not yet mount the ServiceAccount's current token secret.
+func (r *Reconciler) checkServiceAccountRefPod(ctx context.Context, sa *corev1alpha1.ServiceAccount) error {
+	if len(sa.Secrets) == 0 {
+		klog.Warningf("service account %s has no secrets", sa.Name)
+		return nil
+	}
+	pods := &v1.PodList{}
+	if err := r.Client.List(ctx, pods, client.InNamespace(sa.Namespace)); err != nil {
+		return err
+	}
+
+	secretName := sa.Secrets[0].Name
+	for i := range pods.Items {
+		pod := &pods.Items[i]
+		if pod.Annotations[AnnotationServiceAccountName] != sa.Name {
+			continue
+		}
+		// Skip pods that already project the current token secret.
+		if podMountsSecret(pod, secretName) {
+			continue
+		}
+		if err := r.rolloutRestartPod(ctx, pod); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// podMountsSecret reports whether the pod's service-account volume projects secretName.
+func podMountsSecret(pod *v1.Pod, secretName string) bool {
+	for _, volume := range pod.Spec.Volumes {
+		if volume.Name != ServiceAccountVolumeName || volume.Projected == nil {
+			continue
+		}
+		for _, source := range volume.Projected.Sources {
+			if source.Secret != nil && source.Secret.Name == secretName {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// rolloutRestartPod triggers a rolling restart of the pod's owning workload
+// (Deployment, StatefulSet or DaemonSet) by bumping a pod-template annotation.
+func (r *Reconciler) rolloutRestartPod(ctx context.Context, pod *v1.Pod) error {
+	if len(pod.OwnerReferences) == 0 {
+		klog.Infof("Pod has no owner references")
+		return nil
+	}
+
+	owner := pod.OwnerReferences[0]
+	switch owner.Kind {
+	case "ReplicaSet":
+		rs := &appsv1.ReplicaSet{}
+		if err := r.Client.Get(ctx, types.NamespacedName{Namespace: pod.Namespace, Name: owner.Name}, rs); err != nil {
+			return err
+		}
+		// A ReplicaSet is restarted through its owning Deployment, if any.
+		if len(rs.OwnerReferences) > 0 && rs.OwnerReferences[0].Kind == "Deployment" {
+			deploy := &appsv1.Deployment{}
+			if err := r.Client.Get(ctx, types.NamespacedName{Namespace: pod.Namespace, Name: rs.OwnerReferences[0].Name}, deploy); err != nil {
+				return err
+			}
+			return r.restartWorkload(ctx, deploy, &deploy.Spec.Template)
+		}
+	case "StatefulSet":
+		sts := &appsv1.StatefulSet{}
+		if err := r.Client.Get(ctx, types.NamespacedName{Namespace: pod.Namespace, Name: owner.Name}, sts); err != nil {
+			return err
+		}
+		return r.restartWorkload(ctx, sts, &sts.Spec.Template)
+	case "DaemonSet":
+		ds := &appsv1.DaemonSet{}
+		if err := r.Client.Get(ctx, types.NamespacedName{Namespace: pod.Namespace, Name: owner.Name}, ds); err != nil {
+			return err
+		}
+		return r.restartWorkload(ctx, ds, &ds.Spec.Template)
+	default:
+		klog.Warningf("Unsupported owner kind %s", owner.Kind)
+	}
+	return nil
+}
+
+// restartWorkload stamps the workload's pod template with a restartedAt
+// annotation and updates it, forcing the controller to roll new pods.
+func (r *Reconciler) restartWorkload(ctx context.Context, obj client.Object, template *v1.PodTemplateSpec) error {
+	if template.Annotations == nil {
+		template.Annotations = make(map[string]string)
+	}
+	template.Annotations["kubesphere.io/restartedAt"] = metav1.Now().String()
+	return r.Client.Update(ctx, obj)
+}