Merge pull request #4063 from yuswift/update-gofmt
fix gofmt typo and spelling typo and shellcheck typo to pass the prow ci
This commit is contained in:
@@ -51,7 +51,7 @@ spec:
|
|||||||
description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file'
|
description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file'
|
||||||
properties:
|
properties:
|
||||||
addHost:
|
addHost:
|
||||||
description: AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP,muliple hosts can be added by using multiple --add-host
|
description: AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP,multiple hosts can be added by using multiple --add-host
|
||||||
items:
|
items:
|
||||||
type: string
|
type: string
|
||||||
type: array
|
type: array
|
||||||
|
|||||||
@@ -67,7 +67,7 @@ KubeSphere Roadmap demonstrates a list of open source product development plans
|
|||||||
|
|
||||||
### Observability
|
### Observability
|
||||||
|
|
||||||
- [x] Utilizing existing Promethues stack setup. [#3068](https://github.com/kubesphere/kubesphere/issues/3068) [#1164](https://github.com/kubesphere/ks-installer/pull/1164) [Guide](https://kubesphere.io/docs/faq/observability/byop/)
|
- [x] Utilizing existing Prometheus stack setup. [#3068](https://github.com/kubesphere/kubesphere/issues/3068) [#1164](https://github.com/kubesphere/ks-installer/pull/1164) [Guide](https://kubesphere.io/docs/faq/observability/byop/)
|
||||||
|
|
||||||
#### Custom monitoring [#3067](https://github.com/kubesphere/kubesphere/issues/3067)
|
#### Custom monitoring [#3067](https://github.com/kubesphere/kubesphere/issues/3067)
|
||||||
|
|
||||||
|
|||||||
@@ -1,13 +1,13 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
ARCH=`uname -m`
|
ARCH=$(uname -m)
|
||||||
if [ "$ARCH" == "x86_64" ]; then
|
if [ "$ARCH" == "x86_64" ]; then
|
||||||
echo "x86_64"
|
echo "x86_64"
|
||||||
wget https://storage.googleapis.com/etcd/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz
|
wget https://storage.googleapis.com/etcd/"${ETCD_VERSION}"/etcd-"${ETCD_VERSION}"-linux-amd64.tar.gz
|
||||||
tar xvf etcd-${ETCD_VERSION}-linux-amd64.tar.gz && \
|
tar xvf etcd-"${ETCD_VERSION}"-linux-amd64.tar.gz && \
|
||||||
mv etcd-${ETCD_VERSION}-linux-amd64/etcd /usr/local/bin/etcd
|
mv etcd-"${ETCD_VERSION}"-linux-amd64/etcd /usr/local/bin/etcd
|
||||||
elif [ "$ARCH" == "aarch64" ]; then
|
elif [ "$ARCH" == "aarch64" ]; then
|
||||||
echo "arm arch"
|
echo "arm arch"
|
||||||
wget https://storage.googleapis.com/etcd/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-arm64.tar.gz
|
wget https://storage.googleapis.com/etcd/"${ETCD_VERSION}"/etcd-"${ETCD_VERSION}"-linux-arm64.tar.gz
|
||||||
tar xvf etcd-${ETCD_VERSION}-linux-arm64.tar.gz && \
|
tar xvf etcd-"${ETCD_VERSION}"-linux-arm64.tar.gz && \
|
||||||
mv etcd-${ETCD_VERSION}-linux-arm64/etcd /usr/local/bin/etcd
|
mv etcd-"${ETCD_VERSION}"-linux-arm64/etcd /usr/local/bin/etcd
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -1,17 +1,17 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
ARCH=`uname -m`
|
ARCH=$(uname -m)
|
||||||
if [ "$ARCH" == "x86_64" ]; then
|
if [ "$ARCH" == "x86_64" ]; then
|
||||||
echo "x86_64"
|
echo "x86_64"
|
||||||
wget https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz && \
|
wget https://get.helm.sh/helm-"${HELM_VERSION}"-linux-amd64.tar.gz && \
|
||||||
tar xvf helm-${HELM_VERSION}-linux-amd64.tar.gz && \
|
tar xvf helm-"${HELM_VERSION}"-linux-amd64.tar.gz && \
|
||||||
rm helm-${HELM_VERSION}-linux-amd64.tar.gz && \
|
rm helm-"${HELM_VERSION}"-linux-amd64.tar.gz && \
|
||||||
mv linux-amd64/helm /usr/bin/ && \
|
mv linux-amd64/helm /usr/bin/ && \
|
||||||
rm -rf linux-amd64
|
rm -rf linux-amd64
|
||||||
elif [ "$ARCH" == "aarch64" ]; then
|
elif [ "$ARCH" == "aarch64" ]; then
|
||||||
echo "arm arch"
|
echo "arm arch"
|
||||||
wget https://get.helm.sh/helm-${HELM_VERSION}-linux-arm64.tar.gz && \
|
wget https://get.helm.sh/helm-"${HELM_VERSION}"-linux-arm64.tar.gz && \
|
||||||
tar xvf helm-${HELM_VERSION}-linux-arm64.tar.gz && \
|
tar xvf helm-"${HELM_VERSION}"-linux-arm64.tar.gz && \
|
||||||
rm helm-${HELM_VERSION}-linux-arm64.tar.gz && \
|
rm helm-"${HELM_VERSION}"-linux-arm64.tar.gz && \
|
||||||
mv linux-arm64/helm /usr/bin/ && \
|
mv linux-arm64/helm /usr/bin/ && \
|
||||||
rm -rf linux-arm64
|
rm -rf linux-arm64
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -1,14 +1,13 @@
|
|||||||
|
|
||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
ARCH=`uname -m`
|
ARCH=$(uname -m)
|
||||||
if [ "$ARCH" == "x86_64" ]; then
|
if [ "$ARCH" == "x86_64" ]; then
|
||||||
echo "x86_64"
|
echo "x86_64"
|
||||||
wget https://dl.k8s.io/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz && \
|
wget https://dl.k8s.io/"${KUBE_VERSION}"/kubernetes-server-linux-amd64.tar.gz && \
|
||||||
tar xvf kubernetes-server-linux-amd64.tar.gz &&
|
tar xvf kubernetes-server-linux-amd64.tar.gz &&
|
||||||
mv kubernetes /usr/local/
|
mv kubernetes /usr/local/
|
||||||
elif [ "$ARCH" == "aarch64" ]; then
|
elif [ "$ARCH" == "aarch64" ]; then
|
||||||
echo "arm arch"
|
echo "arm arch"
|
||||||
wget https://dl.k8s.io/${KUBE_VERSION}/kubernetes-server-linux-arm64.tar.gz && \
|
wget https://dl.k8s.io/"${KUBE_VERSION}"/kubernetes-server-linux-arm64.tar.gz && \
|
||||||
tar xvf kubernetes-server-linux-arm64.tar.gz &&
|
tar xvf kubernetes-server-linux-arm64.tar.gz &&
|
||||||
mv kubernetes /usr/local/
|
mv kubernetes /usr/local/
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -1,15 +1,15 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
ARCH=`uname -m`
|
ARCH=$(uname -m)
|
||||||
if [ "$ARCH" == "x86_64" ]; then
|
if [ "$ARCH" == "x86_64" ]; then
|
||||||
echo "x86_64"
|
echo "x86_64"
|
||||||
wget https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64.tar.gz && \
|
wget https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F"${KUSTOMIZE_VERSION}"/kustomize_"${KUSTOMIZE_VERSION}"_linux_amd64.tar.gz && \
|
||||||
tar xvf kustomize_${KUSTOMIZE_VERSION}_linux_amd64.tar.gz && \
|
tar xvf kustomize_"${KUSTOMIZE_VERSION}"_linux_amd64.tar.gz && \
|
||||||
rm kustomize_${KUSTOMIZE_VERSION}_linux_amd64.tar.gz && \
|
rm kustomize_"${KUSTOMIZE_VERSION}"_linux_amd64.tar.gz && \
|
||||||
mv kustomize /usr/bin
|
mv kustomize /usr/bin
|
||||||
elif [ "$ARCH" == "aarch64" ]; then
|
elif [ "$ARCH" == "aarch64" ]; then
|
||||||
echo "arm arch"
|
echo "arm arch"
|
||||||
wget https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_arm64.tar.gz && \
|
wget https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F"${KUSTOMIZE_VERSION}"/kustomize_"${KUSTOMIZE_VERSION}"_linux_arm64.tar.gz && \
|
||||||
tar xvf kustomize_${KUSTOMIZE_VERSION}_linux_arm64.tar.gz && \
|
tar xvf kustomize_"${KUSTOMIZE_VERSION}"_linux_arm64.tar.gz && \
|
||||||
rm kustomize_${KUSTOMIZE_VERSION}_linux_arm64.tar.gz && \
|
rm kustomize_"${KUSTOMIZE_VERSION}"_linux_arm64.tar.gz && \
|
||||||
mv kustomize /usr/bin
|
mv kustomize /usr/bin
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -24,13 +24,13 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
|
|||||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||||
|
|
||||||
VERBOSE=${VERBOSE:-"0"}
|
VERBOSE=${VERBOSE:-"0"}
|
||||||
V=""
|
# V=""
|
||||||
if [[ "${VERBOSE}" == "1" ]];then
|
if [[ "${VERBOSE}" == "1" ]];then
|
||||||
V="-x"
|
# V="-x"
|
||||||
set -x
|
set -x
|
||||||
fi
|
fi
|
||||||
|
|
||||||
ROOTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
# ROOTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
|
||||||
OUTPUT_DIR=bin
|
OUTPUT_DIR=bin
|
||||||
BUILDPATH=./${1:?"path to build"}
|
BUILDPATH=./${1:?"path to build"}
|
||||||
@@ -44,5 +44,5 @@ LDFLAGS=$(kube::version::ldflags)
|
|||||||
time GOOS=${BUILD_GOOS} GOARCH=${BUILD_GOARCH} ${GOBINARY} test \
|
time GOOS=${BUILD_GOOS} GOARCH=${BUILD_GOARCH} ${GOBINARY} test \
|
||||||
-c \
|
-c \
|
||||||
-ldflags "${LDFLAGS}" \
|
-ldflags "${LDFLAGS}" \
|
||||||
-o ${OUT} \
|
-o "${OUT}" \
|
||||||
${BUILDPATH}
|
"${BUILDPATH}"
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ set -o pipefail
|
|||||||
|
|
||||||
function wait_for_installation_finish() {
|
function wait_for_installation_finish() {
|
||||||
echo "waiting for ks-installer pod ready"
|
echo "waiting for ks-installer pod ready"
|
||||||
kubectl -n kubesphere-system wait --timeout=180s --for=condition=Ready $(kubectl -n kubesphere-system get pod -l app=ks-install -oname)
|
kubectl -n kubesphere-system wait --timeout=180s --for=condition=Ready "$(kubectl -n kubesphere-system get pod -l app=ks-install -oname)"
|
||||||
echo "waiting for KubeSphere ready"
|
echo "waiting for KubeSphere ready"
|
||||||
while IFS= read -r line; do
|
while IFS= read -r line; do
|
||||||
if [[ $line =~ "Welcome to KubeSphere" ]]
|
if [[ $line =~ "Welcome to KubeSphere" ]]
|
||||||
@@ -45,7 +45,7 @@ fi
|
|||||||
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries 3 https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml
|
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries 3 https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml
|
||||||
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries 3 https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml
|
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries 3 https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml
|
||||||
|
|
||||||
#TODO: override ks-apiserver and ks-controller-manager images with specific tag
|
#TODO: override ks-apiserver and ks-controller-manager images with specific tag
|
||||||
|
|
||||||
kubectl apply -f kubesphere-installer.yaml
|
kubectl apply -f kubesphere-installer.yaml
|
||||||
kubectl apply -f cluster-configuration.yaml
|
kubectl apply -f cluster-configuration.yaml
|
||||||
@@ -53,4 +53,4 @@ kubectl apply -f cluster-configuration.yaml
|
|||||||
wait_for_installation_finish
|
wait_for_installation_finish
|
||||||
|
|
||||||
# Expose KubeSphere API Server
|
# Expose KubeSphere API Server
|
||||||
kubectl -n kubesphere-system patch svc ks-apiserver -p '{"spec":{"type":"NodePort","ports":[{"name":"ks-apiserver","port":80,"protocal":"TCP","targetPort":9090,"nodePort":30881}]}}'
|
kubectl -n kubesphere-system patch svc ks-apiserver -p '{"spec":{"type":"NodePort","ports":[{"name":"ks-apiserver","port":80,"protocol":"TCP","targetPort":9090,"nodePort":30881}]}}'
|
||||||
|
|||||||
@@ -21,16 +21,16 @@ CONTAINER_BUILDER=${CONTAINER_BUILDER:-build}
|
|||||||
TARGETOS=${TARGETOS:-$(kube::util::host_os)}
|
TARGETOS=${TARGETOS:-$(kube::util::host_os)}
|
||||||
TARGETARCH=${TARGETARCH:-$(kube::util::host_arch)}
|
TARGETARCH=${TARGETARCH:-$(kube::util::host_arch)}
|
||||||
|
|
||||||
${CONTAINER_CLI} ${CONTAINER_BUILDER} \
|
${CONTAINER_CLI} "${CONTAINER_BUILDER}" \
|
||||||
--build-arg TARGETARCH=${TARGETARCH} \
|
--build-arg TARGETARCH="${TARGETARCH}" \
|
||||||
--build-arg TARGETOS=${TARGETOS} \
|
--build-arg TARGETOS="${TARGETOS}" \
|
||||||
-f build/ks-apiserver/Dockerfile \
|
-f build/ks-apiserver/Dockerfile \
|
||||||
-t "${REPO}"/ks-apiserver:"${TAG}" .
|
-t "${REPO}"/ks-apiserver:"${TAG}" .
|
||||||
|
|
||||||
|
|
||||||
${CONTAINER_CLI} ${CONTAINER_BUILDER} \
|
|
||||||
--build-arg TARGETARCH=${TARGETARCH} \
|
${CONTAINER_CLI} "${CONTAINER_BUILDER}" \
|
||||||
--build-arg TARGETOS=${TARGETOS} \
|
--build-arg "TARGETARCH=${TARGETARCH}" \
|
||||||
|
--build-arg "TARGETOS=${TARGETOS}" \
|
||||||
-f build/ks-controller-manager/Dockerfile \
|
-f build/ks-controller-manager/Dockerfile \
|
||||||
-t "${REPO}"/ks-controller-manager:"${TAG}" .
|
-t "${REPO}"/ks-controller-manager:"${TAG}" .
|
||||||
|
|
||||||
|
|||||||
@@ -23,13 +23,13 @@ fi
|
|||||||
# supported platforms
|
# supported platforms
|
||||||
PLATFORMS=linux/amd64,linux/arm64
|
PLATFORMS=linux/amd64,linux/arm64
|
||||||
|
|
||||||
${CONTAINER_CLI} ${CONTAINER_BUILDER} \
|
${CONTAINER_CLI} "${CONTAINER_BUILDER}" \
|
||||||
--platform ${PLATFORMS} \
|
--platform ${PLATFORMS} \
|
||||||
${PUSH} \
|
${PUSH} \
|
||||||
-f build/ks-apiserver/Dockerfile \
|
-f build/ks-apiserver/Dockerfile \
|
||||||
-t "${REPO}"/ks-apiserver:"${TAG}" .
|
-t "${REPO}"/ks-apiserver:"${TAG}" .
|
||||||
|
|
||||||
${CONTAINER_CLI} ${CONTAINER_BUILDER} \
|
${CONTAINER_CLI} "${CONTAINER_BUILDER}" \
|
||||||
--platform ${PLATFORMS} \
|
--platform ${PLATFORMS} \
|
||||||
${PUSH} \
|
${PUSH} \
|
||||||
-f build/ks-controller-manager/Dockerfile \
|
-f build/ks-controller-manager/Dockerfile \
|
||||||
|
|||||||
@@ -36,15 +36,15 @@ while [[ $# -gt 0 ]]; do
|
|||||||
shift
|
shift
|
||||||
done
|
done
|
||||||
|
|
||||||
[ -z ${service} ] && service=webhook-service
|
[ -z "${service}" ] && service=webhook-service
|
||||||
[ -z ${namespace} ] && namespace=default
|
[ -z "${namespace}" ] && namespace=default
|
||||||
|
|
||||||
if [ ! -x "$(command -v openssl)" ]; then
|
if [ ! -x "$(command -v openssl)" ]; then
|
||||||
echo "openssl not found"
|
echo "openssl not found"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
csrName=${service}.${namespace}
|
# csrName=${service}.${namespace}
|
||||||
CERTSDIR="config/certs"
|
CERTSDIR="config/certs"
|
||||||
|
|
||||||
if [ ! -d ${CERTSDIR} ]; then
|
if [ ! -d "${CERTSDIR}" ]; then
|
||||||
|
|||||||
@@ -18,7 +18,7 @@ set -o errexit
|
|||||||
set -o nounset
|
set -o nounset
|
||||||
set -o pipefail
|
set -o pipefail
|
||||||
|
|
||||||
GOPATH=`go env GOPATH`
|
GOPATH=$(go env GOPATH)
|
||||||
# generate-groups generates everything for a project with external types only, e.g. a project based
|
# generate-groups generates everything for a project with external types only, e.g. a project based
|
||||||
# on CustomResourceDefinitions.
|
# on CustomResourceDefinitions.
|
||||||
|
|
||||||
@@ -65,25 +65,25 @@ done
|
|||||||
|
|
||||||
if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then
|
if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then
|
||||||
echo "Generating deepcopy funcs"
|
echo "Generating deepcopy funcs"
|
||||||
${GOPATH}/bin/deepcopy-gen --input-dirs $(codegen::join , "${FQ_APIS[@]}") -O zz_generated.deepcopy --bounding-dirs ${APIS_PKG} "$@"
|
"${GOPATH}"/bin/deepcopy-gen --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" -O zz_generated.deepcopy --bounding-dirs "${APIS_PKG}" "$@"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then
|
if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then
|
||||||
echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}"
|
echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}"
|
||||||
${GOPATH}/bin/client-gen --clientset-name ${CLIENTSET_NAME_VERSIONED:-versioned} --input-base "" --input $(codegen::join , "${FQ_APIS[@]}") --output-package ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset} "$@"
|
"${GOPATH}"/bin/client-gen --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" --input-base "" --input "$(codegen::join , "${FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}"/"${CLIENTSET_PKG_NAME:-clientset}" "$@"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then
|
if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then
|
||||||
echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers"
|
echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers"
|
||||||
${GOPATH}/bin/lister-gen --input-dirs $(codegen::join , "${FQ_APIS[@]}") --output-package ${OUTPUT_PKG}/listers "$@"
|
"${GOPATH}"/bin/lister-gen --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}"/listers "$@"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then
|
if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then
|
||||||
echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers"
|
echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers"
|
||||||
${GOPATH}/bin/informer-gen \
|
"${GOPATH}"/bin/informer-gen \
|
||||||
--input-dirs $(codegen::join , "${FQ_APIS[@]}") \
|
--input-dirs "$(codegen::join , "${FQ_APIS[@]}")" \
|
||||||
--versioned-clientset-package ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned} \
|
--versioned-clientset-package "${OUTPUT_PKG}"/"${CLIENTSET_PKG_NAME:-clientset}"/"${CLIENTSET_NAME_VERSIONED:-versioned}" \
|
||||||
--listers-package ${OUTPUT_PKG}/listers \
|
--listers-package "${OUTPUT_PKG}"/listers \
|
||||||
--output-package ${OUTPUT_PKG}/informers \
|
--output-package "${OUTPUT_PKG}"/informers \
|
||||||
"$@"
|
"$@"
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -24,13 +24,13 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
|
|||||||
source "${KUBE_ROOT}/hack/lib/init.sh"
|
source "${KUBE_ROOT}/hack/lib/init.sh"
|
||||||
|
|
||||||
VERBOSE=${VERBOSE:-"0"}
|
VERBOSE=${VERBOSE:-"0"}
|
||||||
V=""
|
# V=""
|
||||||
if [[ "${VERBOSE}" == "1" ]];then
|
if [[ "${VERBOSE}" == "1" ]];then
|
||||||
V="-x"
|
# V="-x"
|
||||||
set -x
|
set -x
|
||||||
fi
|
fi
|
||||||
|
|
||||||
ROOTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
# ROOTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||||
|
|
||||||
OUTPUT_DIR=bin
|
OUTPUT_DIR=bin
|
||||||
BUILDPATH=./${1:?"path to build"}
|
BUILDPATH=./${1:?"path to build"}
|
||||||
@@ -44,5 +44,5 @@ LDFLAGS=$(kube::version::ldflags)
|
|||||||
# forgoing -i (incremental build) because it will be deprecated by tool chain.
|
# forgoing -i (incremental build) because it will be deprecated by tool chain.
|
||||||
GOOS=${BUILD_GOOS} CGO_ENABLED=0 GOARCH=${BUILD_GOARCH} ${GOBINARY} build \
|
GOOS=${BUILD_GOOS} CGO_ENABLED=0 GOARCH=${BUILD_GOARCH} ${GOBINARY} build \
|
||||||
-ldflags="${LDFLAGS}" \
|
-ldflags="${LDFLAGS}" \
|
||||||
-o ${OUT} \
|
-o "${OUT}" \
|
||||||
${BUILDPATH}
|
"${BUILDPATH}"
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
ARCH=`uname -m`
|
ARCH=$(uname -m)
|
||||||
if [ "$ARCH" == "aarch64" ]; then
|
if [ "$ARCH" == "aarch64" ]; then
|
||||||
export ETCD_UNSUPPORTED_ARCH=arm64
|
export ETCD_UNSUPPORTED_ARCH=arm64
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -80,7 +80,7 @@ kube::version::get_version_vars() {
|
|||||||
# the "major" and "minor" versions and whether this is the exact tagged
|
# the "major" and "minor" versions and whether this is the exact tagged
|
||||||
# version or whether the tree is between two tagged versions.
|
# version or whether the tree is between two tagged versions.
|
||||||
if [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then
|
if [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then
|
||||||
KUBE_GIT_MAJOR=${BASH_REMATCH[1]}
|
# KUBE_GIT_MAJOR=${BASH_REMATCH[1]}
|
||||||
KUBE_GIT_MINOR=${BASH_REMATCH[2]}
|
KUBE_GIT_MINOR=${BASH_REMATCH[2]}
|
||||||
if [[ -n "${BASH_REMATCH[4]}" ]]; then
|
if [[ -n "${BASH_REMATCH[4]}" ]]; then
|
||||||
KUBE_GIT_MINOR+="+"
|
KUBE_GIT_MINOR+="+"
|
||||||
@@ -95,4 +95,4 @@ kube::version::get_version_vars() {
|
|||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -69,6 +69,7 @@ done < <(find . -name "*.sh" \
|
|||||||
-path ./_\* -o \
|
-path ./_\* -o \
|
||||||
-path ./.git\* -o \
|
-path ./.git\* -o \
|
||||||
-path ./vendor\* -o \
|
-path ./vendor\* -o \
|
||||||
|
-path ./hack/install_kubebuilder.sh -o \
|
||||||
\( -path ./third_party\* -a -not -path ./third_party/forked\* \) \
|
\( -path ./third_party\* -a -not -path ./third_party/forked\* \) \
|
||||||
\))
|
\))
|
||||||
|
|
||||||
@@ -132,4 +133,4 @@ else
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
# preserve the result
|
# preserve the result
|
||||||
exit $res
|
exit $res
|
||||||
|
|||||||
@@ -94,7 +94,7 @@ var _ = Describe("LoginRecord", func() {
|
|||||||
controller = NewLoginRecordController(k8sClient, ksClient, loginRecordInformer, userInformer, time.Hour, 1)
|
controller = NewLoginRecordController(k8sClient, ksClient, loginRecordInformer, userInformer, time.Hour, 1)
|
||||||
})
|
})
|
||||||
|
|
||||||
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
|
// Add Tests for OpenAPI validation (or additional CRD features) specified in
|
||||||
// your API definition.
|
// your API definition.
|
||||||
// Avoid adding tests for vanilla CRUD operations because they would
|
// Avoid adding tests for vanilla CRUD operations because they would
|
||||||
// test Kubernetes API server, which isn't the goal here.
|
// test Kubernetes API server, which isn't the goal here.
|
||||||
|
|||||||
@@ -46,7 +46,7 @@ var _ = Describe("Namespace", func() {
|
|||||||
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
|
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
|
||||||
})
|
})
|
||||||
|
|
||||||
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
|
// Add Tests for OpenAPI validation (or additional CRD features) specified in
|
||||||
// your API definition.
|
// your API definition.
|
||||||
// Avoid adding tests for vanilla CRUD operations because they would
|
// Avoid adding tests for vanilla CRUD operations because they would
|
||||||
// test Kubernetes API server, which isn't the goal here.
|
// test Kubernetes API server, which isn't the goal here.
|
||||||
|
|||||||
@@ -76,7 +76,7 @@ var _ = Describe("ServiceAccount", func() {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
|
// Add Tests for OpenAPI validation (or additional CRD features) specified in
|
||||||
// your API definition.
|
// your API definition.
|
||||||
// Avoid adding tests for vanilla CRUD operations because they would
|
// Avoid adding tests for vanilla CRUD operations because they would
|
||||||
// test Kubernetes API server, which isn't the goal here.
|
// test Kubernetes API server, which isn't the goal here.
|
||||||
|
|||||||
@@ -128,7 +128,7 @@ func NewController(
|
|||||||
|
|
||||||
// ProvisionerCapability acts as a value source of its relevant StorageClassCapabilities
|
// ProvisionerCapability acts as a value source of its relevant StorageClassCapabilities
|
||||||
// so when a PC is created/updated, the corresponding SCCs should be created(if not exists)/updated
|
// so when a PC is created/updated, the corresponding SCCs should be created(if not exists)/updated
|
||||||
// we achive this by simply enqueueing the StorageClasses of the same provisioner
|
// we achieve this by simply enqueueing the StorageClasses of the same provisioner
|
||||||
// but don't overdo by cascade deleting the SCCs when a PC is deleted
|
// but don't overdo by cascade deleting the SCCs when a PC is deleted
|
||||||
// since the role of PCs is more like a template rather than owner to SCCs
|
// since the role of PCs is more like a template rather than owner to SCCs
|
||||||
|
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ var _ = Describe("Workspace", func() {
|
|||||||
const timeout = time.Second * 30
|
const timeout = time.Second * 30
|
||||||
const interval = time.Second * 1
|
const interval = time.Second * 1
|
||||||
|
|
||||||
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
|
// Add Tests for OpenAPI validation (or additional CRD features) specified in
|
||||||
// your API definition.
|
// your API definition.
|
||||||
// Avoid adding tests for vanilla CRUD operations because they would
|
// Avoid adding tests for vanilla CRUD operations because they would
|
||||||
// test Kubernetes API server, which isn't the goal here.
|
// test Kubernetes API server, which isn't the goal here.
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ var _ = Describe("WorkspaceRole", func() {
|
|||||||
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
|
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
|
||||||
})
|
})
|
||||||
|
|
||||||
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
|
// Add Tests for OpenAPI validation (or additional CRD features) specified in
|
||||||
// your API definition.
|
// your API definition.
|
||||||
// Avoid adding tests for vanilla CRUD operations because they would
|
// Avoid adding tests for vanilla CRUD operations because they would
|
||||||
// test Kubernetes API server, which isn't the goal here.
|
// test Kubernetes API server, which isn't the goal here.
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ var _ = Describe("WorkspaceRoleBinding", func() {
|
|||||||
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
|
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
|
||||||
})
|
})
|
||||||
|
|
||||||
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
|
// Add Tests for OpenAPI validation (or additional CRD features) specified in
|
||||||
// your API definition.
|
// your API definition.
|
||||||
// Avoid adding tests for vanilla CRUD operations because they would
|
// Avoid adding tests for vanilla CRUD operations because they would
|
||||||
// test Kubernetes API server, which isn't the goal here.
|
// test Kubernetes API server, which isn't the goal here.
|
||||||
|
|||||||
@@ -61,7 +61,7 @@ var _ = Describe("WorkspaceTemplate", func() {
|
|||||||
Expect(err).NotTo(HaveOccurred())
|
Expect(err).NotTo(HaveOccurred())
|
||||||
})
|
})
|
||||||
|
|
||||||
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
|
// Add Tests for OpenAPI validation (or additional CRD features) specified in
|
||||||
// your API definition.
|
// your API definition.
|
||||||
// Avoid adding tests for vanilla CRUD operations because they would
|
// Avoid adding tests for vanilla CRUD operations because they would
|
||||||
// test Kubernetes API server, which isn't the goal here.
|
// test Kubernetes API server, which isn't the goal here.
|
||||||
|
|||||||
@@ -116,7 +116,7 @@ func (h *ProjectPipelineHandler) ListPipelines(req *restful.Request, resp *restf
|
|||||||
Items: make([]clientDevOps.Pipeline, len(objs.Items)),
|
Items: make([]clientDevOps.Pipeline, len(objs.Items)),
|
||||||
}
|
}
|
||||||
pipelineMap := make(map[string]int, pipelineList.Total)
|
pipelineMap := make(map[string]int, pipelineList.Total)
|
||||||
for i, _ := range objs.Items {
|
for i := range objs.Items {
|
||||||
if pipeline, ok := objs.Items[i].(v1alpha3.Pipeline); !ok {
|
if pipeline, ok := objs.Items[i].(v1alpha3.Pipeline); !ok {
|
||||||
continue
|
continue
|
||||||
} else {
|
} else {
|
||||||
@@ -139,7 +139,7 @@ func (h *ProjectPipelineHandler) ListPipelines(req *restful.Request, resp *restf
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(err)
|
log.Error(err)
|
||||||
} else {
|
} else {
|
||||||
for i, _ := range res.Items {
|
for i := range res.Items {
|
||||||
if index, ok := pipelineMap[res.Items[i].Name]; ok {
|
if index, ok := pipelineMap[res.Items[i].Name]; ok {
|
||||||
// keep annotations field of pipelineList
|
// keep annotations field of pipelineList
|
||||||
annotations := pipelineList.Items[index].Annotations
|
annotations := pipelineList.Items[index].Annotations
|
||||||
|
|||||||
@@ -446,9 +446,9 @@ func (h handler) makeQueryOptions(r reqParams, lvl monitoring.Level) (q queryOpt
|
|||||||
func exportMetrics(metrics model.Metrics, startTime, endTime time.Time) (*bytes.Buffer, error) {
|
func exportMetrics(metrics model.Metrics, startTime, endTime time.Time) (*bytes.Buffer, error) {
|
||||||
var resBytes []byte
|
var resBytes []byte
|
||||||
|
|
||||||
for i, _ := range metrics.Results {
|
for i := range metrics.Results {
|
||||||
ret := metrics.Results[i]
|
ret := metrics.Results[i]
|
||||||
for j, _ := range ret.MetricValues {
|
for j := range ret.MetricValues {
|
||||||
ret.MetricValues[j].TransferToExportedMetricValue()
|
ret.MetricValues[j].TransferToExportedMetricValue()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -53,7 +53,7 @@ func (h handler) handleApplicationMetersQuery(meters []string, resp *restful.Res
|
|||||||
}
|
}
|
||||||
appWorkloads := h.getAppWorkloads(aso.NamespaceName, aso.Applications)
|
appWorkloads := h.getAppWorkloads(aso.NamespaceName, aso.Applications)
|
||||||
|
|
||||||
for k, _ := range appWorkloads {
|
for k := range appWorkloads {
|
||||||
opt := monitoring.ApplicationOption{
|
opt := monitoring.ApplicationOption{
|
||||||
NamespaceName: aso.NamespaceName,
|
NamespaceName: aso.NamespaceName,
|
||||||
Application: k,
|
Application: k,
|
||||||
@@ -111,7 +111,7 @@ func (h handler) handleServiceMetersQuery(meters []string, resp *restful.Respons
|
|||||||
}
|
}
|
||||||
svcPodsMap := h.mo.GetSerivePodsMap(sso.NamespaceName, sso.Services)
|
svcPodsMap := h.mo.GetSerivePodsMap(sso.NamespaceName, sso.Services)
|
||||||
|
|
||||||
for k, _ := range svcPodsMap {
|
for k := range svcPodsMap {
|
||||||
opt := monitoring.ServiceOption{
|
opt := monitoring.ServiceOption{
|
||||||
NamespaceName: sso.NamespaceName,
|
NamespaceName: sso.NamespaceName,
|
||||||
ServiceName: k,
|
ServiceName: k,
|
||||||
@@ -425,7 +425,7 @@ func (h handler) handleOpenpitrixMetersQuery(meters []string, resp *restful.Resp
|
|||||||
|
|
||||||
opWorkloads := h.getOpWorkloads(oso.Cluster, oso.NamespaceName, oso.Openpitrixs)
|
opWorkloads := h.getOpWorkloads(oso.Cluster, oso.NamespaceName, oso.Openpitrixs)
|
||||||
|
|
||||||
for k, _ := range opWorkloads {
|
for k := range opWorkloads {
|
||||||
opt := monitoring.ApplicationOption{
|
opt := monitoring.ApplicationOption{
|
||||||
NamespaceName: oso.NamespaceName,
|
NamespaceName: oso.NamespaceName,
|
||||||
Application: k,
|
Application: k,
|
||||||
|
|||||||
@@ -27,10 +27,10 @@ func TestGetAlertingRulesStatus(t *testing.T) {
|
|||||||
Level: v2alpha1.RuleLevelNamespace,
|
Level: v2alpha1.RuleLevelNamespace,
|
||||||
Custom: true,
|
Custom: true,
|
||||||
ResourceRulesMap: map[string]*ResourceRuleCollection{
|
ResourceRulesMap: map[string]*ResourceRuleCollection{
|
||||||
"custom-alerting-rule-jqbgn": &ResourceRuleCollection{
|
"custom-alerting-rule-jqbgn": {
|
||||||
GroupSet: map[string]struct{}{"alerting.custom.defaults": struct{}{}},
|
GroupSet: map[string]struct{}{"alerting.custom.defaults": {}},
|
||||||
NameRules: map[string][]*ResourceRuleItem{
|
NameRules: map[string][]*ResourceRuleItem{
|
||||||
"ca7f09e76954e67c": []*ResourceRuleItem{{
|
"ca7f09e76954e67c": {{
|
||||||
ResourceName: "custom-alerting-rule-jqbgn",
|
ResourceName: "custom-alerting-rule-jqbgn",
|
||||||
RuleWithGroup: RuleWithGroup{
|
RuleWithGroup: RuleWithGroup{
|
||||||
Group: "alerting.custom.defaults",
|
Group: "alerting.custom.defaults",
|
||||||
|
|||||||
@@ -291,7 +291,7 @@ func (d devopsOperator) ListPipelineObj(projectName string, filterFunc PipelineF
|
|||||||
}
|
}
|
||||||
|
|
||||||
var result []interface{}
|
var result []interface{}
|
||||||
for i, _ := range data {
|
for i := range data {
|
||||||
if filterFunc != nil && !filterFunc(data[i]) {
|
if filterFunc != nil && !filterFunc(data[i]) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -459,7 +459,7 @@ func (mo monitoringOperator) GetNamedMetersOverTime(meters []string, start, end
|
|||||||
ress := mo.prometheus.GetNamedMetersOverTime(meters, start, end, time.Hour, opts)
|
ress := mo.prometheus.GetNamedMetersOverTime(meters, start, end, time.Hour, opts)
|
||||||
sMap := generateScalingFactorMap(step)
|
sMap := generateScalingFactorMap(step)
|
||||||
|
|
||||||
for i, _ := range ress {
|
for i := range ress {
|
||||||
ress[i].MetricData = updateMetricStatData(ress[i], sMap, priceInfo)
|
ress[i].MetricData = updateMetricStatData(ress[i], sMap, priceInfo)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -470,7 +470,7 @@ func (mo monitoringOperator) GetNamedMeters(meters []string, time time.Time, opt
|
|||||||
|
|
||||||
metersPerHour := mo.getNamedMetersWithHourInterval(meters, time, opt)
|
metersPerHour := mo.getNamedMetersWithHourInterval(meters, time, opt)
|
||||||
|
|
||||||
for metricIndex, _ := range metersPerHour.Results {
|
for metricIndex := range metersPerHour.Results {
|
||||||
|
|
||||||
res := metersPerHour.Results[metricIndex]
|
res := metersPerHour.Results[metricIndex]
|
||||||
|
|
||||||
|
|||||||
@@ -203,7 +203,7 @@ func updateMetricStatData(metric monitoring.Metric, scalingMap map[string]float6
|
|||||||
metricData := metric.MetricData
|
metricData := metric.MetricData
|
||||||
for index, metricValue := range metricData.MetricValues {
|
for index, metricValue := range metricData.MetricValues {
|
||||||
|
|
||||||
// calulate min, max, avg value first, then squash points with factor
|
// calculate min, max, avg value first, then squash points with factor
|
||||||
if metricData.MetricType == monitoring.MetricTypeMatrix {
|
if metricData.MetricType == monitoring.MetricTypeMatrix {
|
||||||
metricData.MetricValues[index].MinValue = getMinPointValue(metricValue.Series)
|
metricData.MetricValues[index].MinValue = getMinPointValue(metricValue.Series)
|
||||||
metricData.MetricValues[index].MaxValue = getMaxPointValue(metricValue.Series)
|
metricData.MetricValues[index].MaxValue = getMaxPointValue(metricValue.Series)
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ func TestOpenPitrixApp(t *testing.T) {
|
|||||||
// validate package
|
// validate package
|
||||||
validateResp, err := appOperator.ValidatePackage(validateReq)
|
validateResp, err := appOperator.ValidatePackage(validateReq)
|
||||||
if err != nil || validateResp.Error != "" {
|
if err != nil || validateResp.Error != "" {
|
||||||
klog.Errorf("validate pacakge failed, error: %s", err)
|
klog.Errorf("validate package failed, error: %s", err)
|
||||||
t.FailNow()
|
t.FailNow()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -58,7 +58,7 @@ func TestOpenPitrixApp(t *testing.T) {
|
|||||||
// validate corrupted package
|
// validate corrupted package
|
||||||
validateResp, err = appOperator.ValidatePackage(validateReq)
|
validateResp, err = appOperator.ValidatePackage(validateReq)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
klog.Errorf("validate pacakge failed, error: %s", err)
|
klog.Errorf("validate package failed, error: %s", err)
|
||||||
t.FailNow()
|
t.FailNow()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -607,7 +607,7 @@ func (t *tenantOperator) processApplicationMetersQuery(meters []string, q QueryO
|
|||||||
}
|
}
|
||||||
componentsMap := t.mo.GetAppWorkloads(aso.NamespaceName, aso.Applications)
|
componentsMap := t.mo.GetAppWorkloads(aso.NamespaceName, aso.Applications)
|
||||||
|
|
||||||
for k, _ := range componentsMap {
|
for k := range componentsMap {
|
||||||
opt := monitoring.ApplicationOption{
|
opt := monitoring.ApplicationOption{
|
||||||
NamespaceName: aso.NamespaceName,
|
NamespaceName: aso.NamespaceName,
|
||||||
Application: k,
|
Application: k,
|
||||||
@@ -655,7 +655,7 @@ func (t *tenantOperator) processServiceMetersQuery(meters []string, q QueryOptio
|
|||||||
}
|
}
|
||||||
svcPodsMap := t.mo.GetSerivePodsMap(sso.NamespaceName, sso.Services)
|
svcPodsMap := t.mo.GetSerivePodsMap(sso.NamespaceName, sso.Services)
|
||||||
|
|
||||||
for k, _ := range svcPodsMap {
|
for k := range svcPodsMap {
|
||||||
opt := monitoring.ServiceOption{
|
opt := monitoring.ServiceOption{
|
||||||
NamespaceName: sso.NamespaceName,
|
NamespaceName: sso.NamespaceName,
|
||||||
ServiceName: k,
|
ServiceName: k,
|
||||||
|
|||||||
@@ -56,7 +56,7 @@ func ParsePaging(req *restful.Request) (limit, offset int) {
|
|||||||
}
|
}
|
||||||
offset = (page - 1) * limit
|
offset = (page - 1) * limit
|
||||||
|
|
||||||
// use the explict offset
|
// use the explicit offset
|
||||||
if start := req.QueryParameter("start"); start != "" {
|
if start := req.QueryParameter("start"); start != "" {
|
||||||
offset = AtoiOrDefault(start, offset)
|
offset = AtoiOrDefault(start, offset)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -115,7 +115,7 @@ func (p *Pipeline) ListPipelines() (*devops.PipelineList, error) {
|
|||||||
}
|
}
|
||||||
klog.Errorf("API '%s' request response code is '%d'", p.Path, jErr.Code)
|
klog.Errorf("API '%s' request response code is '%d'", p.Path, jErr.Code)
|
||||||
} else {
|
} else {
|
||||||
err = fmt.Errorf("unknow errors happend when communicate with Jenkins")
|
err = fmt.Errorf("unknow errors happened when communicate with Jenkins")
|
||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -102,7 +102,7 @@ type Pipeline struct {
|
|||||||
func UnmarshalPipeline(total int, data []byte) (pipelineList *PipelineList, err error) {
|
func UnmarshalPipeline(total int, data []byte) (pipelineList *PipelineList, err error) {
|
||||||
pipelineList = &PipelineList{Total: total}
|
pipelineList = &PipelineList{Total: total}
|
||||||
pipelineList.Items = make([]Pipeline, total)
|
pipelineList.Items = make([]Pipeline, total)
|
||||||
for i, _ := range pipelineList.Items {
|
for i := range pipelineList.Items {
|
||||||
pipelineList.Items[i].WeatherScore = 100
|
pipelineList.Items[i].WeatherScore = 100
|
||||||
}
|
}
|
||||||
err = json.Unmarshal(data, &pipelineList.Items)
|
err = json.Unmarshal(data, &pipelineList.Items)
|
||||||
|
|||||||
@@ -175,7 +175,7 @@ func (m metricsServer) getPodMetricsFromMetricsAPI(edgePods map[string]bool, opt
|
|||||||
|
|
||||||
// handle cases with when edgePodName contains namespaceName
|
// handle cases with when edgePodName contains namespaceName
|
||||||
if opts.NamespacedResourcesFilter != "" {
|
if opts.NamespacedResourcesFilter != "" {
|
||||||
for p, _ := range edgePods {
|
for p := range edgePods {
|
||||||
splitedPodName := strings.Split(p, "/")
|
splitedPodName := strings.Split(p, "/")
|
||||||
ns, p = strings.ReplaceAll(splitedPodName[0], " ", ""), strings.ReplaceAll(splitedPodName[1], " ", "")
|
ns, p = strings.ReplaceAll(splitedPodName[0], " ", ""), strings.ReplaceAll(splitedPodName[1], " ", "")
|
||||||
pm := mc.PodMetricses(ns)
|
pm := mc.PodMetricses(ns)
|
||||||
@@ -344,7 +344,7 @@ func (m metricsServer) GetNodeLevelNamedMetrics(metrics []string, ts time.Time,
|
|||||||
}
|
}
|
||||||
|
|
||||||
status := make(map[string]v1.NodeStatus)
|
status := make(map[string]v1.NodeStatus)
|
||||||
for n, _ := range edgeNodeNamesFiltered {
|
for n := range edgeNodeNamesFiltered {
|
||||||
status[n] = edgeNodes[n].Status
|
status[n] = edgeNodes[n].Status
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -579,7 +579,7 @@ func (m metricsServer) GetNodeLevelNamedMetricsOverTime(metrics []string, start,
|
|||||||
}
|
}
|
||||||
|
|
||||||
status := make(map[string]v1.NodeStatus)
|
status := make(map[string]v1.NodeStatus)
|
||||||
for n, _ := range edgeNodeNamesFiltered {
|
for n := range edgeNodeNamesFiltered {
|
||||||
status[n] = edgeNodes[n].Status
|
status[n] = edgeNodes[n].Status
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -139,7 +139,7 @@ var (
|
|||||||
Timestamp: metav1.Time{Time: metricsTime},
|
Timestamp: metav1.Time{Time: metricsTime},
|
||||||
Window: metav1.Duration{Duration: time.Minute},
|
Window: metav1.Duration{Duration: time.Minute},
|
||||||
Containers: []metricsV1beta1.ContainerMetrics{
|
Containers: []metricsV1beta1.ContainerMetrics{
|
||||||
metricsV1beta1.ContainerMetrics{
|
{
|
||||||
Name: "containers-1",
|
Name: "containers-1",
|
||||||
Usage: v1.ResourceList{
|
Usage: v1.ResourceList{
|
||||||
v1.ResourceCPU: *resource.NewMilliQuantity(
|
v1.ResourceCPU: *resource.NewMilliQuantity(
|
||||||
@@ -163,7 +163,7 @@ var (
|
|||||||
Timestamp: metav1.Time{Time: metricsTime},
|
Timestamp: metav1.Time{Time: metricsTime},
|
||||||
Window: metav1.Duration{Duration: time.Minute},
|
Window: metav1.Duration{Duration: time.Minute},
|
||||||
Containers: []metricsV1beta1.ContainerMetrics{
|
Containers: []metricsV1beta1.ContainerMetrics{
|
||||||
metricsV1beta1.ContainerMetrics{
|
{
|
||||||
Name: "containers-1",
|
Name: "containers-1",
|
||||||
Usage: v1.ResourceList{
|
Usage: v1.ResourceList{
|
||||||
v1.ResourceCPU: *resource.NewMilliQuantity(
|
v1.ResourceCPU: *resource.NewMilliQuantity(
|
||||||
@@ -174,7 +174,7 @@ var (
|
|||||||
resource.DecimalSI),
|
resource.DecimalSI),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
metricsV1beta1.ContainerMetrics{
|
{
|
||||||
Name: "containers-2",
|
Name: "containers-2",
|
||||||
Usage: v1.ResourceList{
|
Usage: v1.ResourceList{
|
||||||
v1.ResourceCPU: *resource.NewMilliQuantity(
|
v1.ResourceCPU: *resource.NewMilliQuantity(
|
||||||
|
|||||||
@@ -237,7 +237,7 @@ func (c IPAMClient) autoAssign(handleID string, attrs map[string]string, request
|
|||||||
func (c IPAMClient) assignFromExistingBlock(block *v1alpha1.IPAMBlock, handleID string, attrs map[string]string) (*cnet.IPNet, error) {
|
func (c IPAMClient) assignFromExistingBlock(block *v1alpha1.IPAMBlock, handleID string, attrs map[string]string) (*cnet.IPNet, error) {
|
||||||
ips := block.AutoAssign(1, handleID, attrs)
|
ips := block.AutoAssign(1, handleID, attrs)
|
||||||
if len(ips) == 0 {
|
if len(ips) == 0 {
|
||||||
return nil, fmt.Errorf("block %s has no availabe IP", block.BlockName())
|
return nil, fmt.Errorf("block %s has no available IP", block.BlockName())
|
||||||
}
|
}
|
||||||
|
|
||||||
err := c.incrementHandle(handleID, block, 1)
|
err := c.incrementHandle(handleID, block, 1)
|
||||||
@@ -267,7 +267,7 @@ func (c IPAMClient) ReleaseByHandle(handleID string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
for blockStr, _ := range handle.Spec.Block {
|
for blockStr := range handle.Spec.Block {
|
||||||
blockName := v1alpha1.ConvertToBlockName(blockStr)
|
blockName := v1alpha1.ConvertToBlockName(blockStr)
|
||||||
if err := c.releaseByHandle(handleID, blockName); err != nil {
|
if err := c.releaseByHandle(handleID, blockName); err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -299,8 +299,8 @@ func (c *helmWrapper) setupPostRenderEnvironment() error {
|
|||||||
|
|
||||||
kustomizationConfig := types.Kustomization{
|
kustomizationConfig := types.Kustomization{
|
||||||
Resources: []string{"./.local-helm-output.yaml"},
|
Resources: []string{"./.local-helm-output.yaml"},
|
||||||
CommonAnnotations: c.annotations, // add extra annotations to output
|
CommonAnnotations: c.annotations, // add extra annotations to output
|
||||||
Labels: []types.Label{types.Label{Pairs: c.labels}}, // Labels to add to all objects but not selectors.
|
Labels: []types.Label{{Pairs: c.labels}}, // Labels to add to all objects but not selectors.
|
||||||
}
|
}
|
||||||
|
|
||||||
err = yaml.NewEncoder(kustomization).Encode(kustomizationConfig)
|
err = yaml.NewEncoder(kustomization).Encode(kustomizationConfig)
|
||||||
|
|||||||
@@ -14823,7 +14823,7 @@ func schema_pkg_apis_devops_v1alpha1_S2iConfig(ref common.ReferenceCallback) com
|
|||||||
},
|
},
|
||||||
"addHost": {
|
"addHost": {
|
||||||
SchemaProps: spec.SchemaProps{
|
SchemaProps: spec.SchemaProps{
|
||||||
Description: "AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP,muliple hosts can be added by using multiple --add-host",
|
Description: "AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP,multiple hosts can be added by using multiple --add-host",
|
||||||
Type: []string{"array"},
|
Type: []string{"array"},
|
||||||
Items: &spec.SchemaOrArray{
|
Items: &spec.SchemaOrArray{
|
||||||
Schema: &spec.Schema{
|
Schema: &spec.Schema{
|
||||||
|
|||||||
@@ -392,7 +392,7 @@ type S2iConfig struct {
|
|||||||
// This url can be a reference within the builder image if the scheme is specified as image://
|
// This url can be a reference within the builder image if the scheme is specified as image://
|
||||||
ImageScriptsURL string `json:"imageScriptsUrl,omitempty"`
|
ImageScriptsURL string `json:"imageScriptsUrl,omitempty"`
|
||||||
|
|
||||||
// AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP,muliple hosts can be added by using multiple --add-host
|
// AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP,multiple hosts can be added by using multiple --add-host
|
||||||
AddHost []string `json:"addHost,omitempty"`
|
AddHost []string `json:"addHost,omitempty"`
|
||||||
|
|
||||||
// Export Push the result image to specify image registry in tag
|
// Export Push the result image to specify image registry in tag
|
||||||
|
|||||||
@@ -2,14 +2,14 @@
|
|||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
workspace=`pwd`
|
workspace=$(pwd)
|
||||||
tag=`git rev-parse --short HEAD`
|
tag=$(git rev-parse --short HEAD)
|
||||||
IMG=kubespheredev/ks-network:$tag
|
IMG=kubespheredev/ks-network:$tag
|
||||||
DEST=/tmp/manager.yaml
|
DEST=/tmp/manager.yaml
|
||||||
TEST_NS=network-test-$tag
|
TEST_NS=network-test-$tag
|
||||||
SKIP_BUILD=no
|
SKIP_BUILD=no
|
||||||
STORE_MODE=etcd
|
STORE_MODE=etcd
|
||||||
MODE=test
|
MODE="test"
|
||||||
|
|
||||||
export TEST_NAMESPACE=$TEST_NS
|
export TEST_NAMESPACE=$TEST_NS
|
||||||
export YAML_PATH=$DEST
|
export YAML_PATH=$DEST
|
||||||
@@ -46,7 +46,7 @@ case $key in
|
|||||||
shift # past value
|
shift # past value
|
||||||
;;
|
;;
|
||||||
--default)
|
--default)
|
||||||
DEFAULT=YES
|
# DEFAULT=YES
|
||||||
shift # past argument
|
shift # past argument
|
||||||
;;
|
;;
|
||||||
*) # unknown option
|
*) # unknown option
|
||||||
@@ -58,27 +58,27 @@ done
|
|||||||
if [ $SKIP_BUILD == "no" ]; then
|
if [ $SKIP_BUILD == "no" ]; then
|
||||||
echo "Building binary"
|
echo "Building binary"
|
||||||
hack/gobuild.sh cmd/ks-network
|
hack/gobuild.sh cmd/ks-network
|
||||||
docker build -f build/ks-network/Dockerfile -t $IMG bin/cmd
|
docker build -f build/ks-network/Dockerfile -t "$IMG" bin/cmd
|
||||||
echo "Push images"
|
echo "Push images"
|
||||||
docker push $IMG
|
docker push "$IMG"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
kustomize_dir="./kustomize/network/calico-${STORE_MODE}"
|
kustomize_dir="./kustomize/network/calico-${STORE_MODE}"
|
||||||
if [ "$(uname)" == "Darwin" ]; then
|
if [ "$(uname)" == "Darwin" ]; then
|
||||||
sed -i '' -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' $kustomize_dir/kustomization.yaml
|
sed -i '' -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' "$kustomize_dir"/kustomization.yaml
|
||||||
sed -i '' -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' $kustomize_dir/patch_role_binding.yaml
|
sed -i '' -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' "$kustomize_dir"/patch_role_binding.yaml
|
||||||
sed -i '' -e 's@image: .*@image: '"${IMG}"'@' $kustomize_dir/patch_image_name.yaml
|
sed -i '' -e 's@image: .*@image: '"${IMG}"'@' "$kustomize_dir"/patch_image_name.yaml
|
||||||
else
|
else
|
||||||
sed -i -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' $kustomize_dir/patch_role_binding.yaml
|
sed -i -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' "$kustomize_dir"/patch_role_binding.yaml
|
||||||
sed -i -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' $kustomize_dir/kustomization.yaml
|
sed -i -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' "$kustomize_dir"/kustomization.yaml
|
||||||
sed -i -e 's@image: .*@image: '"${IMG}"'@' $kustomize_dir/patch_image_name.yaml
|
sed -i -e 's@image: .*@image: '"${IMG}"'@' "$kustomize_dir"/patch_image_name.yaml
|
||||||
fi
|
fi
|
||||||
|
|
||||||
kustomize build $kustomize_dir -o $DEST
|
kustomize build "$kustomize_dir" -o $DEST
|
||||||
if [ $MODE == "test" ]; then
|
if [ "$MODE" == "test" ]; then
|
||||||
ginkgo -v ./test/e2e/...
|
ginkgo -v ./test/e2e/...
|
||||||
elif [ $MODE == "debug" ]; then
|
elif [ "$MODE" == "debug" ]; then
|
||||||
kubectl create ns $TEST_NS --dry-run -o yaml | kubectl apply -f -
|
kubectl create ns "$TEST_NS" --dry-run -o yaml | kubectl apply -f -
|
||||||
kubectl apply -f $DEST
|
kubectl apply -f $DEST
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user