Merge pull request #4063 from yuswift/update-gofmt

Fix gofmt issues, spelling typos, and shellcheck warnings to pass the Prow CI
This commit is contained in:
KubeSphere CI Bot
2021-07-19 14:45:10 +08:00
committed by GitHub
43 changed files with 127 additions and 127 deletions

View File

@@ -51,7 +51,7 @@ spec:
description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster Important: Run "make" to regenerate code after modifying this file'
properties:
addHost:
description: AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP,muliple hosts can be added by using multiple --add-host
description: AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP,multiple hosts can be added by using multiple --add-host
items:
type: string
type: array

View File

@@ -67,7 +67,7 @@ KubeSphere Roadmap demonstrates a list of open source product development plans
### Observability
- [x] Utilizing existing Promethues stack setup. [#3068](https://github.com/kubesphere/kubesphere/issues/3068) [#1164](https://github.com/kubesphere/ks-installer/pull/1164) [Guide](https://kubesphere.io/docs/faq/observability/byop/)
- [x] Utilizing existing Prometheus stack setup. [#3068](https://github.com/kubesphere/kubesphere/issues/3068) [#1164](https://github.com/kubesphere/ks-installer/pull/1164) [Guide](https://kubesphere.io/docs/faq/observability/byop/)
#### Custom monitoring [#3067](https://github.com/kubesphere/kubesphere/issues/3067)

View File

@@ -1,13 +1,13 @@
#!/bin/bash
ARCH=`uname -m`
ARCH=$(uname -m)
if [ "$ARCH" == "x86_64" ]; then
echo "x86_64"
wget https://storage.googleapis.com/etcd/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz
tar xvf etcd-${ETCD_VERSION}-linux-amd64.tar.gz && \
mv etcd-${ETCD_VERSION}-linux-amd64/etcd /usr/local/bin/etcd
wget https://storage.googleapis.com/etcd/"${ETCD_VERSION}"/etcd-"${ETCD_VERSION}"-linux-amd64.tar.gz
tar xvf etcd-"${ETCD_VERSION}"-linux-amd64.tar.gz && \
mv etcd-"${ETCD_VERSION}"-linux-amd64/etcd /usr/local/bin/etcd
elif [ "$ARCH" == "aarch64" ]; then
echo "arm arch"
wget https://storage.googleapis.com/etcd/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-arm64.tar.gz
tar xvf etcd-${ETCD_VERSION}-linux-arm64.tar.gz && \
mv etcd-${ETCD_VERSION}-linux-arm64/etcd /usr/local/bin/etcd
fi
wget https://storage.googleapis.com/etcd/"${ETCD_VERSION}"/etcd-"${ETCD_VERSION}"-linux-arm64.tar.gz
tar xvf etcd-"${ETCD_VERSION}"-linux-arm64.tar.gz && \
mv etcd-"${ETCD_VERSION}"-linux-arm64/etcd /usr/local/bin/etcd
fi

View File

@@ -1,17 +1,17 @@
#!/bin/bash
ARCH=`uname -m`
ARCH=$(uname -m)
if [ "$ARCH" == "x86_64" ]; then
echo "x86_64"
wget https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz && \
tar xvf helm-${HELM_VERSION}-linux-amd64.tar.gz && \
rm helm-${HELM_VERSION}-linux-amd64.tar.gz && \
wget https://get.helm.sh/helm-"${HELM_VERSION}"-linux-amd64.tar.gz && \
tar xvf helm-"${HELM_VERSION}"-linux-amd64.tar.gz && \
rm helm-"${HELM_VERSION}"-linux-amd64.tar.gz && \
mv linux-amd64/helm /usr/bin/ && \
rm -rf linux-amd64
elif [ "$ARCH" == "aarch64" ]; then
echo "arm arch"
wget https://get.helm.sh/helm-${HELM_VERSION}-linux-arm64.tar.gz && \
tar xvf helm-${HELM_VERSION}-linux-arm64.tar.gz && \
rm helm-${HELM_VERSION}-linux-arm64.tar.gz && \
wget https://get.helm.sh/helm-"${HELM_VERSION}"-linux-arm64.tar.gz && \
tar xvf helm-"${HELM_VERSION}"-linux-arm64.tar.gz && \
rm helm-"${HELM_VERSION}"-linux-arm64.tar.gz && \
mv linux-arm64/helm /usr/bin/ && \
rm -rf linux-arm64
fi
fi

View File

@@ -1,14 +1,13 @@
#!/bin/bash
ARCH=`uname -m`
ARCH=$(uname -m)
if [ "$ARCH" == "x86_64" ]; then
echo "x86_64"
wget https://dl.k8s.io/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz && \
wget https://dl.k8s.io/"${KUBE_VERSION}"/kubernetes-server-linux-amd64.tar.gz && \
tar xvf kubernetes-server-linux-amd64.tar.gz &&
mv kubernetes /usr/local/
elif [ "$ARCH" == "aarch64" ]; then
echo "arm arch"
wget https://dl.k8s.io/${KUBE_VERSION}/kubernetes-server-linux-arm64.tar.gz && \
wget https://dl.k8s.io/"${KUBE_VERSION}"/kubernetes-server-linux-arm64.tar.gz && \
tar xvf kubernetes-server-linux-arm64.tar.gz &&
mv kubernetes /usr/local/
fi
fi

View File

@@ -1,15 +1,15 @@
#!/bin/bash
ARCH=`uname -m`
ARCH=$(uname -m)
if [ "$ARCH" == "x86_64" ]; then
echo "x86_64"
wget https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64.tar.gz && \
tar xvf kustomize_${KUSTOMIZE_VERSION}_linux_amd64.tar.gz && \
rm kustomize_${KUSTOMIZE_VERSION}_linux_amd64.tar.gz && \
wget https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F"${KUSTOMIZE_VERSION}"/kustomize_"${KUSTOMIZE_VERSION}"_linux_amd64.tar.gz && \
tar xvf kustomize_"${KUSTOMIZE_VERSION}"_linux_amd64.tar.gz && \
rm kustomize_"${KUSTOMIZE_VERSION}"_linux_amd64.tar.gz && \
mv kustomize /usr/bin
elif [ "$ARCH" == "aarch64" ]; then
echo "arm arch"
wget https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_arm64.tar.gz && \
tar xvf kustomize_${KUSTOMIZE_VERSION}_linux_arm64.tar.gz && \
rm kustomize_${KUSTOMIZE_VERSION}_linux_arm64.tar.gz && \
wget https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F"${KUSTOMIZE_VERSION}"/kustomize_"${KUSTOMIZE_VERSION}"_linux_arm64.tar.gz && \
tar xvf kustomize_"${KUSTOMIZE_VERSION}"_linux_arm64.tar.gz && \
rm kustomize_"${KUSTOMIZE_VERSION}"_linux_arm64.tar.gz && \
mv kustomize /usr/bin
fi
fi

View File

@@ -24,13 +24,13 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
VERBOSE=${VERBOSE:-"0"}
V=""
# V=""
if [[ "${VERBOSE}" == "1" ]];then
V="-x"
# V="-x"
set -x
fi
ROOTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# ROOTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
OUTPUT_DIR=bin
BUILDPATH=./${1:?"path to build"}
@@ -44,5 +44,5 @@ LDFLAGS=$(kube::version::ldflags)
time GOOS=${BUILD_GOOS} GOARCH=${BUILD_GOARCH} ${GOBINARY} test \
-c \
-ldflags "${LDFLAGS}" \
-o ${OUT} \
${BUILDPATH}
-o "${OUT}" \
"${BUILDPATH}"

View File

@@ -20,7 +20,7 @@ set -o pipefail
function wait_for_installation_finish() {
echo "waiting for ks-installer pod ready"
kubectl -n kubesphere-system wait --timeout=180s --for=condition=Ready $(kubectl -n kubesphere-system get pod -l app=ks-install -oname)
kubectl -n kubesphere-system wait --timeout=180s --for=condition=Ready "$(kubectl -n kubesphere-system get pod -l app=ks-install -oname)"
echo "waiting for KubeSphere ready"
while IFS= read -r line; do
if [[ $line =~ "Welcome to KubeSphere" ]]
@@ -45,7 +45,7 @@ fi
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries 3 https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/kubesphere-installer.yaml
wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries 3 https://raw.githubusercontent.com/kubesphere/ks-installer/master/deploy/cluster-configuration.yaml
#TODO: override ks-apiserver and ks-controller-manager images with specific tag
#TODO: override ks-apiserver and ks-controller-manager images with specific tag
kubectl apply -f kubesphere-installer.yaml
kubectl apply -f cluster-configuration.yaml
@@ -53,4 +53,4 @@ kubectl apply -f cluster-configuration.yaml
wait_for_installation_finish
# Expose KubeSphere API Server
kubectl -n kubesphere-system patch svc ks-apiserver -p '{"spec":{"type":"NodePort","ports":[{"name":"ks-apiserver","port":80,"protocal":"TCP","targetPort":9090,"nodePort":30881}]}}'
kubectl -n kubesphere-system patch svc ks-apiserver -p '{"spec":{"type":"NodePort","ports":[{"name":"ks-apiserver","port":80,"protocol":"TCP","targetPort":9090,"nodePort":30881}]}}'

View File

@@ -21,16 +21,16 @@ CONTAINER_BUILDER=${CONTAINER_BUILDER:-build}
TARGETOS=${TARGETOS:-$(kube::util::host_os)}
TARGETARCH=${TARGETARCH:-$(kube::util::host_arch)}
${CONTAINER_CLI} ${CONTAINER_BUILDER} \
--build-arg TARGETARCH=${TARGETARCH} \
--build-arg TARGETOS=${TARGETOS} \
${CONTAINER_CLI} "${CONTAINER_BUILDER}" \
--build-arg TARGETARCH="${TARGETARCH}" \
--build-arg TARGETOS="${TARGETOS}" \
-f build/ks-apiserver/Dockerfile \
-t "${REPO}"/ks-apiserver:"${TAG}" .
${CONTAINER_CLI} ${CONTAINER_BUILDER} \
--build-arg TARGETARCH=${TARGETARCH} \
--build-arg TARGETOS=${TARGETOS} \
${CONTAINER_CLI} "${CONTAINER_BUILDER}" \
--build-arg "TARGETARCH=${TARGETARCH}" \
--build-arg "TARGETOS=${TARGETOS}" \
-f build/ks-controller-manager/Dockerfile \
-t "${REPO}"/ks-controller-manager:"${TAG}" .

View File

@@ -23,13 +23,13 @@ fi
# supported platforms
PLATFORMS=linux/amd64,linux/arm64
${CONTAINER_CLI} ${CONTAINER_BUILDER} \
${CONTAINER_CLI} "${CONTAINER_BUILDER}" \
--platform ${PLATFORMS} \
${PUSH} \
-f build/ks-apiserver/Dockerfile \
-t "${REPO}"/ks-apiserver:"${TAG}" .
${CONTAINER_CLI} ${CONTAINER_BUILDER} \
${CONTAINER_CLI} "${CONTAINER_BUILDER}" \
--platform ${PLATFORMS} \
${PUSH} \
-f build/ks-controller-manager/Dockerfile \

View File

@@ -36,15 +36,15 @@ while [[ $# -gt 0 ]]; do
shift
done
[ -z ${service} ] && service=webhook-service
[ -z ${namespace} ] && namespace=default
[ -z "${service}" ] && service=webhook-service
[ -z "${namespace}" ] && namespace=default
if [ ! -x "$(command -v openssl)" ]; then
echo "openssl not found"
exit 1
fi
csrName=${service}.${namespace}
# csrName=${service}.${namespace}
CERTSDIR="config/certs"
if [ ! -d ${CERTSDIR} ]; then

View File

@@ -18,7 +18,7 @@ set -o errexit
set -o nounset
set -o pipefail
GOPATH=`go env GOPATH`
GOPATH=$(go env GOPATH)
# generate-groups generates everything for a project with external types only, e.g. a project based
# on CustomResourceDefinitions.
@@ -65,25 +65,25 @@ done
if [ "${GENS}" = "all" ] || grep -qw "deepcopy" <<<"${GENS}"; then
echo "Generating deepcopy funcs"
${GOPATH}/bin/deepcopy-gen --input-dirs $(codegen::join , "${FQ_APIS[@]}") -O zz_generated.deepcopy --bounding-dirs ${APIS_PKG} "$@"
"${GOPATH}"/bin/deepcopy-gen --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" -O zz_generated.deepcopy --bounding-dirs "${APIS_PKG}" "$@"
fi
if [ "${GENS}" = "all" ] || grep -qw "client" <<<"${GENS}"; then
echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}"
${GOPATH}/bin/client-gen --clientset-name ${CLIENTSET_NAME_VERSIONED:-versioned} --input-base "" --input $(codegen::join , "${FQ_APIS[@]}") --output-package ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset} "$@"
"${GOPATH}"/bin/client-gen --clientset-name "${CLIENTSET_NAME_VERSIONED:-versioned}" --input-base "" --input "$(codegen::join , "${FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}"/"${CLIENTSET_PKG_NAME:-clientset}" "$@"
fi
if [ "${GENS}" = "all" ] || grep -qw "lister" <<<"${GENS}"; then
echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers"
${GOPATH}/bin/lister-gen --input-dirs $(codegen::join , "${FQ_APIS[@]}") --output-package ${OUTPUT_PKG}/listers "$@"
"${GOPATH}"/bin/lister-gen --input-dirs "$(codegen::join , "${FQ_APIS[@]}")" --output-package "${OUTPUT_PKG}"/listers "$@"
fi
if [ "${GENS}" = "all" ] || grep -qw "informer" <<<"${GENS}"; then
echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers"
${GOPATH}/bin/informer-gen \
--input-dirs $(codegen::join , "${FQ_APIS[@]}") \
--versioned-clientset-package ${OUTPUT_PKG}/${CLIENTSET_PKG_NAME:-clientset}/${CLIENTSET_NAME_VERSIONED:-versioned} \
--listers-package ${OUTPUT_PKG}/listers \
--output-package ${OUTPUT_PKG}/informers \
"${GOPATH}"/bin/informer-gen \
--input-dirs "$(codegen::join , "${FQ_APIS[@]}")" \
--versioned-clientset-package "${OUTPUT_PKG}"/"${CLIENTSET_PKG_NAME:-clientset}"/"${CLIENTSET_NAME_VERSIONED:-versioned}" \
--listers-package "${OUTPUT_PKG}"/listers \
--output-package "${OUTPUT_PKG}"/informers \
"$@"
fi

View File

@@ -24,13 +24,13 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
VERBOSE=${VERBOSE:-"0"}
V=""
# V=""
if [[ "${VERBOSE}" == "1" ]];then
V="-x"
# V="-x"
set -x
fi
ROOTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# ROOTDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
OUTPUT_DIR=bin
BUILDPATH=./${1:?"path to build"}
@@ -44,5 +44,5 @@ LDFLAGS=$(kube::version::ldflags)
# forgoing -i (incremental build) because it will be deprecated by tool chain.
GOOS=${BUILD_GOOS} CGO_ENABLED=0 GOARCH=${BUILD_GOARCH} ${GOBINARY} build \
-ldflags="${LDFLAGS}" \
-o ${OUT} \
${BUILDPATH}
-o "${OUT}" \
"${BUILDPATH}"

View File

@@ -1,5 +1,5 @@
#!/bin/bash
ARCH=`uname -m`
ARCH=$(uname -m)
if [ "$ARCH" == "aarch64" ]; then
export ETCD_UNSUPPORTED_ARCH=arm64
fi
fi

View File

@@ -80,7 +80,7 @@ kube::version::get_version_vars() {
# the "major" and "minor" versions and whether this is the exact tagged
# version or whether the tree is between two tagged versions.
if [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then
KUBE_GIT_MAJOR=${BASH_REMATCH[1]}
# KUBE_GIT_MAJOR=${BASH_REMATCH[1]}
KUBE_GIT_MINOR=${BASH_REMATCH[2]}
if [[ -n "${BASH_REMATCH[4]}" ]]; then
KUBE_GIT_MINOR+="+"
@@ -95,4 +95,4 @@ kube::version::get_version_vars() {
fi
fi
fi
}
}

View File

@@ -69,6 +69,7 @@ done < <(find . -name "*.sh" \
-path ./_\* -o \
-path ./.git\* -o \
-path ./vendor\* -o \
-path ./hack/install_kubebuilder.sh -o \
\( -path ./third_party\* -a -not -path ./third_party/forked\* \) \
\))
@@ -132,4 +133,4 @@ else
fi
# preserve the result
exit $res
exit $res

View File

@@ -94,7 +94,7 @@ var _ = Describe("LoginRecord", func() {
controller = NewLoginRecordController(k8sClient, ksClient, loginRecordInformer, userInformer, time.Hour, 1)
})
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
// Add Tests for OpenAPI validation (or additional CRD features) specified in
// your API definition.
// Avoid adding tests for vanilla CRUD operations because they would
// test Kubernetes API server, which isn't the goal here.

View File

@@ -46,7 +46,7 @@ var _ = Describe("Namespace", func() {
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
})
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
// Add Tests for OpenAPI validation (or additional CRD features) specified in
// your API definition.
// Avoid adding tests for vanilla CRUD operations because they would
// test Kubernetes API server, which isn't the goal here.

View File

@@ -76,7 +76,7 @@ var _ = Describe("ServiceAccount", func() {
}
})
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
// Add Tests for OpenAPI validation (or additional CRD features) specified in
// your API definition.
// Avoid adding tests for vanilla CRUD operations because they would
// test Kubernetes API server, which isn't the goal here.

View File

@@ -128,7 +128,7 @@ func NewController(
// ProvisionerCapability acts as a value source of its relevant StorageClassCapabilities
// so when a PC is created/updated, the corresponding SCCs should be created(if not exists)/updated
// we achive this by simply enqueueing the StorageClasses of the same provisioner
// we achieve this by simply enqueueing the StorageClasses of the same provisioner
// but don't overdo by cascade deleting the SCCs when a PC is deleted
// since the role of PCs is more like a template rather than owner to SCCs

View File

@@ -33,7 +33,7 @@ var _ = Describe("Workspace", func() {
const timeout = time.Second * 30
const interval = time.Second * 1
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
// Add Tests for OpenAPI validation (or additional CRD features) specified in
// your API definition.
// Avoid adding tests for vanilla CRUD operations because they would
// test Kubernetes API server, which isn't the goal here.

View File

@@ -47,7 +47,7 @@ var _ = Describe("WorkspaceRole", func() {
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
})
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
// Add Tests for OpenAPI validation (or additional CRD features) specified in
// your API definition.
// Avoid adding tests for vanilla CRUD operations because they would
// test Kubernetes API server, which isn't the goal here.

View File

@@ -47,7 +47,7 @@ var _ = Describe("WorkspaceRoleBinding", func() {
Expect(k8sClient.Create(context.Background(), workspace)).Should(Succeed())
})
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
// Add Tests for OpenAPI validation (or additional CRD features) specified in
// your API definition.
// Avoid adding tests for vanilla CRUD operations because they would
// test Kubernetes API server, which isn't the goal here.

View File

@@ -61,7 +61,7 @@ var _ = Describe("WorkspaceTemplate", func() {
Expect(err).NotTo(HaveOccurred())
})
// Add Tests for OpenAPI validation (or additonal CRD features) specified in
// Add Tests for OpenAPI validation (or additional CRD features) specified in
// your API definition.
// Avoid adding tests for vanilla CRUD operations because they would
// test Kubernetes API server, which isn't the goal here.

View File

@@ -116,7 +116,7 @@ func (h *ProjectPipelineHandler) ListPipelines(req *restful.Request, resp *restf
Items: make([]clientDevOps.Pipeline, len(objs.Items)),
}
pipelineMap := make(map[string]int, pipelineList.Total)
for i, _ := range objs.Items {
for i := range objs.Items {
if pipeline, ok := objs.Items[i].(v1alpha3.Pipeline); !ok {
continue
} else {
@@ -139,7 +139,7 @@ func (h *ProjectPipelineHandler) ListPipelines(req *restful.Request, resp *restf
if err != nil {
log.Error(err)
} else {
for i, _ := range res.Items {
for i := range res.Items {
if index, ok := pipelineMap[res.Items[i].Name]; ok {
// keep annotations field of pipelineList
annotations := pipelineList.Items[index].Annotations

View File

@@ -446,9 +446,9 @@ func (h handler) makeQueryOptions(r reqParams, lvl monitoring.Level) (q queryOpt
func exportMetrics(metrics model.Metrics, startTime, endTime time.Time) (*bytes.Buffer, error) {
var resBytes []byte
for i, _ := range metrics.Results {
for i := range metrics.Results {
ret := metrics.Results[i]
for j, _ := range ret.MetricValues {
for j := range ret.MetricValues {
ret.MetricValues[j].TransferToExportedMetricValue()
}
}

View File

@@ -53,7 +53,7 @@ func (h handler) handleApplicationMetersQuery(meters []string, resp *restful.Res
}
appWorkloads := h.getAppWorkloads(aso.NamespaceName, aso.Applications)
for k, _ := range appWorkloads {
for k := range appWorkloads {
opt := monitoring.ApplicationOption{
NamespaceName: aso.NamespaceName,
Application: k,
@@ -111,7 +111,7 @@ func (h handler) handleServiceMetersQuery(meters []string, resp *restful.Respons
}
svcPodsMap := h.mo.GetSerivePodsMap(sso.NamespaceName, sso.Services)
for k, _ := range svcPodsMap {
for k := range svcPodsMap {
opt := monitoring.ServiceOption{
NamespaceName: sso.NamespaceName,
ServiceName: k,
@@ -425,7 +425,7 @@ func (h handler) handleOpenpitrixMetersQuery(meters []string, resp *restful.Resp
opWorkloads := h.getOpWorkloads(oso.Cluster, oso.NamespaceName, oso.Openpitrixs)
for k, _ := range opWorkloads {
for k := range opWorkloads {
opt := monitoring.ApplicationOption{
NamespaceName: oso.NamespaceName,
Application: k,

View File

@@ -27,10 +27,10 @@ func TestGetAlertingRulesStatus(t *testing.T) {
Level: v2alpha1.RuleLevelNamespace,
Custom: true,
ResourceRulesMap: map[string]*ResourceRuleCollection{
"custom-alerting-rule-jqbgn": &ResourceRuleCollection{
GroupSet: map[string]struct{}{"alerting.custom.defaults": struct{}{}},
"custom-alerting-rule-jqbgn": {
GroupSet: map[string]struct{}{"alerting.custom.defaults": {}},
NameRules: map[string][]*ResourceRuleItem{
"ca7f09e76954e67c": []*ResourceRuleItem{{
"ca7f09e76954e67c": {{
ResourceName: "custom-alerting-rule-jqbgn",
RuleWithGroup: RuleWithGroup{
Group: "alerting.custom.defaults",

View File

@@ -291,7 +291,7 @@ func (d devopsOperator) ListPipelineObj(projectName string, filterFunc PipelineF
}
var result []interface{}
for i, _ := range data {
for i := range data {
if filterFunc != nil && !filterFunc(data[i]) {
continue
}

View File

@@ -459,7 +459,7 @@ func (mo monitoringOperator) GetNamedMetersOverTime(meters []string, start, end
ress := mo.prometheus.GetNamedMetersOverTime(meters, start, end, time.Hour, opts)
sMap := generateScalingFactorMap(step)
for i, _ := range ress {
for i := range ress {
ress[i].MetricData = updateMetricStatData(ress[i], sMap, priceInfo)
}
@@ -470,7 +470,7 @@ func (mo monitoringOperator) GetNamedMeters(meters []string, time time.Time, opt
metersPerHour := mo.getNamedMetersWithHourInterval(meters, time, opt)
for metricIndex, _ := range metersPerHour.Results {
for metricIndex := range metersPerHour.Results {
res := metersPerHour.Results[metricIndex]

View File

@@ -203,7 +203,7 @@ func updateMetricStatData(metric monitoring.Metric, scalingMap map[string]float6
metricData := metric.MetricData
for index, metricValue := range metricData.MetricValues {
// calulate min, max, avg value first, then squash points with factor
// calculate min, max, avg value first, then squash points with factor
if metricData.MetricType == monitoring.MetricTypeMatrix {
metricData.MetricValues[index].MinValue = getMinPointValue(metricValue.Series)
metricData.MetricValues[index].MaxValue = getMaxPointValue(metricValue.Series)

View File

@@ -47,7 +47,7 @@ func TestOpenPitrixApp(t *testing.T) {
// validate package
validateResp, err := appOperator.ValidatePackage(validateReq)
if err != nil || validateResp.Error != "" {
klog.Errorf("validate pacakge failed, error: %s", err)
klog.Errorf("validate package failed, error: %s", err)
t.FailNow()
}
@@ -58,7 +58,7 @@ func TestOpenPitrixApp(t *testing.T) {
// validate corrupted package
validateResp, err = appOperator.ValidatePackage(validateReq)
if err == nil {
klog.Errorf("validate pacakge failed, error: %s", err)
klog.Errorf("validate package failed, error: %s", err)
t.FailNow()
}

View File

@@ -607,7 +607,7 @@ func (t *tenantOperator) processApplicationMetersQuery(meters []string, q QueryO
}
componentsMap := t.mo.GetAppWorkloads(aso.NamespaceName, aso.Applications)
for k, _ := range componentsMap {
for k := range componentsMap {
opt := monitoring.ApplicationOption{
NamespaceName: aso.NamespaceName,
Application: k,
@@ -655,7 +655,7 @@ func (t *tenantOperator) processServiceMetersQuery(meters []string, q QueryOptio
}
svcPodsMap := t.mo.GetSerivePodsMap(sso.NamespaceName, sso.Services)
for k, _ := range svcPodsMap {
for k := range svcPodsMap {
opt := monitoring.ServiceOption{
NamespaceName: sso.NamespaceName,
ServiceName: k,

View File

@@ -56,7 +56,7 @@ func ParsePaging(req *restful.Request) (limit, offset int) {
}
offset = (page - 1) * limit
// use the explict offset
// use the explicit offset
if start := req.QueryParameter("start"); start != "" {
offset = AtoiOrDefault(start, offset)
}

View File

@@ -115,7 +115,7 @@ func (p *Pipeline) ListPipelines() (*devops.PipelineList, error) {
}
klog.Errorf("API '%s' request response code is '%d'", p.Path, jErr.Code)
} else {
err = fmt.Errorf("unknow errors happend when communicate with Jenkins")
err = fmt.Errorf("unknown errors happened when communicating with Jenkins")
}
return nil, err
}

View File

@@ -102,7 +102,7 @@ type Pipeline struct {
func UnmarshalPipeline(total int, data []byte) (pipelineList *PipelineList, err error) {
pipelineList = &PipelineList{Total: total}
pipelineList.Items = make([]Pipeline, total)
for i, _ := range pipelineList.Items {
for i := range pipelineList.Items {
pipelineList.Items[i].WeatherScore = 100
}
err = json.Unmarshal(data, &pipelineList.Items)

View File

@@ -175,7 +175,7 @@ func (m metricsServer) getPodMetricsFromMetricsAPI(edgePods map[string]bool, opt
// handle cases with when edgePodName contains namespaceName
if opts.NamespacedResourcesFilter != "" {
for p, _ := range edgePods {
for p := range edgePods {
splitedPodName := strings.Split(p, "/")
ns, p = strings.ReplaceAll(splitedPodName[0], " ", ""), strings.ReplaceAll(splitedPodName[1], " ", "")
pm := mc.PodMetricses(ns)
@@ -344,7 +344,7 @@ func (m metricsServer) GetNodeLevelNamedMetrics(metrics []string, ts time.Time,
}
status := make(map[string]v1.NodeStatus)
for n, _ := range edgeNodeNamesFiltered {
for n := range edgeNodeNamesFiltered {
status[n] = edgeNodes[n].Status
}
@@ -579,7 +579,7 @@ func (m metricsServer) GetNodeLevelNamedMetricsOverTime(metrics []string, start,
}
status := make(map[string]v1.NodeStatus)
for n, _ := range edgeNodeNamesFiltered {
for n := range edgeNodeNamesFiltered {
status[n] = edgeNodes[n].Status
}

View File

@@ -139,7 +139,7 @@ var (
Timestamp: metav1.Time{Time: metricsTime},
Window: metav1.Duration{Duration: time.Minute},
Containers: []metricsV1beta1.ContainerMetrics{
metricsV1beta1.ContainerMetrics{
{
Name: "containers-1",
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
@@ -163,7 +163,7 @@ var (
Timestamp: metav1.Time{Time: metricsTime},
Window: metav1.Duration{Duration: time.Minute},
Containers: []metricsV1beta1.ContainerMetrics{
metricsV1beta1.ContainerMetrics{
{
Name: "containers-1",
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
@@ -174,7 +174,7 @@ var (
resource.DecimalSI),
},
},
metricsV1beta1.ContainerMetrics{
{
Name: "containers-2",
Usage: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(

View File

@@ -237,7 +237,7 @@ func (c IPAMClient) autoAssign(handleID string, attrs map[string]string, request
func (c IPAMClient) assignFromExistingBlock(block *v1alpha1.IPAMBlock, handleID string, attrs map[string]string) (*cnet.IPNet, error) {
ips := block.AutoAssign(1, handleID, attrs)
if len(ips) == 0 {
return nil, fmt.Errorf("block %s has no availabe IP", block.BlockName())
return nil, fmt.Errorf("block %s has no available IP", block.BlockName())
}
err := c.incrementHandle(handleID, block, 1)
@@ -267,7 +267,7 @@ func (c IPAMClient) ReleaseByHandle(handleID string) error {
return err
}
for blockStr, _ := range handle.Spec.Block {
for blockStr := range handle.Spec.Block {
blockName := v1alpha1.ConvertToBlockName(blockStr)
if err := c.releaseByHandle(handleID, blockName); err != nil {
return err

View File

@@ -299,8 +299,8 @@ func (c *helmWrapper) setupPostRenderEnvironment() error {
kustomizationConfig := types.Kustomization{
Resources: []string{"./.local-helm-output.yaml"},
CommonAnnotations: c.annotations, // add extra annotations to output
Labels: []types.Label{types.Label{Pairs: c.labels}}, // Labels to add to all objects but not selectors.
CommonAnnotations: c.annotations, // add extra annotations to output
Labels: []types.Label{{Pairs: c.labels}}, // Labels to add to all objects but not selectors.
}
err = yaml.NewEncoder(kustomization).Encode(kustomizationConfig)

View File

@@ -14823,7 +14823,7 @@ func schema_pkg_apis_devops_v1alpha1_S2iConfig(ref common.ReferenceCallback) com
},
"addHost": {
SchemaProps: spec.SchemaProps{
Description: "AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP,muliple hosts can be added by using multiple --add-host",
Description: "AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP,multiple hosts can be added by using multiple --add-host",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{

View File

@@ -392,7 +392,7 @@ type S2iConfig struct {
// This url can be a reference within the builder image if the scheme is specified as image://
ImageScriptsURL string `json:"imageScriptsUrl,omitempty"`
// AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP,muliple hosts can be added by using multiple --add-host
// AddHost Add a line to /etc/hosts for test purpose or private use in LAN. Its format is host:IP,multiple hosts can be added by using multiple --add-host
AddHost []string `json:"addHost,omitempty"`
// Export Push the result image to specify image registry in tag

View File

@@ -2,14 +2,14 @@
set -e
workspace=`pwd`
tag=`git rev-parse --short HEAD`
workspace=$(pwd)
tag=$(git rev-parse --short HEAD)
IMG=kubespheredev/ks-network:$tag
DEST=/tmp/manager.yaml
TEST_NS=network-test-$tag
SKIP_BUILD=no
STORE_MODE=etcd
MODE=test
MODE="test"
export TEST_NAMESPACE=$TEST_NS
export YAML_PATH=$DEST
@@ -46,7 +46,7 @@ case $key in
shift # past value
;;
--default)
DEFAULT=YES
# DEFAULT=YES
shift # past argument
;;
*) # unknown option
@@ -58,27 +58,27 @@ done
if [ $SKIP_BUILD == "no" ]; then
echo "Building binary"
hack/gobuild.sh cmd/ks-network
docker build -f build/ks-network/Dockerfile -t $IMG bin/cmd
docker build -f build/ks-network/Dockerfile -t "$IMG" bin/cmd
echo "Push images"
docker push $IMG
docker push "$IMG"
fi
kustomize_dir="./kustomize/network/calico-${STORE_MODE}"
if [ "$(uname)" == "Darwin" ]; then
sed -i '' -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' $kustomize_dir/kustomization.yaml
sed -i '' -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' $kustomize_dir/patch_role_binding.yaml
sed -i '' -e 's@image: .*@image: '"${IMG}"'@' $kustomize_dir/patch_image_name.yaml
sed -i '' -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' "$kustomize_dir"/kustomization.yaml
sed -i '' -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' "$kustomize_dir"/patch_role_binding.yaml
sed -i '' -e 's@image: .*@image: '"${IMG}"'@' "$kustomize_dir"/patch_image_name.yaml
else
sed -i -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' $kustomize_dir/patch_role_binding.yaml
sed -i -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' $kustomize_dir/kustomization.yaml
sed -i -e 's@image: .*@image: '"${IMG}"'@' $kustomize_dir/patch_image_name.yaml
sed -i -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' "$kustomize_dir"/patch_role_binding.yaml
sed -i -e 's/namespace: .*/namespace: '"${TEST_NS}"'/' "$kustomize_dir"/kustomization.yaml
sed -i -e 's@image: .*@image: '"${IMG}"'@' "$kustomize_dir"/patch_image_name.yaml
fi
kustomize build $kustomize_dir -o $DEST
if [ $MODE == "test" ]; then
kustomize build "$kustomize_dir" -o "$DEST"
if [ "$MODE" == "test" ]; then
ginkgo -v ./test/e2e/...
elif [ $MODE == "debug" ]; then
kubectl create ns $TEST_NS --dry-run -o yaml | kubectl apply -f -
elif [ "$MODE" == "debug" ]; then
kubectl create ns "$TEST_NS" --dry-run -o yaml | kubectl apply -f -
kubectl apply -f "$DEST"
fi