diff --git a/cmd/ks-apiserver/app/options/options.go b/cmd/ks-apiserver/app/options/options.go index 1267a09f1..bb09b5834 100644 --- a/cmd/ks-apiserver/app/options/options.go +++ b/cmd/ks-apiserver/app/options/options.go @@ -103,8 +103,13 @@ func (s *ServerRunOptions) NewAPIServer(stopCh <-chan struct{}) (*apiserver.APIS informerFactory := informers.NewInformerFactories(kubernetesClient.Kubernetes(), kubernetesClient.KubeSphere(), kubernetesClient.Istio(), kubernetesClient.Application()) apiServer.InformerFactory = informerFactory - monitoringClient := prometheus.NewPrometheus(s.MonitoringOptions) - apiServer.MonitoringClient = monitoringClient + if s.MonitoringOptions.Endpoint != "" { + monitoringClient, err := prometheus.NewPrometheus(s.MonitoringOptions) + if err != nil { + return nil, err + } + apiServer.MonitoringClient = monitoringClient + } if s.LoggingOptions.Host != "" { loggingClient, err := esclient.NewElasticsearch(s.LoggingOptions) diff --git a/go.mod b/go.mod index 8def09c3c..4a41dfc01 100644 --- a/go.mod +++ b/go.mod @@ -68,6 +68,7 @@ require ( github.com/openshift/api v0.0.0-20180801171038-322a19404e37 // indirect github.com/pkg/errors v0.8.1 github.com/projectcalico/libcalico-go v1.7.2-0.20191104213956-8f81e1e344ce + github.com/prometheus/client_golang v0.9.3 github.com/prometheus/common v0.4.0 github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009 github.com/speps/go-hashids v2.0.0+incompatible @@ -77,7 +78,7 @@ require ( github.com/stretchr/testify v1.4.0 github.com/xanzy/ssh-agent v0.2.1 // indirect golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 - golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 + golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 google.golang.org/grpc v1.23.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/go-playground/validator.v9 v9.29.1 // indirect @@ -86,12 +87,12 @@ require ( gopkg.in/yaml.v2 v2.2.4 istio.io/api v0.0.0-20191111210003-35e06ef8d838 istio.io/client-go 
v0.0.0-20191113122552-9bd0ba57c3d2 - k8s.io/api v0.18.0 + k8s.io/api v0.0.0-20191114100352-16d7abae0d2a k8s.io/apiextensions-apiserver v0.0.0-20191114105449-027877536833 - k8s.io/apimachinery v0.18.0 + k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682 k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 - k8s.io/code-generator v0.18.0 + k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 k8s.io/component-base v0.0.0-20191114102325-35a9586014f7 k8s.io/gengo v0.0.0-20191120174120-e74f70b9b27e // indirect k8s.io/klog v1.0.0 diff --git a/go.sum b/go.sum index d39f730cd..e5ab35c79 100644 --- a/go.sum +++ b/go.sum @@ -315,9 +315,6 @@ github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVo github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/openshift/api v0.0.0-20180801171038-322a19404e37 h1:05irGU4HK4IauGGDbsk+ZHrm1wOzMLYjMlfaiqMrBYc= github.com/openshift/api v0.0.0-20180801171038-322a19404e37/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= -github.com/openshift/api v0.0.0-20200331152225-585af27e34fd h1:f4iPC9iCf1an7qEWpEFvp/swsM79vvBRsJ2twU4D30s= -github.com/openshift/api v0.0.0-20200331152225-585af27e34fd/go.mod h1:RKMJ5CBnljLfnej+BJ/xnOWc3kZDvJUaIAEq2oKSPtE= -github.com/openshift/build-machinery-go v0.0.0-20200211121458-5e3d6e570160/go.mod h1:1CkcsT3aVebzRBzVTSbiKSkJMsC/CASqxesfqEMfJEc= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA= diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 0c6e1c9d3..f4b52ddd1 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -28,7 +28,7 @@ import ( configv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/config/v1alpha2" iamv1alpha2 
"kubesphere.io/kubesphere/pkg/kapis/iam/v1alpha2" loggingv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/logging/v1alpha2" - monitoringv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/monitoring/v1alpha2" + monitoringv1alpha3 "kubesphere.io/kubesphere/pkg/kapis/monitoring/v1alpha3" "kubesphere.io/kubesphere/pkg/kapis/oauth" openpitrixv1 "kubesphere.io/kubesphere/pkg/kapis/openpitrix/v1" operationsv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/operations/v1alpha2" @@ -138,7 +138,7 @@ func (s *APIServer) installKubeSphereAPIs() { // Need to refactor devops api registration, too much dependencies //urlruntime.Must(devopsv1alpha2.AddToContainer(s.container, s.DevopsClient, s.DBClient.Database(), nil, s.KubernetesClient.KubeSphere(), s.InformerFactory.KubeSphereSharedInformerFactory(), s.S3Client)) urlruntime.Must(loggingv1alpha2.AddToContainer(s.container, s.KubernetesClient, s.LoggingClient)) - urlruntime.Must(monitoringv1alpha2.AddToContainer(s.container, s.KubernetesClient, s.MonitoringClient)) + urlruntime.Must(monitoringv1alpha3.AddToContainer(s.container, s.KubernetesClient.Kubernetes(), s.MonitoringClient)) urlruntime.Must(openpitrixv1.AddToContainer(s.container, s.InformerFactory, s.OpenpitrixClient)) urlruntime.Must(operationsv1alpha2.AddToContainer(s.container, s.KubernetesClient.Kubernetes())) urlruntime.Must(resourcesv1alpha2.AddToContainer(s.container, s.KubernetesClient.Kubernetes(), s.InformerFactory)) diff --git a/pkg/kapis/monitoring/v1alpha2/handler.go b/pkg/kapis/monitoring/v1alpha2/handler.go deleted file mode 100644 index 04f266ebe..000000000 --- a/pkg/kapis/monitoring/v1alpha2/handler.go +++ /dev/null @@ -1,145 +0,0 @@ -/* - - Copyright 2019 The KubeSphere Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -*/ - -package v1alpha2 - -import ( - "github.com/emicklei/go-restful" - "kubesphere.io/kubesphere/pkg/api" - "kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2" - model "kubesphere.io/kubesphere/pkg/models/monitoring" - "kubesphere.io/kubesphere/pkg/simple/client/k8s" - "kubesphere.io/kubesphere/pkg/simple/client/monitoring" -) - -type handler struct { - k k8s.Client - mo model.MonitoringOperator -} - -func newHandler(k k8s.Client, m monitoring.Interface) *handler { - return &handler{k, model.NewMonitoringOperator(m)} -} - -func (h handler) handleClusterMetricsQuery(req *restful.Request, resp *restful.Response) { - p, err := h.parseRequestParams(req, monitoring.LevelCluster) - if err != nil { - api.HandleBadRequest(resp, nil, err) - return - } - h.handleNamedMetricsQuery(resp, p) -} - -func (h handler) handleNodeMetricsQuery(req *restful.Request, resp *restful.Response) { - p, err := h.parseRequestParams(req, monitoring.LevelNode) - if err != nil { - api.HandleBadRequest(resp, nil, err) - return - } - h.handleNamedMetricsQuery(resp, p) -} - -func (h handler) handleWorkspaceMetricsQuery(req *restful.Request, resp *restful.Response) { - p, err := h.parseRequestParams(req, monitoring.LevelWorkspace) - if err != nil { - api.HandleBadRequest(resp, nil, err) - return - } - h.handleNamedMetricsQuery(resp, p) -} - -func (h handler) handleNamespaceMetricsQuery(req *restful.Request, resp *restful.Response) { - p, err := h.parseRequestParams(req, monitoring.LevelNamespace) - if err != nil { - api.HandleBadRequest(resp, nil, err) - return - } - 
h.handleNamedMetricsQuery(resp, p) -} - -func (h handler) handleWorkloadMetricsQuery(req *restful.Request, resp *restful.Response) { - p, err := h.parseRequestParams(req, monitoring.LevelWorkload) - if err != nil { - api.HandleBadRequest(resp, nil, err) - return - } - h.handleNamedMetricsQuery(resp, p) -} - -func (h handler) handlePodMetricsQuery(req *restful.Request, resp *restful.Response) { - p, err := h.parseRequestParams(req, monitoring.LevelPod) - if err != nil { - api.HandleBadRequest(resp, nil, err) - return - } - h.handleNamedMetricsQuery(resp, p) -} - -func (h handler) handleContainerMetricsQuery(req *restful.Request, resp *restful.Response) { - p, err := h.parseRequestParams(req, monitoring.LevelContainer) - if err != nil { - api.HandleBadRequest(resp, nil, err) - return - } - h.handleNamedMetricsQuery(resp, p) -} - -func (h handler) handlePVCMetricsQuery(req *restful.Request, resp *restful.Response) { - p, err := h.parseRequestParams(req, monitoring.LevelPVC) - if err != nil { - api.HandleBadRequest(resp, nil, err) - return - } - h.handleNamedMetricsQuery(resp, p) -} - -func (h handler) handleComponentMetricsQuery(req *restful.Request, resp *restful.Response) { - p, err := h.parseRequestParams(req, monitoring.LevelComponent) - if err != nil { - api.HandleBadRequest(resp, nil, err) - return - } - h.handleNamedMetricsQuery(resp, p) -} - -func (h handler) handleNamedMetricsQuery(resp *restful.Response, p params) { - var res v1alpha2.APIResponse - var err error - - if p.isRangeQuery() { - res, err = h.mo.GetNamedMetricsOverTime(p.start, p.end, p.step, p.option) - if err != nil { - api.HandleInternalError(resp, nil, err) - return - } - } else { - res, err = h.mo.GetNamedMetrics(p.time, p.option) - if err != nil { - api.HandleInternalError(resp, nil, err) - return - } - - if p.shouldSort() { - var rows int - res, rows = h.mo.SortMetrics(res, p.target, p.order, p.identifier) - res = h.mo.PageMetrics(res, p.page, p.limit, rows) - } - } - - resp.WriteAsJson(res) 
-} diff --git a/pkg/kapis/monitoring/v1alpha2/helper.go b/pkg/kapis/monitoring/v1alpha2/helper.go deleted file mode 100644 index 1896c5e0b..000000000 --- a/pkg/kapis/monitoring/v1alpha2/helper.go +++ /dev/null @@ -1,217 +0,0 @@ -package v1alpha2 - -import ( - "fmt" - "github.com/emicklei/go-restful" - "github.com/pkg/errors" - corev1 "k8s.io/apimachinery/pkg/apis/meta/v1" - model "kubesphere.io/kubesphere/pkg/models/monitoring" - "kubesphere.io/kubesphere/pkg/simple/client/monitoring" - "strconv" - "time" -) - -const ( - DefaultStep = 10 * time.Minute - DefaultFilter = ".*" - DefaultOrder = model.OrderDescending - DefaultPage = 1 - DefaultLimit = 5 -) - -type params struct { - time time.Time - start, end time.Time - step time.Duration - - target string - identifier string - order string - page int - limit int - - option monitoring.QueryOption -} - -func (p params) isRangeQuery() bool { - return !p.time.IsZero() -} - -func (p params) shouldSort() bool { - return p.target != "" -} - -func (h handler) parseRequestParams(req *restful.Request, lvl monitoring.MonitoringLevel) (params, error) { - timestamp := req.QueryParameter("time") - start := req.QueryParameter("start") - end := req.QueryParameter("end") - step := req.QueryParameter("step") - target := req.QueryParameter("sort_metric") - order := req.QueryParameter("sort_type") - page := req.QueryParameter("page") - limit := req.QueryParameter("limit") - metricFilter := req.QueryParameter("metrics_filter") - resourceFilter := req.QueryParameter("resources_filter") - nodeName := req.PathParameter("node") - workspaceName := req.PathParameter("workspace") - namespaceName := req.PathParameter("namespace") - workloadKind := req.PathParameter("kind") - workloadName := req.PathParameter("workload") - podName := req.PathParameter("pod") - containerName := req.PathParameter("container") - pvcName := req.PathParameter("pvc") - storageClassName := req.PathParameter("storageclass") - componentType := 
req.PathParameter("component") - - var p params - var err error - if start != "" && end != "" { - p.start, err = time.Parse(time.RFC3339, start) - if err != nil { - return p, err - } - p.end, err = time.Parse(time.RFC3339, end) - if err != nil { - return p, err - } - if step == "" { - p.step = DefaultStep - } else { - p.step, err = time.ParseDuration(step) - if err != nil { - return p, err - } - } - } else if start == "" && end == "" { - if timestamp == "" { - p.time = time.Now() - } else { - p.time, err = time.Parse(time.RFC3339, req.QueryParameter("time")) - if err != nil { - return p, err - } - } - } else { - return p, errors.Errorf("'time' and the combination of 'start' and 'end' are mutually exclusive.") - } - - // hide metrics from a deleted namespace having the same name - namespace := req.QueryParameter("namespace") - if req.QueryParameter("namespace") != "" { - ns, err := h.k.Kubernetes().CoreV1().Namespaces().Get(namespace, corev1.GetOptions{}) - if err != nil { - return p, err - } - - cts := ns.CreationTimestamp.Time - if p.start.Before(cts) { - p.start = cts - } - if p.end.Before(cts) { - return p, errors.Errorf("End timestamp must not be before namespace creation time.") - } - } - - if resourceFilter == "" { - resourceFilter = DefaultFilter - } - - if metricFilter == "" { - metricFilter = DefaultFilter - } - if componentType != "" { - metricFilter = fmt.Sprintf("/^(?=.*%s)(?=.*%s)/s", componentType, metricFilter) - } - - // should sort - if target != "" { - p.page = DefaultPage - p.limit = DefaultLimit - if order != model.OrderAscending { - p.order = DefaultOrder - } - if page != "" { - p.page, err = strconv.Atoi(req.QueryParameter("page")) - if err != nil || p.page <= 0 { - return p, errors.Errorf("Invalid parameter 'page'.") - } - } - if limit != "" { - p.limit, err = strconv.Atoi(req.QueryParameter("limit")) - if err != nil || p.limit <= 0 { - return p, errors.Errorf("Invalid parameter 'limit'.") - } - } - } - - switch lvl { - case 
monitoring.LevelCluster: - p.option = monitoring.ClusterOption{MetricFilter: metricFilter} - case monitoring.LevelNode: - p.identifier = model.IdentifierNode - p.option = monitoring.NodeOption{ - MetricFilter: metricFilter, - ResourceFilter: resourceFilter, - NodeName: nodeName, - } - case monitoring.LevelWorkspace: - p.identifier = model.IdentifierWorkspace - p.option = monitoring.WorkspaceOption{ - MetricFilter: metricFilter, - ResourceFilter: resourceFilter, - WorkspaceName: workspaceName, - } - case monitoring.LevelNamespace: - p.identifier = model.IdentifierNamespace - p.option = monitoring.NamespaceOption{ - MetricFilter: metricFilter, - ResourceFilter: resourceFilter, - WorkspaceName: workspaceName, - NamespaceName: namespaceName, - } - case monitoring.LevelWorkload: - p.identifier = model.IdentifierWorkload - p.option = monitoring.WorkloadOption{ - MetricFilter: metricFilter, - ResourceFilter: resourceFilter, - NamespaceName: namespaceName, - WorkloadKind: workloadKind, - WorkloadName: workloadName, - } - case monitoring.LevelPod: - p.identifier = model.IdentifierPod - p.option = monitoring.PodOption{ - MetricFilter: metricFilter, - ResourceFilter: resourceFilter, - NodeName: nodeName, - NamespaceName: namespaceName, - WorkloadKind: workloadKind, - WorkloadName: workloadName, - PodName: podName, - } - case monitoring.LevelContainer: - p.identifier = model.IdentifierContainer - p.option = monitoring.ContainerOption{ - MetricFilter: metricFilter, - ResourceFilter: resourceFilter, - NamespaceName: namespaceName, - PodName: podName, - ContainerName: containerName, - } - case monitoring.LevelPVC: - p.identifier = model.IdentifierPVC - p.option = monitoring.PVCOption{ - MetricFilter: metricFilter, - ResourceFilter: resourceFilter, - NamespaceName: namespaceName, - StorageClassName: storageClassName, - PersistentVolumeClaimName: pvcName, - } - case monitoring.LevelComponent: - p.option = monitoring.ComponentOption{ - MetricFilter: metricFilter, - } - } - - return 
p, nil -} diff --git a/pkg/kapis/monitoring/v1alpha3/handler.go b/pkg/kapis/monitoring/v1alpha3/handler.go new file mode 100644 index 000000000..9f7220876 --- /dev/null +++ b/pkg/kapis/monitoring/v1alpha3/handler.go @@ -0,0 +1,194 @@ +/* + + Copyright 2019 The KubeSphere Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +*/ + +package v1alpha3 + +import ( + "github.com/emicklei/go-restful" + "k8s.io/client-go/kubernetes" + "kubesphere.io/kubesphere/pkg/api" + model "kubesphere.io/kubesphere/pkg/models/monitoring" + "kubesphere.io/kubesphere/pkg/simple/client/monitoring" + "regexp" +) + +type handler struct { + k kubernetes.Interface + mo model.MonitoringOperator +} + +func newHandler(k kubernetes.Interface, m monitoring.Interface) *handler { + return &handler{k, model.NewMonitoringOperator(m)} +} + +func (h handler) handleClusterMetricsQuery(req *restful.Request, resp *restful.Response) { + params := parseRequestParams(req) + opt, err := h.makeQueryOptions(params, monitoring.LevelCluster) + if err != nil { + api.HandleBadRequest(resp, nil, err) + return + } + h.handleNamedMetricsQuery(resp, opt) +} + +func (h handler) handleNodeMetricsQuery(req *restful.Request, resp *restful.Response) { + params := parseRequestParams(req) + opt, err := h.makeQueryOptions(params, monitoring.LevelNode) + if err != nil { + api.HandleBadRequest(resp, nil, err) + return + } + h.handleNamedMetricsQuery(resp, opt) +} + +func (h handler) handleWorkspaceMetricsQuery(req *restful.Request, resp 
*restful.Response) { + params := parseRequestParams(req) + opt, err := h.makeQueryOptions(params, monitoring.LevelWorkspace) + if err != nil { + api.HandleBadRequest(resp, nil, err) + return + } + h.handleNamedMetricsQuery(resp, opt) +} + +func (h handler) handleNamespaceMetricsQuery(req *restful.Request, resp *restful.Response) { + params := parseRequestParams(req) + opt, err := h.makeQueryOptions(params, monitoring.LevelNamespace) + if err != nil { + if err.Error() == ErrNoHit { + res := handleNoHit(opt.namedMetrics) + resp.WriteAsJson(res) + return + } + + api.HandleBadRequest(resp, nil, err) + return + } + h.handleNamedMetricsQuery(resp, opt) +} + +func (h handler) handleWorkloadMetricsQuery(req *restful.Request, resp *restful.Response) { + params := parseRequestParams(req) + opt, err := h.makeQueryOptions(params, monitoring.LevelWorkload) + if err != nil { + if err.Error() == ErrNoHit { + res := handleNoHit(opt.namedMetrics) + resp.WriteAsJson(res) + return + } + + api.HandleBadRequest(resp, nil, err) + return + } + h.handleNamedMetricsQuery(resp, opt) +} + +func (h handler) handlePodMetricsQuery(req *restful.Request, resp *restful.Response) { + params := parseRequestParams(req) + opt, err := h.makeQueryOptions(params, monitoring.LevelPod) + if err != nil { + if err.Error() == ErrNoHit { + res := handleNoHit(opt.namedMetrics) + resp.WriteAsJson(res) + return + } + + api.HandleBadRequest(resp, nil, err) + return + } + h.handleNamedMetricsQuery(resp, opt) +} + +func (h handler) handleContainerMetricsQuery(req *restful.Request, resp *restful.Response) { + params := parseRequestParams(req) + opt, err := h.makeQueryOptions(params, monitoring.LevelContainer) + if err != nil { + if err.Error() == ErrNoHit { + res := handleNoHit(opt.namedMetrics) + resp.WriteAsJson(res) + return + } + + api.HandleBadRequest(resp, nil, err) + return + } + h.handleNamedMetricsQuery(resp, opt) +} + +func (h handler) handlePVCMetricsQuery(req *restful.Request, resp *restful.Response) { + 
params := parseRequestParams(req) + opt, err := h.makeQueryOptions(params, monitoring.LevelPVC) + if err != nil { + if err.Error() == ErrNoHit { + res := handleNoHit(opt.namedMetrics) + resp.WriteAsJson(res) + return + } + + api.HandleBadRequest(resp, nil, err) + return + } + h.handleNamedMetricsQuery(resp, opt) +} + +func (h handler) handleComponentMetricsQuery(req *restful.Request, resp *restful.Response) { + params := parseRequestParams(req) + opt, err := h.makeQueryOptions(params, monitoring.LevelComponent) + if err != nil { + api.HandleBadRequest(resp, nil, err) + return + } + h.handleNamedMetricsQuery(resp, opt) +} + +func handleNoHit(namedMetrics []string) model.Metrics { + var res model.Metrics + for _, metric := range namedMetrics { + res.Results = append(res.Results, monitoring.Metric{ + MetricName: metric, + MetricData: monitoring.MetricData{}, + }) + } + return res +} + +func (h handler) handleNamedMetricsQuery(resp *restful.Response, q queryOptions) { + var res model.Metrics + + var metrics []string + for _, metric := range q.namedMetrics { + ok, _ := regexp.MatchString(q.metricFilter, metric) + if ok { + metrics = append(metrics, metric) + } + } + if len(metrics) == 0 { + resp.WriteAsJson(res) + return + } + + if q.isRangeQuery() { + res = h.mo.GetNamedMetricsOverTime(metrics, q.start, q.end, q.step, q.option) + } else { + res = h.mo.GetNamedMetrics(metrics, q.time, q.option) + if q.shouldSort() { + res = *res.Sort(q.target, q.order, q.identifier).Page(q.page, q.limit) + } + } + resp.WriteAsJson(res) +} diff --git a/pkg/kapis/monitoring/v1alpha3/helper.go b/pkg/kapis/monitoring/v1alpha3/helper.go new file mode 100644 index 000000000..d5442b7e0 --- /dev/null +++ b/pkg/kapis/monitoring/v1alpha3/helper.go @@ -0,0 +1,268 @@ +package v1alpha3 + +import ( + "github.com/emicklei/go-restful" + "github.com/pkg/errors" + corev1 "k8s.io/apimachinery/pkg/apis/meta/v1" + model "kubesphere.io/kubesphere/pkg/models/monitoring" + 
"kubesphere.io/kubesphere/pkg/simple/client/monitoring" + "strconv" + "time" +) + +const ( + DefaultStep = 10 * time.Minute + DefaultFilter = ".*" + DefaultOrder = model.OrderDescending + DefaultPage = 1 + DefaultLimit = 5 + + ComponentEtcd = "etcd" + ComponentAPIServer = "apiserver" + ComponentScheduler = "scheduler" + + ErrNoHit = "'end' must be after the namespace creation time." + ErrParamConflict = "'time' and the combination of 'start' and 'end' are mutually exclusive." + ErrInvalidStartEnd = "'start' must be before 'end'." + ErrInvalidPage = "Invalid parameter 'page'." + ErrInvalidLimit = "Invalid parameter 'limit'." +) + +type reqParams struct { + time string + start string + end string + step string + target string + order string + page string + limit string + metricFilter string + resourceFilter string + nodeName string + workspaceName string + namespaceName string + workloadKind string + workloadName string + podName string + containerName string + pvcName string + storageClassName string + componentType string +} + +type queryOptions struct { + metricFilter string + namedMetrics []string + + start time.Time + end time.Time + time time.Time + step time.Duration + + target string + identifier string + order string + page int + limit int + + option monitoring.QueryOption +} + +func (q queryOptions) isRangeQuery() bool { + return q.time.IsZero() +} + +func (q queryOptions) shouldSort() bool { + return q.target != "" && q.identifier != "" +} + +func parseRequestParams(req *restful.Request) reqParams { + var r reqParams + r.time = req.QueryParameter("time") + r.start = req.QueryParameter("start") + r.end = req.QueryParameter("end") + r.step = req.QueryParameter("step") + r.target = req.QueryParameter("sort_metric") + r.order = req.QueryParameter("sort_type") + r.page = req.QueryParameter("page") + r.limit = req.QueryParameter("limit") + r.metricFilter = req.QueryParameter("metrics_filter") + r.resourceFilter = req.QueryParameter("resources_filter") + 
r.nodeName = req.PathParameter("node") + r.workspaceName = req.PathParameter("workspace") + r.namespaceName = req.PathParameter("namespace") + r.workloadKind = req.PathParameter("kind") + r.workloadName = req.PathParameter("workload") + r.podName = req.PathParameter("pod") + r.containerName = req.PathParameter("container") + r.pvcName = req.PathParameter("pvc") + r.storageClassName = req.PathParameter("storageclass") + r.componentType = req.PathParameter("component") + return r +} + +func (h handler) makeQueryOptions(r reqParams, lvl monitoring.Level) (q queryOptions, err error) { + if r.resourceFilter == "" { + r.resourceFilter = DefaultFilter + } + + q.metricFilter = r.metricFilter + if r.metricFilter == "" { + q.metricFilter = DefaultFilter + } + + switch lvl { + case monitoring.LevelCluster: + q.option = monitoring.ClusterOption{} + q.namedMetrics = model.ClusterMetrics + case monitoring.LevelNode: + q.identifier = model.IdentifierNode + q.namedMetrics = model.NodeMetrics + q.option = monitoring.NodeOption{ + ResourceFilter: r.resourceFilter, + NodeName: r.nodeName, + } + case monitoring.LevelWorkspace: + q.identifier = model.IdentifierWorkspace + q.namedMetrics = model.WorkspaceMetrics + q.option = monitoring.WorkspaceOption{ + ResourceFilter: r.resourceFilter, + WorkspaceName: r.workspaceName, + } + case monitoring.LevelNamespace: + q.identifier = model.IdentifierNamespace + q.namedMetrics = model.NamespaceMetrics + q.option = monitoring.NamespaceOption{ + ResourceFilter: r.resourceFilter, + WorkspaceName: r.workspaceName, + NamespaceName: r.namespaceName, + } + case monitoring.LevelWorkload: + q.identifier = model.IdentifierWorkload + q.namedMetrics = model.WorkloadMetrics + q.option = monitoring.WorkloadOption{ + ResourceFilter: r.resourceFilter, + NamespaceName: r.namespaceName, + WorkloadKind: r.workloadKind, + } + case monitoring.LevelPod: + q.identifier = model.IdentifierPod + q.namedMetrics = model.PodMetrics + q.option = monitoring.PodOption{ + 
ResourceFilter: r.resourceFilter, + NodeName: r.nodeName, + NamespaceName: r.namespaceName, + WorkloadKind: r.workloadKind, + WorkloadName: r.workloadName, + PodName: r.podName, + } + case monitoring.LevelContainer: + q.identifier = model.IdentifierContainer + q.namedMetrics = model.ContainerMetrics + q.option = monitoring.ContainerOption{ + ResourceFilter: r.resourceFilter, + NamespaceName: r.namespaceName, + PodName: r.podName, + ContainerName: r.containerName, + } + case monitoring.LevelPVC: + q.identifier = model.IdentifierPVC + q.namedMetrics = model.PVCMetrics + q.option = monitoring.PVCOption{ + ResourceFilter: r.resourceFilter, + NamespaceName: r.namespaceName, + StorageClassName: r.storageClassName, + PersistentVolumeClaimName: r.pvcName, + } + case monitoring.LevelComponent: + q.option = monitoring.ComponentOption{} + switch r.componentType { + case ComponentEtcd: + q.namedMetrics = model.EtcdMetrics + case ComponentAPIServer: + q.namedMetrics = model.APIServerMetrics + case ComponentScheduler: + q.namedMetrics = model.SchedulerMetrics + } + } + + // Parse time params + if r.start != "" && r.end != "" { + startInt, err := strconv.ParseInt(r.start, 10, 64) + if err != nil { + return q, err + } + q.start = time.Unix(startInt, 0) + + endInt, err := strconv.ParseInt(r.end, 10, 64) + if err != nil { + return q, err + } + q.end = time.Unix(endInt, 0) + + if r.step == "" { + q.step = DefaultStep + } else { + q.step, err = time.ParseDuration(r.step) + if err != nil { + return q, err + } + } + + if q.start.After(q.end) { + return q, errors.New(ErrInvalidStartEnd) + } + } else if r.start == "" && r.end == "" { + if r.time == "" { + q.time = time.Now() + } else { + timeInt, err := strconv.ParseInt(r.time, 10, 64) + if err != nil { + return q, err + } + q.time = time.Unix(timeInt, 0) + } + } else { + return q, errors.Errorf(ErrParamConflict) + } + + // Ensure query start time to be after the namespace creation time + if r.namespaceName != "" { + ns, err := 
h.k.CoreV1().Namespaces().Get(r.namespaceName, corev1.GetOptions{}) + if err != nil { + return q, err + } + + cts := ns.CreationTimestamp.Time + if q.start.Before(cts) { + q.start = cts + } + if q.end.Before(cts) { + return q, errors.New(ErrNoHit) + } + } + + // Parse sorting and paging params + if r.target != "" { + q.target, q.order = r.target, r.order + q.page, q.limit = DefaultPage, DefaultLimit + if r.order != model.OrderAscending { + q.order = DefaultOrder + } + if r.page != "" { + q.page, err = strconv.Atoi(r.page) + if err != nil || q.page <= 0 { + return q, errors.New(ErrInvalidPage) + } + } + if r.limit != "" { + q.limit, err = strconv.Atoi(r.limit) + if err != nil || q.limit <= 0 { + return q, errors.New(ErrInvalidLimit) + } + } + } + + return q, nil +} diff --git a/pkg/kapis/monitoring/v1alpha3/helper_test.go b/pkg/kapis/monitoring/v1alpha3/helper_test.go new file mode 100644 index 000000000..46f72dc1b --- /dev/null +++ b/pkg/kapis/monitoring/v1alpha3/helper_test.go @@ -0,0 +1,148 @@ +package v1alpha3 + +import ( + "fmt" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + model "kubesphere.io/kubesphere/pkg/models/monitoring" + "kubesphere.io/kubesphere/pkg/simple/client/monitoring" + "reflect" + "testing" + "time" +) + +func TestParseRequestParams(t *testing.T) { + tests := []struct { + params reqParams + lvl monitoring.Level + namespace corev1.Namespace + expected queryOptions + expectedErr bool + }{ + { + params: reqParams{ + time: "abcdef", + }, + lvl: monitoring.LevelCluster, + expectedErr: true, + }, + { + params: reqParams{ + time: "1585831995", + }, + lvl: monitoring.LevelCluster, + expected: queryOptions{ + time: time.Unix(1585831995, 0), + metricFilter: ".*", + namedMetrics: model.ClusterMetrics, + option: monitoring.ClusterOption{}, + }, + expectedErr: false, + }, + { + params: reqParams{ + start: "1585830000", + end: "1585839999", + step: "1m", + namespaceName: "default", + }, + lvl: monitoring.LevelNamespace, + 
namespace: corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + CreationTimestamp: metav1.Time{ + Time: time.Unix(1585836666, 0), + }, + }, + }, + expected: queryOptions{ + start: time.Unix(1585836666, 0), + end: time.Unix(1585839999, 0), + step: time.Minute, + identifier: model.IdentifierNamespace, + metricFilter: ".*", + namedMetrics: model.NamespaceMetrics, + option: monitoring.NamespaceOption{ + ResourceFilter: ".*", + NamespaceName: "default", + }, + }, + expectedErr: false, + }, + { + params: reqParams{ + start: "1585830000", + end: "1585839999", + step: "1m", + namespaceName: "default", + }, + lvl: monitoring.LevelNamespace, + namespace: corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + CreationTimestamp: metav1.Time{ + Time: time.Unix(1589999999, 0), + }, + }, + }, + expectedErr: true, + }, + { + params: reqParams{ + start: "1585830000", + end: "1585839999", + step: "1m", + namespaceName: "non-exist", + }, + lvl: monitoring.LevelNamespace, + namespace: corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + CreationTimestamp: metav1.Time{ + Time: time.Unix(1589999999, 0), + }, + }, + }, + expectedErr: true, + }, + { + params: reqParams{ + time: "1585830000", + componentType: "etcd", + metricFilter: "etcd_server_list", + }, + lvl: monitoring.LevelComponent, + expected: queryOptions{ + time: time.Unix(1585830000, 0), + metricFilter: "etcd_server_list", + namedMetrics: model.EtcdMetrics, + option: monitoring.ComponentOption{}, + }, + expectedErr: false, + }, + } + + for i, tt := range tests { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + client := fake.NewSimpleClientset(&tt.namespace) + handler := newHandler(client, nil) + + result, err := handler.makeQueryOptions(tt.params, tt.lvl) + if err != nil { + if !tt.expectedErr { + t.Fatalf("unexpected err: %s.", err.Error()) + } + return + } + + if tt.expectedErr { + t.Fatalf("failed to catch error.") + } + + if !reflect.DeepEqual(result, tt.expected) 
{ + t.Fatalf("unexpected return: %v.", result) + } + }) + } +} diff --git a/pkg/kapis/monitoring/v1alpha2/register.go b/pkg/kapis/monitoring/v1alpha3/register.go similarity index 95% rename from pkg/kapis/monitoring/v1alpha2/register.go rename to pkg/kapis/monitoring/v1alpha3/register.go index 3d0675d4e..92110eeba 100644 --- a/pkg/kapis/monitoring/v1alpha2/register.go +++ b/pkg/kapis/monitoring/v1alpha3/register.go @@ -15,16 +15,16 @@ limitations under the License. */ -package v1alpha2 +package v1alpha3 import ( "github.com/emicklei/go-restful" "github.com/emicklei/go-restful-openapi" "k8s.io/apimachinery/pkg/runtime/schema" - "kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2" + "k8s.io/client-go/kubernetes" "kubesphere.io/kubesphere/pkg/apiserver/runtime" "kubesphere.io/kubesphere/pkg/constants" - "kubesphere.io/kubesphere/pkg/simple/client/k8s" + model "kubesphere.io/kubesphere/pkg/models/monitoring" "kubesphere.io/kubesphere/pkg/simple/client/monitoring" "net/http" ) @@ -36,7 +36,7 @@ const ( var GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} -func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient monitoring.Interface) error { +func AddToContainer(c *restful.Container, k8sClient kubernetes.Interface, monitoringClient monitoring.Interface) error { ws := runtime.NewWebService(GroupVersion) h := newHandler(k8sClient, monitoringClient) @@ -50,8 +50,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)). Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. 
Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)). Metadata(restfulspec.KeyOpenAPITags, []string{constants.ClusterMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/nodes"). @@ -68,8 +68,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)). Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")). Metadata(restfulspec.KeyOpenAPITags, []string{constants.NodeMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/nodes/{node}"). @@ -82,8 +82,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)). Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)). Metadata(restfulspec.KeyOpenAPITags, []string{constants.NodeMetricsTag}). 
- Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/workspaces"). @@ -100,8 +100,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)). Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")). Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkspaceMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/workspaces/{workspace}"). @@ -114,8 +114,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)). Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)). Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkspaceMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). 
Produces(restful.MIME_JSON) ws.Route(ws.GET("/workspaces/{workspace}/namespaces"). @@ -133,8 +133,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)). Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")). Metadata(restfulspec.KeyOpenAPITags, []string{constants.NamespaceMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/namespaces"). @@ -151,8 +151,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)). Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")). Metadata(restfulspec.KeyOpenAPITags, []string{constants.NamespaceMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/namespaces/{namespace}"). @@ -165,8 +165,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("step", "Time interval. 
Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)). Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)). Metadata(restfulspec.KeyOpenAPITags, []string{constants.NamespaceMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/namespaces/{namespace}/workloads"). @@ -184,8 +184,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)). Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")). Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkloadMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/namespaces/{namespace}/workloads/{kind}"). @@ -204,8 +204,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. 
It only applies to sorted metric data.").DataType("integer").Required(false)). Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")). Metadata(restfulspec.KeyOpenAPITags, []string{constants.WorkloadMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/namespaces/{namespace}/pods"). @@ -223,8 +223,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)). Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")). Metadata(restfulspec.KeyOpenAPITags, []string{constants.PodMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}"). @@ -238,8 +238,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)). Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. 
Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)). Metadata(restfulspec.KeyOpenAPITags, []string{constants.PodMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/namespaces/{namespace}/workloads/{kind}/{workload}/pods"). @@ -259,8 +259,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)). Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")). Metadata(restfulspec.KeyOpenAPITags, []string{constants.PodMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/nodes/{node}/pods"). @@ -278,8 +278,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)). Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")). Metadata(restfulspec.KeyOpenAPITags, []string{constants.PodMetricsTag}). - Writes(v1alpha2.APIResponse{}). 
- Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/nodes/{node}/pods/{pod}"). @@ -293,8 +293,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)). Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)). Metadata(restfulspec.KeyOpenAPITags, []string{constants.PodMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}/containers"). @@ -313,8 +313,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)). Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")). Metadata(restfulspec.KeyOpenAPITags, []string{constants.ContainerMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). 
Produces(restful.MIME_JSON) ws.Route(ws.GET("/namespaces/{namespace}/pods/{pod}/containers/{container}"). @@ -329,8 +329,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)). Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)). Metadata(restfulspec.KeyOpenAPITags, []string{constants.ContainerMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/storageclasses/{storageclass}/persistentvolumeclaims"). @@ -348,8 +348,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)). Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")). Metadata(restfulspec.KeyOpenAPITags, []string{constants.PVCMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/namespaces/{namespace}/persistentvolumeclaims"). 
@@ -367,8 +367,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("page", "The page number. This field paginates result data of each metric, then returns a specific page. For example, setting **page** to 2 returns the second page. It only applies to sorted metric data.").DataType("integer").Required(false)). Param(ws.QueryParameter("limit", "Page size, the maximum number of results in a single page. Defaults to 5.").DataType("integer").Required(false).DefaultValue("5")). Metadata(restfulspec.KeyOpenAPITags, []string{constants.PVCMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/namespaces/{namespace}/persistentvolumeclaims/{pvc}"). @@ -382,8 +382,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("step", "Time interval. Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)). Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)). Metadata(restfulspec.KeyOpenAPITags, []string{constants.PVCMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) ws.Route(ws.GET("/components/{component}"). @@ -396,8 +396,8 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, monitoringClient Param(ws.QueryParameter("step", "Time interval. 
Retrieve metric data at a fixed interval within the time range of start and end. It requires both **start** and **end** are provided. The format is [0-9]+[smhdwy]. Defaults to 10m (i.e. 10 min).").DataType("string").DefaultValue("10m").Required(false)). Param(ws.QueryParameter("time", "A timestamp in Unix time format. Retrieve metric data at a single point in time. Defaults to now. Time and the combination of start, end, step are mutually exclusive.").DataType("string").Required(false)). Metadata(restfulspec.KeyOpenAPITags, []string{constants.ComponentMetricsTag}). - Writes(v1alpha2.APIResponse{}). - Returns(http.StatusOK, RespOK, v1alpha2.APIResponse{})). + Writes(model.Metrics{}). + Returns(http.StatusOK, RespOK, model.Metrics{})). Produces(restful.MIME_JSON) c.Add(ws) diff --git a/pkg/models/monitoring/monitoring.go b/pkg/models/monitoring/monitoring.go index 0f24484af..b7443f1bd 100644 --- a/pkg/models/monitoring/monitoring.go +++ b/pkg/models/monitoring/monitoring.go @@ -19,19 +19,15 @@ package monitoring import ( - "k8s.io/klog" - "kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2" "kubesphere.io/kubesphere/pkg/simple/client/monitoring" "time" ) type MonitoringOperator interface { - GetMetrics(stmts []string, time time.Time) (v1alpha2.APIResponse, error) - GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) (v1alpha2.APIResponse, error) - GetNamedMetrics(time time.Time, opt monitoring.QueryOption) (v1alpha2.APIResponse, error) - GetNamedMetricsOverTime(start, end time.Time, step time.Duration, opt monitoring.QueryOption) (v1alpha2.APIResponse, error) - SortMetrics(raw v1alpha2.APIResponse, target, order, identifier string) (v1alpha2.APIResponse, int) - PageMetrics(raw v1alpha2.APIResponse, page, limit, rows int) v1alpha2.APIResponse + GetMetrics(stmts []string, time time.Time) Metrics + GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) Metrics + GetNamedMetrics(metrics []string, time time.Time, opt 
monitoring.QueryOption) Metrics + GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) Metrics } type monitoringOperator struct { @@ -43,27 +39,21 @@ func NewMonitoringOperator(client monitoring.Interface) MonitoringOperator { } // TODO(huanggze): reserve for custom monitoring -func (mo monitoringOperator) GetMetrics(stmts []string, time time.Time) (v1alpha2.APIResponse, error) { +func (mo monitoringOperator) GetMetrics(stmts []string, time time.Time) Metrics { panic("implement me") } // TODO(huanggze): reserve for custom monitoring -func (mo monitoringOperator) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) (v1alpha2.APIResponse, error) { +func (mo monitoringOperator) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) Metrics { panic("implement me") } -func (mo monitoringOperator) GetNamedMetrics(time time.Time, opt monitoring.QueryOption) (v1alpha2.APIResponse, error) { - metrics, err := mo.c.GetNamedMetrics(time, opt) - if err != nil { - klog.Error(err) - } - return v1alpha2.APIResponse{Results: metrics}, err +func (mo monitoringOperator) GetNamedMetrics(metrics []string, time time.Time, opt monitoring.QueryOption) Metrics { + ress := mo.c.GetNamedMetrics(metrics, time, opt) + return Metrics{Results: ress} } -func (mo monitoringOperator) GetNamedMetricsOverTime(start, end time.Time, step time.Duration, opt monitoring.QueryOption) (v1alpha2.APIResponse, error) { - metrics, err := mo.c.GetNamedMetricsOverTime(start, end, step, opt) - if err != nil { - klog.Error(err) - } - return v1alpha2.APIResponse{Results: metrics}, err +func (mo monitoringOperator) GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt monitoring.QueryOption) Metrics { + ress := mo.c.GetNamedMetricsOverTime(metrics, start, end, step, opt) + return Metrics{Results: ress} } diff --git a/pkg/simple/client/monitoring/named_metrics.go 
b/pkg/models/monitoring/named_metrics.go similarity index 89% rename from pkg/simple/client/monitoring/named_metrics.go rename to pkg/models/monitoring/named_metrics.go index 882b12e66..322e01f94 100644 --- a/pkg/simple/client/monitoring/named_metrics.go +++ b/pkg/models/monitoring/named_metrics.go @@ -1,19 +1,5 @@ package monitoring -type MonitoringLevel int - -const ( - LevelCluster = MonitoringLevel(1) << iota - LevelNode - LevelWorkspace - LevelNamespace - LevelWorkload - LevelPod - LevelContainer - LevelPVC - LevelComponent -) - var ClusterMetrics = []string{ "cluster_cpu_utilisation", "cluster_cpu_usage", @@ -161,7 +147,6 @@ var WorkloadMetrics = []string{ "workload_memory_usage_wo_cache", "workload_net_bytes_transmitted", "workload_net_bytes_received", - "workload_deployment_replica", "workload_deployment_replica_available", "workload_statefulset_replica", @@ -198,7 +183,7 @@ var PVCMetrics = []string{ "pvc_bytes_utilisation", } -var ComponentMetrics = []string{ +var EtcdMetrics = []string{ "etcd_server_list", "etcd_server_total", "etcd_server_up_total", @@ -219,34 +204,20 @@ var ComponentMetrics = []string{ "etcd_disk_wal_fsync_duration_quantile", "etcd_disk_backend_commit_duration", "etcd_disk_backend_commit_duration_quantile", +} +var APIServerMetrics = []string{ "apiserver_up_sum", "apiserver_request_rate", "apiserver_request_by_verb_rate", "apiserver_request_latencies", "apiserver_request_by_verb_latencies", +} +var SchedulerMetrics = []string{ "scheduler_up_sum", "scheduler_schedule_attempts", "scheduler_schedule_attempt_rate", "scheduler_e2e_scheduling_latency", "scheduler_e2e_scheduling_latency_quantile", - - "controller_manager_up_sum", - - "coredns_up_sum", - "coredns_cache_hits", - "coredns_cache_misses", - "coredns_dns_request_rate", - "coredns_dns_request_duration", - "coredns_dns_request_duration_quantile", - "coredns_dns_request_by_type_rate", - "coredns_dns_request_by_rcode_rate", - "coredns_panic_rate", - "coredns_proxy_request_rate", - 
"coredns_proxy_request_duration", - "coredns_proxy_request_duration_quantile", - - "prometheus_up_sum", - "prometheus_tsdb_head_samples_appended_rate", } diff --git a/pkg/models/monitoring/sort_page.go b/pkg/models/monitoring/sort_page.go index 1d280c177..678a3ec5e 100644 --- a/pkg/models/monitoring/sort_page.go +++ b/pkg/models/monitoring/sort_page.go @@ -19,7 +19,6 @@ package monitoring import ( - "kubesphere.io/kubesphere/pkg/api/monitoring/v1alpha2" "kubesphere.io/kubesphere/pkg/simple/client/monitoring" "math" "sort" @@ -41,7 +40,7 @@ const ( type wrapper struct { monitoring.MetricData - by func(p, q *monitoring.MetricValue) bool + identifier, order string } func (w wrapper) Len() int { @@ -49,156 +48,142 @@ func (w wrapper) Len() int { } func (w wrapper) Less(i, j int) bool { - return w.by(&w.MetricValues[i], &w.MetricValues[j]) + p := w.MetricValues[i] + q := w.MetricValues[j] + + if p.Sample.Value() == q.Sample.Value() { + return p.Metadata[w.identifier] < q.Metadata[w.identifier] + } + + switch w.order { + case OrderAscending: + return p.Sample.Value() < q.Sample.Value() + default: + return p.Sample.Value() > q.Sample.Value() + } } -func (w wrapper) Swap(i, j int) { - w.MetricValues[i], w.MetricValues[j] = w.MetricValues[j], w.MetricValues[i] +func (id wrapper) Swap(i, j int) { + id.MetricValues[i], id.MetricValues[j] = id.MetricValues[j], id.MetricValues[i] } -// The sortMetrics sorts a group of resources by a given metric +// SortMetrics sorts a group of resources by a given metric. Range query doesn't support ranking. 
// Example: // -// before sorting -// |------| Metric 1 | Metric 2 | Metric 3 | -// | ID a | 1 | XL | | -// | ID b | 1 | S | | -// | ID c | 3 | M | | +// Before sorting: +// | ID | Metric 1 | Metric 2 | Metric 3 | +// | a | 1 | XL | | +// | b | 1 | S | | +// | c | 3 | M | | // -// sort by metrics_2 -// |------| Metric 1 | Metric 2 (asc) | Metric 3 | -// | ID a | 1 | XL | | -// | ID c | 3 | M | | -// | ID b | 1 | S | | -// -// ranking can only be applied to instant query results, not range query -func (mo monitoringOperator) SortMetrics(raw v1alpha2.APIResponse, target, order, identifier string) (v1alpha2.APIResponse, int) { - if target == "" || len(raw.Results) == 0 { - return raw, -1 - } - - if order == "" { - order = OrderDescending - } - - var currentResourceMap = make(map[string]int) - - // resource-ordinal map - var indexMap = make(map[string]int) - i := 0 - - for _, item := range raw.Results { - if item.MetricType == monitoring.MetricTypeVector && item.Status == monitoring.StatusSuccess { - if item.MetricName == target { - if order == OrderAscending { - sort.Sort(wrapper{item.MetricData, func(p, q *monitoring.MetricValue) bool { - if p.Sample[1] == q.Sample[1] { - return p.Metadata[identifier] < q.Metadata[identifier] - } - return p.Sample[1] < q.Sample[1] - }}) - } else { - sort.Sort(wrapper{item.MetricData, func(p, q *monitoring.MetricValue) bool { - if p.Sample[1] == q.Sample[1] { - return p.Metadata[identifier] > q.Metadata[identifier] - } - return p.Sample[1] > q.Sample[1] - }}) - } - - for _, r := range item.MetricValues { - // record the ordinal of resource to indexMap - resourceName, exist := r.Metadata[identifier] - if exist { - if _, exist := indexMap[resourceName]; !exist { - indexMap[resourceName] = i - i = i + 1 - } - } - } - } - - // get total number of rows - for _, r := range item.MetricValues { - k, ok := r.Metadata[identifier] - if ok { - currentResourceMap[k] = 1 - } - } - - } - } - - var keys []string - for k := range currentResourceMap { 
- keys = append(keys, k) - } - sort.Strings(keys) - - for _, resource := range keys { - if _, exist := indexMap[resource]; !exist { - indexMap[resource] = i - i = i + 1 - } - } - - // sort other metrics - for i := 0; i < len(raw.Results); i++ { - item := raw.Results[i] - if item.MetricType == monitoring.MetricTypeVector && item.Status == monitoring.StatusSuccess { - sortedMetric := make([]monitoring.MetricValue, len(indexMap)) - for j := 0; j < len(item.MetricValues); j++ { - r := item.MetricValues[j] - k, exist := r.Metadata[identifier] - if exist { - index, exist := indexMap[k] - if exist { - sortedMetric[index] = r - } - } - } - - raw.Results[i].MetricValues = sortedMetric - } - } - - return raw, len(indexMap) -} - -func (mo monitoringOperator) PageMetrics(raw v1alpha2.APIResponse, page, limit, rows int) v1alpha2.APIResponse { - if page <= 0 || limit <= 0 || rows <= 0 || len(raw.Results) == 0 { +// After sorting: target=metric_2, order=asc, identifier=id +// | ID | Metric 1 | Metric 2 (asc) | Metric 3 | +// | a | 1 | XL | | +// | c | 3 | M | | +// | b | 1 | S | | +func (raw *Metrics) Sort(target, order, identifier string) *Metrics { + if target == "" || identifier == "" || len(raw.Results) == 0 { return raw } - // matrix type can not be sorted + resourceSet := make(map[string]bool) // resource set records possible values of the identifier + resourceOrdinal := make(map[string]int) // resource-ordinal map + + ordinal := 0 for _, item := range raw.Results { - if item.MetricType != monitoring.MetricTypeVector { - return raw + if item.MetricType != monitoring.MetricTypeVector || item.Error != "" { + continue + } + + if item.MetricName == target { + sort.Sort(wrapper{ + MetricData: item.MetricData, + identifier: identifier, + order: order, + }) + + for _, mv := range item.MetricValues { + // Record ordinals in the final result + v, ok := mv.Metadata[identifier] + if ok && v != "" { + resourceOrdinal[v] = ordinal + ordinal++ + } + } + } + + // Add every unique 
identifier value to the set + for _, mv := range item.MetricValues { + v, ok := mv.Metadata[identifier] + if ok && v != "" { + resourceSet[v] = true + } } } - // the i page: [(page-1) * limit, (page) * limit - 1] - start := (page - 1) * limit - end := (page)*limit - 1 + var resourceList []string + for k := range resourceSet { + resourceList = append(resourceList, k) + } + sort.Strings(resourceList) - for i := 0; i < len(raw.Results); i++ { - if raw.Results[i].MetricType != monitoring.MetricTypeVector || raw.Results[i].Status != monitoring.StatusSuccess { + // Fill resource-ordinal map with resources never present in the target, and give them ordinals. + for _, r := range resourceList { + if _, ok := resourceOrdinal[r]; !ok { + resourceOrdinal[r] = ordinal + ordinal++ + } + } + + // Sort metrics + for i, item := range raw.Results { + if item.MetricType != monitoring.MetricTypeVector || item.Error != "" { continue } - resultLen := len(raw.Results[i].MetricValues) - if start >= resultLen { + + sorted := make([]monitoring.MetricValue, len(resourceList)) + for _, mv := range item.MetricValues { + v, ok := mv.Metadata[identifier] + if ok && v != "" { + ordinal, _ := resourceOrdinal[v] + sorted[ordinal] = mv + } + } + raw.Results[i].MetricValues = sorted + } + + raw.CurrentPage = 1 + raw.TotalPages = 1 + raw.TotalItems = len(resourceList) + return raw +} + +func (raw *Metrics) Page(page, limit int) *Metrics { + if page < 1 || limit < 1 || len(raw.Results) == 0 { + return raw + } + + start := (page - 1) * limit + end := page * limit + + for i, item := range raw.Results { + if item.MetricType != monitoring.MetricTypeVector || item.Error != "" { + continue + } + + total := len(item.MetricValues) + if start >= total { raw.Results[i].MetricValues = nil continue } - if end >= resultLen { - end = resultLen - 1 + if end >= total { + end = total } - slice := raw.Results[i].MetricValues[start : end+1] - raw.Results[i].MetricValues = slice + + raw.Results[i].MetricValues = 
item.MetricValues[start:end] } raw.CurrentPage = page - raw.TotalPage = int(math.Ceil(float64(rows) / float64(limit))) - raw.TotalItem = rows + raw.TotalPages = int(math.Ceil(float64(raw.TotalItems) / float64(limit))) return raw } diff --git a/pkg/models/monitoring/sort_page_test.go b/pkg/models/monitoring/sort_page_test.go new file mode 100644 index 000000000..6dce082d3 --- /dev/null +++ b/pkg/models/monitoring/sort_page_test.go @@ -0,0 +1,91 @@ +package monitoring + +import ( + "fmt" + "github.com/google/go-cmp/cmp" + "github.com/json-iterator/go" + "io/ioutil" + "testing" +) + +func TestSort(t *testing.T) { + tests := []struct { + name string + target string + order string + identifier string + source string + expected string + }{ + {"sort in ascending order", "node_cpu_utilisation", "asc", "node", "source-node-metrics.json", "sorted-node-metrics-asc.json"}, + {"sort in descending order", "node_memory_utilisation", "desc", "node", "source-node-metrics.json", "sorted-node-metrics-desc.json"}, + {"sort faulty metrics", "node_memory_utilisation", "desc", "node", "faulty-node-metrics.json", "faulty-node-metrics-sorted.json"}, + {"sort metrics with an blank node", "node_memory_utilisation", "desc", "node", "blank-node-metrics.json", "blank-node-metrics-sorted.json"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + source, expected, err := jsonFromFile(tt.source, tt.expected) + if err != nil { + t.Fatal(err) + } + + result := source.Sort(tt.target, tt.order, tt.identifier) + if diff := cmp.Diff(*result, *expected); diff != "" { + t.Fatalf("%T differ (-got, +want): %s", expected, diff) + } + }) + } +} + +func TestPage(t *testing.T) { + tests := []struct { + name string + page int + limit int + source string + expected string + }{ + {"page 0 limit 5", 0, 5, "sorted-node-metrics-asc.json", "sorted-node-metrics-asc.json"}, + {"page 1 limit 5", 1, 5, "sorted-node-metrics-asc.json", "paged-node-metrics-1.json"}, + {"page 2 limit 5", 2, 5, 
"sorted-node-metrics-asc.json", "paged-node-metrics-2.json"}, + {"page 3 limit 5", 3, 5, "sorted-node-metrics-asc.json", "paged-node-metrics-3.json"}, + {"page faulty metrics", 1, 2, "faulty-node-metrics-sorted.json", "faulty-node-metrics-paged.json"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + source, expected, err := jsonFromFile(tt.source, tt.expected) + if err != nil { + t.Fatal(err) + } + + result := source.Page(tt.page, tt.limit) + if diff := cmp.Diff(*result, *expected); diff != "" { + t.Fatalf("%T differ (-got, +want): %s", expected, diff) + } + }) + } +} + +func jsonFromFile(sourceFile, expectedFile string) (*Metrics, *Metrics, error) { + sourceJson := &Metrics{} + expectedJson := &Metrics{} + + json, err := ioutil.ReadFile(fmt.Sprintf("./testdata/%s", sourceFile)) + if err != nil { + return nil, nil, err + } + err = jsoniter.Unmarshal(json, sourceJson) + if err != nil { + return nil, nil, err + } + + json, err = ioutil.ReadFile(fmt.Sprintf("./testdata/%s", expectedFile)) + if err != nil { + return nil, nil, err + } + err = jsoniter.Unmarshal(json, expectedJson) + if err != nil { + return nil, nil, err + } + return sourceJson, expectedJson, nil +} diff --git a/pkg/models/monitoring/testdata/blank-node-metrics-sorted.json b/pkg/models/monitoring/testdata/blank-node-metrics-sorted.json new file mode 100644 index 000000000..968fcf750 --- /dev/null +++ b/pkg/models/monitoring/testdata/blank-node-metrics-sorted.json @@ -0,0 +1,77 @@ +{ + "results":[ + { + "metric_name":"node_disk_size_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.193, + 0.42012898861983516 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.193, + 0.2588273152865106 + ] + }, + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.193, + 0.29849334024542695 + ] + } + ] + } + }, + { + "metric_name":"node_memory_utilisation", + "data":{ + 
"resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.195, + 0.5286875837861773 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.195, + 0.2497060264216553 + ] + }, + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.195, + 0.23637090535053928 + ] + } + ] + } + } + ], + "page":1, + "total_page":1, + "total_item":3 +} \ No newline at end of file diff --git a/pkg/models/monitoring/testdata/blank-node-metrics.json b/pkg/models/monitoring/testdata/blank-node-metrics.json new file mode 100644 index 000000000..e70a6b291 --- /dev/null +++ b/pkg/models/monitoring/testdata/blank-node-metrics.json @@ -0,0 +1,92 @@ +{ + "results": [ + { + "metric_name": "node_disk_size_utilisation", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "node": "i-2dazc1d6" + }, + "value": [ + 1585658599.193, + 0.42012898861983516 + ] + }, + { + "metric": { + "node": "" + }, + "value": [ + 1585658599.193, + 0.2601006025131434 + ] + }, + { + "metric": { + "node": "i-ezjb7gsk" + }, + "value": [ + 1585658599.193, + 0.29849334024542695 + ] + }, + { + "metric": { + "node": "i-hgcoippu" + }, + "value": [ + 1585658599.193, + 0.2588273152865106 + ] + } + ] + } + }, + { + "metric_name": "node_memory_utilisation", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "node": "i-2dazc1d6" + }, + "value": [ + 1585658599.195, + 0.5286875837861773 + ] + }, + { + "metric": { + "node": "" + }, + "value": [ + 1585658599.195, + 0.1446648505469157 + ] + }, + { + "metric": { + "node": "i-ezjb7gsk" + }, + "value": [ + 1585658599.195, + 0.23637090535053928 + ] + }, + { + "metric": { + "node": "i-hgcoippu" + }, + "value": [ + 1585658599.195, + 0.2497060264216553 + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/pkg/models/monitoring/testdata/faulty-node-metrics-paged.json b/pkg/models/monitoring/testdata/faulty-node-metrics-paged.json new file mode 100644 index 
000000000..ff99f2222 --- /dev/null +++ b/pkg/models/monitoring/testdata/faulty-node-metrics-paged.json @@ -0,0 +1,63 @@ +{ + "results":[ + { + "metric_name":"node_cpu_utilisation", + "error":"error" + }, + { + "metric_name":"node_disk_size_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.193, + 0.42012898861983516 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.193, + 0.2588273152865106 + ] + } + ] + } + }, + { + "metric_name":"node_memory_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.195, + 0.5286875837861773 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.195, + 0.2497060264216553 + ] + } + ] + } + } + ], + "page":1, + "total_page":2, + "total_item":4 +} \ No newline at end of file diff --git a/pkg/models/monitoring/testdata/faulty-node-metrics-sorted.json b/pkg/models/monitoring/testdata/faulty-node-metrics-sorted.json new file mode 100644 index 000000000..d5242f67c --- /dev/null +++ b/pkg/models/monitoring/testdata/faulty-node-metrics-sorted.json @@ -0,0 +1,99 @@ +{ + "results":[ + { + "metric_name":"node_cpu_utilisation", + "error":"error" + }, + { + "metric_name":"node_disk_size_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.193, + 0.42012898861983516 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.193, + 0.2588273152865106 + ] + }, + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.193, + 0.29849334024542695 + ] + }, + { + "metric":{ + "node":"i-9jtsi522" + }, + "value":[ + 1585658599.193, + 0.2601006025131434 + ] + } + ] + } + }, + { + "metric_name":"node_memory_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.195, + 
0.5286875837861773 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.195, + 0.2497060264216553 + ] + }, + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.195, + 0.23637090535053928 + ] + }, + { + "metric":{ + "node":"i-9jtsi522" + }, + "value":[ + 1585658599.195, + 0.1446648505469157 + ] + } + ] + } + } + ], + "page":1, + "total_page":1, + "total_item":4 +} \ No newline at end of file diff --git a/pkg/models/monitoring/testdata/faulty-node-metrics.json b/pkg/models/monitoring/testdata/faulty-node-metrics.json new file mode 100644 index 000000000..e72b9aeca --- /dev/null +++ b/pkg/models/monitoring/testdata/faulty-node-metrics.json @@ -0,0 +1,96 @@ +{ + "results": [ + { + "metric_name": "node_cpu_utilisation", + "error": "error" + }, + { + "metric_name": "node_disk_size_utilisation", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "node": "i-2dazc1d6" + }, + "value": [ + 1585658599.193, + 0.42012898861983516 + ] + }, + { + "metric": { + "node": "i-9jtsi522" + }, + "value": [ + 1585658599.193, + 0.2601006025131434 + ] + }, + { + "metric": { + "node": "i-ezjb7gsk" + }, + "value": [ + 1585658599.193, + 0.29849334024542695 + ] + }, + { + "metric": { + "node": "i-hgcoippu" + }, + "value": [ + 1585658599.193, + 0.2588273152865106 + ] + } + ] + } + }, + { + "metric_name": "node_memory_utilisation", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "node": "i-2dazc1d6" + }, + "value": [ + 1585658599.195, + 0.5286875837861773 + ] + }, + { + "metric": { + "node": "i-9jtsi522" + }, + "value": [ + 1585658599.195, + 0.1446648505469157 + ] + }, + { + "metric": { + "node": "i-ezjb7gsk" + }, + "value": [ + 1585658599.195, + 0.23637090535053928 + ] + }, + { + "metric": { + "node": "i-hgcoippu" + }, + "value": [ + 1585658599.195, + 0.2497060264216553 + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/pkg/models/monitoring/testdata/paged-node-metrics-1.json 
b/pkg/models/monitoring/testdata/paged-node-metrics-1.json new file mode 100644 index 000000000..60cd43507 --- /dev/null +++ b/pkg/models/monitoring/testdata/paged-node-metrics-1.json @@ -0,0 +1,166 @@ +{ + "results":[ + { + "metric_name":"node_cpu_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-tl1i71hr" + }, + "value":[ + 1585658599.193, + 0.021645833333483702 + ] + }, + { + "metric":{ + "node":"i-9jtsi522" + }, + "value":[ + 1585658599.193, + 0.03250000000007276 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.193, + 0.05066666666655995 + ] + }, + { + "metric":{ + "node":"i-ircdnrao" + }, + "value":[ + 1585658599.193, + 0.05210416666595847 + ] + }, + { + "metric":{ + "node":"i-xfcxdn7z" + }, + "value":[ + 1585658599.193, + 0.06745833333334303 + ] + } + ] + } + }, + { + "metric_name":"node_disk_size_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-tl1i71hr" + }, + "value":[ + 1585658599.193, + 0.3335848564534758 + ] + }, + { + "metric":{ + "node":"i-9jtsi522" + }, + "value":[ + 1585658599.193, + 0.2601006025131434 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.193, + 0.2588273152865106 + ] + }, + { + "metric":{ + "node":"i-ircdnrao" + }, + "value":[ + 1585658599.193, + 0.21351118996831508 + ] + }, + { + "metric":{ + "node":"i-xfcxdn7z" + }, + "value":[ + 1585658599.193, + 0.35981263055856705 + ] + } + ] + } + }, + { + "metric_name":"node_memory_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-tl1i71hr" + }, + "value":[ + 1585658599.195, + 0.12824588180084573 + ] + }, + { + "metric":{ + "node":"i-9jtsi522" + }, + "value":[ + 1585658599.195, + 0.1446648505469157 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.195, + 0.2497060264216553 + ] + }, + { + "metric":{ + "node":"i-ircdnrao" + }, + "value":[ + 1585658599.195, + 0.21291125105270192 + ] + }, + 
{ + "metric":{ + "node":"i-xfcxdn7z" + }, + "value":[ + 1585658599.195, + 0.40309723127991315 + ] + } + ] + } + } + ], + "page":1, + "total_page":2, + "total_item":8 +} \ No newline at end of file diff --git a/pkg/models/monitoring/testdata/paged-node-metrics-2.json b/pkg/models/monitoring/testdata/paged-node-metrics-2.json new file mode 100644 index 000000000..097872631 --- /dev/null +++ b/pkg/models/monitoring/testdata/paged-node-metrics-2.json @@ -0,0 +1,112 @@ +{ + "results":[ + { + "metric_name":"node_cpu_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.193, + 0.07443750000044626 + ] + }, + { + "metric":{ + "node":"i-o13skypq" + }, + "value":[ + 1585658599.193, + 0.07756249999996119 + ] + }, + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.193, + 0.18095833333306172 + ] + } + ] + } + }, + { + "metric_name":"node_disk_size_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.193, + 0.29849334024542695 + ] + }, + { + "metric":{ + "node":"i-o13skypq" + }, + "value":[ + 1585658599.193, + 0.4329682466178235 + ] + }, + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.193, + 0.42012898861983516 + ] + } + ] + } + }, + { + "metric_name":"node_memory_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.195, + 0.23637090535053928 + ] + }, + { + "metric":{ + "node":"i-o13skypq" + }, + "value":[ + 1585658599.195, + 0.823247832787681 + ] + }, + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.195, + 0.5286875837861773 + ] + } + ] + } + } + ], + "page":2, + "total_page":2, + "total_item":8 +} \ No newline at end of file diff --git a/pkg/models/monitoring/testdata/paged-node-metrics-3.json b/pkg/models/monitoring/testdata/paged-node-metrics-3.json new file mode 100644 index 
000000000..16ea7c859 --- /dev/null +++ b/pkg/models/monitoring/testdata/paged-node-metrics-3.json @@ -0,0 +1,25 @@ +{ + "results":[ + { + "metric_name":"node_cpu_utilisation", + "data":{ + "resultType":"vector" + } + }, + { + "metric_name":"node_disk_size_utilisation", + "data":{ + "resultType":"vector" + } + }, + { + "metric_name":"node_memory_utilisation", + "data":{ + "resultType":"vector" + } + } + ], + "page":3, + "total_page":2, + "total_item":8 +} \ No newline at end of file diff --git a/pkg/models/monitoring/testdata/sorted-node-metrics-asc.json b/pkg/models/monitoring/testdata/sorted-node-metrics-asc.json new file mode 100644 index 000000000..909e4a58e --- /dev/null +++ b/pkg/models/monitoring/testdata/sorted-node-metrics-asc.json @@ -0,0 +1,247 @@ +{ + "results":[ + { + "metric_name":"node_cpu_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-tl1i71hr" + }, + "value":[ + 1585658599.193, + 0.021645833333483702 + ] + }, + { + "metric":{ + "node":"i-9jtsi522" + }, + "value":[ + 1585658599.193, + 0.03250000000007276 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.193, + 0.05066666666655995 + ] + }, + { + "metric":{ + "node":"i-ircdnrao" + }, + "value":[ + 1585658599.193, + 0.05210416666595847 + ] + }, + { + "metric":{ + "node":"i-xfcxdn7z" + }, + "value":[ + 1585658599.193, + 0.06745833333334303 + ] + }, + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.193, + 0.07443750000044626 + ] + }, + { + "metric":{ + "node":"i-o13skypq" + }, + "value":[ + 1585658599.193, + 0.07756249999996119 + ] + }, + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.193, + 0.18095833333306172 + ] + } + ] + } + }, + { + "metric_name":"node_disk_size_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-tl1i71hr" + }, + "value":[ + 1585658599.193, + 0.3335848564534758 + ] + }, + { + "metric":{ + "node":"i-9jtsi522" + }, + "value":[ + 
1585658599.193, + 0.2601006025131434 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.193, + 0.2588273152865106 + ] + }, + { + "metric":{ + "node":"i-ircdnrao" + }, + "value":[ + 1585658599.193, + 0.21351118996831508 + ] + }, + { + "metric":{ + "node":"i-xfcxdn7z" + }, + "value":[ + 1585658599.193, + 0.35981263055856705 + ] + }, + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.193, + 0.29849334024542695 + ] + }, + { + "metric":{ + "node":"i-o13skypq" + }, + "value":[ + 1585658599.193, + 0.4329682466178235 + ] + }, + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.193, + 0.42012898861983516 + ] + } + ] + } + }, + { + "metric_name":"node_memory_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-tl1i71hr" + }, + "value":[ + 1585658599.195, + 0.12824588180084573 + ] + }, + { + "metric":{ + "node":"i-9jtsi522" + }, + "value":[ + 1585658599.195, + 0.1446648505469157 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.195, + 0.2497060264216553 + ] + }, + { + "metric":{ + "node":"i-ircdnrao" + }, + "value":[ + 1585658599.195, + 0.21291125105270192 + ] + }, + { + "metric":{ + "node":"i-xfcxdn7z" + }, + "value":[ + 1585658599.195, + 0.40309723127991315 + ] + }, + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.195, + 0.23637090535053928 + ] + }, + { + "metric":{ + "node":"i-o13skypq" + }, + "value":[ + 1585658599.195, + 0.823247832787681 + ] + }, + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.195, + 0.5286875837861773 + ] + } + ] + } + } + ], + "page":1, + "total_page":1, + "total_item":8 +} \ No newline at end of file diff --git a/pkg/models/monitoring/testdata/sorted-node-metrics-desc.json b/pkg/models/monitoring/testdata/sorted-node-metrics-desc.json new file mode 100644 index 000000000..4542ee8c5 --- /dev/null +++ b/pkg/models/monitoring/testdata/sorted-node-metrics-desc.json @@ -0,0 +1,247 @@ +{ + 
"results":[ + { + "metric_name":"node_cpu_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-o13skypq" + }, + "value":[ + 1585658599.193, + 0.07756249999996119 + ] + }, + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.193, + 0.18095833333306172 + ] + }, + { + "metric":{ + "node":"i-xfcxdn7z" + }, + "value":[ + 1585658599.193, + 0.06745833333334303 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.193, + 0.05066666666655995 + ] + }, + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.193, + 0.07443750000044626 + ] + }, + { + "metric":{ + "node":"i-ircdnrao" + }, + "value":[ + 1585658599.193, + 0.05210416666595847 + ] + }, + { + "metric":{ + "node":"i-9jtsi522" + }, + "value":[ + 1585658599.193, + 0.03250000000007276 + ] + }, + { + "metric":{ + "node":"i-tl1i71hr" + }, + "value":[ + 1585658599.193, + 0.021645833333483702 + ] + } + ] + } + }, + { + "metric_name":"node_disk_size_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "node":"i-o13skypq" + }, + "value":[ + 1585658599.193, + 0.4329682466178235 + ] + }, + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.193, + 0.42012898861983516 + ] + }, + { + "metric":{ + "node":"i-xfcxdn7z" + }, + "value":[ + 1585658599.193, + 0.35981263055856705 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.193, + 0.2588273152865106 + ] + }, + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.193, + 0.29849334024542695 + ] + }, + { + "metric":{ + "node":"i-ircdnrao" + }, + "value":[ + 1585658599.193, + 0.21351118996831508 + ] + }, + { + "metric":{ + "node":"i-9jtsi522" + }, + "value":[ + 1585658599.193, + 0.2601006025131434 + ] + }, + { + "metric":{ + "node":"i-tl1i71hr" + }, + "value":[ + 1585658599.193, + 0.3335848564534758 + ] + } + ] + } + }, + { + "metric_name":"node_memory_utilisation", + "data":{ + "resultType":"vector", + 
"result":[ + { + "metric":{ + "node":"i-o13skypq" + }, + "value":[ + 1585658599.195, + 0.823247832787681 + ] + }, + { + "metric":{ + "node":"i-2dazc1d6" + }, + "value":[ + 1585658599.195, + 0.5286875837861773 + ] + }, + { + "metric":{ + "node":"i-xfcxdn7z" + }, + "value":[ + 1585658599.195, + 0.40309723127991315 + ] + }, + { + "metric":{ + "node":"i-hgcoippu" + }, + "value":[ + 1585658599.195, + 0.2497060264216553 + ] + }, + { + "metric":{ + "node":"i-ezjb7gsk" + }, + "value":[ + 1585658599.195, + 0.23637090535053928 + ] + }, + { + "metric":{ + "node":"i-ircdnrao" + }, + "value":[ + 1585658599.195, + 0.21291125105270192 + ] + }, + { + "metric":{ + "node":"i-9jtsi522" + }, + "value":[ + 1585658599.195, + 0.1446648505469157 + ] + }, + { + "metric":{ + "node":"i-tl1i71hr" + }, + "value":[ + 1585658599.195, + 0.12824588180084573 + ] + } + ] + } + } + ], + "page":1, + "total_page":1, + "total_item":8 +} \ No newline at end of file diff --git a/pkg/models/monitoring/testdata/source-node-metrics.json b/pkg/models/monitoring/testdata/source-node-metrics.json new file mode 100644 index 000000000..ad1d3298c --- /dev/null +++ b/pkg/models/monitoring/testdata/source-node-metrics.json @@ -0,0 +1,244 @@ +{ + "results": [ + { + "metric_name": "node_cpu_utilisation", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "node": "i-2dazc1d6" + }, + "value": [ + 1585658599.193, + 0.18095833333306172 + ] + }, + { + "metric": { + "node": "i-9jtsi522" + }, + "value": [ + 1585658599.193, + 0.03250000000007276 + ] + }, + { + "metric": { + "node": "i-ezjb7gsk" + }, + "value": [ + 1585658599.193, + 0.07443750000044626 + ] + }, + { + "metric": { + "node": "i-hgcoippu" + }, + "value": [ + 1585658599.193, + 0.05066666666655995 + ] + }, + { + "metric": { + "node": "i-ircdnrao" + }, + "value": [ + 1585658599.193, + 0.05210416666595847 + ] + }, + { + "metric": { + "node": "i-o13skypq" + }, + "value": [ + 1585658599.193, + 0.07756249999996119 + ] + }, + { + "metric": { + "node": 
"i-tl1i71hr" + }, + "value": [ + 1585658599.193, + 0.021645833333483702 + ] + }, + { + "metric": { + "node": "i-xfcxdn7z" + }, + "value": [ + 1585658599.193, + 0.06745833333334303 + ] + } + ] + } + }, + { + "metric_name": "node_disk_size_utilisation", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "node": "i-2dazc1d6" + }, + "value": [ + 1585658599.193, + 0.42012898861983516 + ] + }, + { + "metric": { + "node": "i-9jtsi522" + }, + "value": [ + 1585658599.193, + 0.2601006025131434 + ] + }, + { + "metric": { + "node": "i-ezjb7gsk" + }, + "value": [ + 1585658599.193, + 0.29849334024542695 + ] + }, + { + "metric": { + "node": "i-hgcoippu" + }, + "value": [ + 1585658599.193, + 0.2588273152865106 + ] + }, + { + "metric": { + "node": "i-ircdnrao" + }, + "value": [ + 1585658599.193, + 0.21351118996831508 + ] + }, + { + "metric": { + "node": "i-o13skypq" + }, + "value": [ + 1585658599.193, + 0.4329682466178235 + ] + }, + { + "metric": { + "node": "i-tl1i71hr" + }, + "value": [ + 1585658599.193, + 0.3335848564534758 + ] + }, + { + "metric": { + "node": "i-xfcxdn7z" + }, + "value": [ + 1585658599.193, + 0.35981263055856705 + ] + } + ] + } + }, + { + "metric_name": "node_memory_utilisation", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "node": "i-2dazc1d6" + }, + "value": [ + 1585658599.195, + 0.5286875837861773 + ] + }, + { + "metric": { + "node": "i-9jtsi522" + }, + "value": [ + 1585658599.195, + 0.1446648505469157 + ] + }, + { + "metric": { + "node": "i-ezjb7gsk" + }, + "value": [ + 1585658599.195, + 0.23637090535053928 + ] + }, + { + "metric": { + "node": "i-hgcoippu" + }, + "value": [ + 1585658599.195, + 0.2497060264216553 + ] + }, + { + "metric": { + "node": "i-ircdnrao" + }, + "value": [ + 1585658599.195, + 0.21291125105270192 + ] + }, + { + "metric": { + "node": "i-o13skypq" + }, + "value": [ + 1585658599.195, + 0.823247832787681 + ] + }, + { + "metric": { + "node": "i-tl1i71hr" + }, + "value": [ + 1585658599.195, + 
0.12824588180084573 + ] + }, + { + "metric": { + "node": "i-xfcxdn7z" + }, + "value": [ + 1585658599.195, + 0.40309723127991315 + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/pkg/api/monitoring/v1alpha2/types.go b/pkg/models/monitoring/types.go similarity index 64% rename from pkg/api/monitoring/v1alpha2/types.go rename to pkg/models/monitoring/types.go index ee33074e3..22364cfc1 100644 --- a/pkg/api/monitoring/v1alpha2/types.go +++ b/pkg/models/monitoring/types.go @@ -1,10 +1,10 @@ -package v1alpha2 +package monitoring import "kubesphere.io/kubesphere/pkg/simple/client/monitoring" -type APIResponse struct { +type Metrics struct { Results []monitoring.Metric `json:"results" description:"actual array of results"` CurrentPage int `json:"page,omitempty" description:"current page returned"` - TotalPage int `json:"total_page,omitempty" description:"total number of pages"` - TotalItem int `json:"total_item,omitempty" description:"page size"` + TotalPages int `json:"total_page,omitempty" description:"total number of pages"` + TotalItems int `json:"total_item,omitempty" description:"page size"` } diff --git a/pkg/simple/client/devops/fake/fakedevops.go b/pkg/simple/client/devops/fake/fakedevops.go index f35b66229..466620d2c 100644 --- a/pkg/simple/client/devops/fake/fakedevops.go +++ b/pkg/simple/client/devops/fake/fakedevops.go @@ -170,7 +170,9 @@ func (d *FakeDevops) GetCredentialInProject(projectId, id string, content bool) func (d *FakeDevops) GetCredentialsInProject(projectId string) ([]*devops.Credential, error) { return nil, nil } -func (d *FakeDevops) DeleteCredentialInProject(projectId, id string) (*string, error) { return nil, nil } +func (d *FakeDevops) DeleteCredentialInProject(projectId, id string) (*string, error) { + return nil, nil +} // BuildGetter func (d *FakeDevops) GetProjectPipelineBuildByType(projectId, pipelineId string, status string) (*devops.Build, error) { diff --git a/pkg/simple/client/monitoring/interface.go 
b/pkg/simple/client/monitoring/interface.go index 2f135b5dc..488161dfd 100644 --- a/pkg/simple/client/monitoring/interface.go +++ b/pkg/simple/client/monitoring/interface.go @@ -2,40 +2,9 @@ package monitoring import "time" -const ( - StatusSuccess = "success" - StatusError = "error" - MetricTypeMatrix = "matrix" - MetricTypeVector = "vector" -) - -type Metric struct { - MetricName string `json:"metric_name,omitempty" description:"metric name, eg. scheduler_up_sum"` - Status string `json:"status" description:"result status, one of error, success"` - MetricData `json:"data" description:"actual metric result"` - ErrorType string `json:"errorType,omitempty"` - Error string `json:"error,omitempty"` -} - -type MetricData struct { - MetricType string `json:"resultType" description:"result type, one of matrix, vector"` - MetricValues []MetricValue `json:"result" description:"metric data including labels, time series and values"` -} - -type Point [2]float64 - -type MetricValue struct { - Metadata map[string]string `json:"metric,omitempty" description:"time series labels"` - Sample Point `json:"value,omitempty" description:"time series, values of vector type"` - Series []Point `json:"values,omitempty" description:"time series, values of matrix type"` -} - type Interface interface { - // The `stmts` defines statements, expressions or rules (eg. promql in Prometheus) for querying specific metrics. - GetMetrics(stmts []string, time time.Time) ([]Metric, error) - GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) ([]Metric, error) - - // Get named metrics (eg. 
node_cpu_usage) - GetNamedMetrics(time time.Time, opt QueryOption) ([]Metric, error) - GetNamedMetricsOverTime(start, end time.Time, step time.Duration, opt QueryOption) ([]Metric, error) + GetMetrics(exprs []string, time time.Time) []Metric + GetMetricsOverTime(exprs []string, start, end time.Time, step time.Duration) []Metric + GetNamedMetrics(metrics []string, time time.Time, opt QueryOption) []Metric + GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, opt QueryOption) []Metric } diff --git a/pkg/simple/client/monitoring/prometheus/prometheus.go b/pkg/simple/client/monitoring/prometheus/prometheus.go index d4a02320f..0cb3bc8b3 100644 --- a/pkg/simple/client/monitoring/prometheus/prometheus.go +++ b/pkg/simple/client/monitoring/prometheus/prometheus.go @@ -1,178 +1,153 @@ package prometheus import ( - "fmt" - "github.com/json-iterator/go" - "io/ioutil" + "context" + "github.com/prometheus/client_golang/api" + apiv1 "github.com/prometheus/client_golang/api/prometheus/v1" + "github.com/prometheus/common/model" "kubesphere.io/kubesphere/pkg/simple/client/monitoring" - "net/http" - "net/url" - "regexp" "sync" "time" ) -var json = jsoniter.ConfigCompatibleWithStandardLibrary - // prometheus implements monitoring interface backed by Prometheus type prometheus struct { - options *Options - client *http.Client + client apiv1.API } -func NewPrometheus(options *Options) monitoring.Interface { - return &prometheus{ - options: options, - client: &http.Client{Timeout: 10 * time.Second}, +func NewPrometheus(options *Options) (monitoring.Interface, error) { + cfg := api.Config{ + Address: options.Endpoint, } + + client, err := api.NewClient(cfg) + return prometheus{client: apiv1.NewAPI(client)}, err } // TODO(huanggze): reserve for custom monitoring -func (p *prometheus) GetMetrics(stmts []string, time time.Time) ([]monitoring.Metric, error) { +func (p prometheus) GetMetrics(stmts []string, time time.Time) []monitoring.Metric { panic("implement 
me") } // TODO(huanggze): reserve for custom monitoring -func (p *prometheus) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) ([]monitoring.Metric, error) { +func (p prometheus) GetMetricsOverTime(stmts []string, start, end time.Time, step time.Duration) []monitoring.Metric { panic("implement me") } -func (p *prometheus) GetNamedMetrics(ts time.Time, o monitoring.QueryOption) ([]monitoring.Metric, error) { - metrics := make([]monitoring.Metric, 0) - var mtx sync.Mutex // guard metrics +func (p prometheus) GetNamedMetrics(metrics []string, ts time.Time, o monitoring.QueryOption) []monitoring.Metric { + var res []monitoring.Metric + var mtx sync.Mutex var wg sync.WaitGroup opts := monitoring.NewQueryOptions() o.Apply(opts) - errCh := make(chan error) - for _, metric := range opts.NamedMetrics { - matched, _ := regexp.MatchString(opts.MetricFilter, metric) - if matched { - exp := makeExpression(metric, *opts) - wg.Add(1) - go func(metric, exp string) { - res, err := p.query(exp, ts) - if err != nil { - select { - case errCh <- err: // Record error once - default: - } - } else { - res.MetricName = metric // Add metric name - mtx.Lock() - metrics = append(metrics, res) - mtx.Unlock() - } - wg.Done() - }(metric, exp) - } + for _, metric := range metrics { + wg.Add(1) + go func(metric string) { + parsedResp := monitoring.Metric{MetricName: metric} + + value, err := p.client.Query(context.Background(), makeExpr(metric, *opts), ts) + if err != nil { + parsedResp.Error = err.(*apiv1.Error).Msg + } else { + parsedResp.MetricData = parseQueryResp(value) + } + + mtx.Lock() + res = append(res, parsedResp) + mtx.Unlock() + + wg.Done() + }(metric) } wg.Wait() - select { - case err := <-errCh: - return nil, err - default: - return metrics, nil - } + return res } -func (p *prometheus) GetNamedMetricsOverTime(start, end time.Time, step time.Duration, o monitoring.QueryOption) ([]monitoring.Metric, error) { - metrics := make([]monitoring.Metric, 0) - var 
mtx sync.Mutex // guard metrics +func (p prometheus) GetNamedMetricsOverTime(metrics []string, start, end time.Time, step time.Duration, o monitoring.QueryOption) []monitoring.Metric { + var res []monitoring.Metric + var mtx sync.Mutex var wg sync.WaitGroup opts := monitoring.NewQueryOptions() o.Apply(opts) - errCh := make(chan error) - for _, metric := range opts.NamedMetrics { - matched, _ := regexp.MatchString(opts.MetricFilter, metric) - if matched { - exp := makeExpression(metric, *opts) - wg.Add(1) - go func(metric, exp string) { - res, err := p.rangeQuery(exp, start, end, step) - if err != nil { - select { - case errCh <- err: // Record error once - default: - } - } else { - res.MetricName = metric // Add metric name - mtx.Lock() - metrics = append(metrics, res) - mtx.Unlock() - } - wg.Done() - }(metric, exp) - } + timeRange := apiv1.Range{ + Start: start, + End: end, + Step: step, + } + + for _, metric := range metrics { + wg.Add(1) + go func(metric string) { + parsedResp := monitoring.Metric{MetricName: metric} + + value, err := p.client.QueryRange(context.Background(), makeExpr(metric, *opts), timeRange) + if err != nil { + parsedResp.Error = err.(*apiv1.Error).Msg + } else { + parsedResp.MetricData = parseQueryRangeResp(value) + } + + mtx.Lock() + res = append(res, parsedResp) + mtx.Unlock() + + wg.Done() + }(metric) } wg.Wait() - select { - case err := <-errCh: - return nil, err - default: - return metrics, nil - } + return res } -func (p prometheus) query(exp string, ts time.Time) (monitoring.Metric, error) { - params := &url.Values{} - params.Set("time", ts.Format(time.RFC3339)) - params.Set("query", exp) +func parseQueryRangeResp(value model.Value) monitoring.MetricData { + res := monitoring.MetricData{MetricType: monitoring.MetricTypeMatrix} - u := fmt.Sprintf("%s/api/v1/query?%s", p.options.Endpoint, params.Encode()) + data, _ := value.(model.Matrix) - var m monitoring.Metric - response, err := p.client.Get(u) - if err != nil { - return 
monitoring.Metric{}, err + for _, v := range data { + mv := monitoring.MetricValue{ + Metadata: make(map[string]string), + } + + for k, v := range v.Metric { + mv.Metadata[string(k)] = string(v) + } + + for _, k := range v.Values { + mv.Series = append(mv.Series, monitoring.Point{float64(k.Timestamp) / 1000, float64(k.Value)}) + } + + res.MetricValues = append(res.MetricValues, mv) } - body, err := ioutil.ReadAll(response.Body) - if err != nil { - return monitoring.Metric{}, err - } - defer response.Body.Close() - - err = json.Unmarshal(body, m) - if err != nil { - return monitoring.Metric{}, err - } - - return m, nil + return res } -func (p prometheus) rangeQuery(exp string, start, end time.Time, step time.Duration) (monitoring.Metric, error) { - params := &url.Values{} - params.Set("start", start.Format(time.RFC3339)) - params.Set("end", end.Format(time.RFC3339)) - params.Set("step", step.String()) - params.Set("query", exp) +func parseQueryResp(value model.Value) monitoring.MetricData { + res := monitoring.MetricData{MetricType: monitoring.MetricTypeVector} - u := fmt.Sprintf("%s/api/v1/query?%s", p.options.Endpoint, params.Encode()) + data, _ := value.(model.Vector) - var m monitoring.Metric - response, err := p.client.Get(u) - if err != nil { - return monitoring.Metric{}, err + for _, v := range data { + mv := monitoring.MetricValue{ + Metadata: make(map[string]string), + } + + for k, v := range v.Metric { + mv.Metadata[string(k)] = string(v) + } + + mv.Sample = monitoring.Point{float64(v.Timestamp) / 1000, float64(v.Value)} + + res.MetricValues = append(res.MetricValues, mv) } - body, err := ioutil.ReadAll(response.Body) - if err != nil { - return monitoring.Metric{}, err - } - defer response.Body.Close() - - err = json.Unmarshal(body, m) - if err != nil { - return monitoring.Metric{}, err - } - - return m, nil + return res } diff --git a/pkg/simple/client/monitoring/prometheus/prometheus_test.go b/pkg/simple/client/monitoring/prometheus/prometheus_test.go 
new file mode 100644 index 000000000..043175b3d --- /dev/null +++ b/pkg/simple/client/monitoring/prometheus/prometheus_test.go @@ -0,0 +1,95 @@ +package prometheus + +import ( + "fmt" + "github.com/google/go-cmp/cmp" + "github.com/json-iterator/go" + "io/ioutil" + "kubesphere.io/kubesphere/pkg/simple/client/monitoring" + "net/http" + "net/http/httptest" + "testing" + "time" +) + +func TestGetNamedMetrics(t *testing.T) { + tests := []struct { + name string + fakeResp string + expected string + }{ + {"prom returns good values", "metrics-vector-type-prom.json", "metrics-vector-type-res.json"}, + {"prom returns error", "metrics-error-prom.json", "metrics-error-res.json"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + expected, err := jsonFromFile(tt.expected) + if err != nil { + t.Fatal(err) + } + + srv := mockPrometheusService("/api/v1/query", tt.fakeResp) + defer srv.Close() + + client, _ := NewPrometheus(&Options{Endpoint: srv.URL}) + result := client.GetNamedMetrics([]string{"cluster_cpu_utilisation"}, time.Now(), monitoring.ClusterOption{}) + if diff := cmp.Diff(result, expected); diff != "" { + t.Fatalf("%T differ (-got, +want): %s", expected, diff) + } + }) + } +} + +func TestGetNamedMetricsOverTime(t *testing.T) { + tests := []struct { + name string + fakeResp string + expected string + }{ + {"prom returns good values", "metrics-matrix-type-prom.json", "metrics-matrix-type-res.json"}, + {"prom returns error", "metrics-error-prom.json", "metrics-error-res.json"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + expected, err := jsonFromFile(tt.expected) + if err != nil { + t.Fatal(err) + } + + srv := mockPrometheusService("/api/v1/query_range", tt.fakeResp) + defer srv.Close() + + client, _ := NewPrometheus(&Options{Endpoint: srv.URL}) + result := client.GetNamedMetricsOverTime([]string{"cluster_cpu_utilisation"}, time.Now().Add(-time.Minute*3), time.Now(), time.Minute, monitoring.ClusterOption{}) + if diff 
:= cmp.Diff(result, expected); diff != "" { + t.Fatalf("%T differ (-got, +want): %s", expected, diff) + } + }) + } +} + +func mockPrometheusService(pattern, fakeResp string) *httptest.Server { + mux := http.NewServeMux() + mux.HandleFunc(pattern, func(res http.ResponseWriter, req *http.Request) { + b, _ := ioutil.ReadFile(fmt.Sprintf("./testdata/%s", fakeResp)) + res.Write(b) + }) + return httptest.NewServer(mux) +} + +func jsonFromFile(expectedFile string) ([]monitoring.Metric, error) { + expectedJson := []monitoring.Metric{} + + json, err := ioutil.ReadFile(fmt.Sprintf("./testdata/%s", expectedFile)) + if err != nil { + return expectedJson, err + } + err = jsoniter.Unmarshal(json, &expectedJson) + if err != nil { + return expectedJson, err + } + + return expectedJson, nil +} diff --git a/pkg/simple/client/monitoring/prometheus/promql_templates.go b/pkg/simple/client/monitoring/prometheus/promql.go similarity index 97% rename from pkg/simple/client/monitoring/prometheus/promql_templates.go rename to pkg/simple/client/monitoring/prometheus/promql.go index 5ec826ac2..06ced3d34 100644 --- a/pkg/simple/client/monitoring/prometheus/promql_templates.go +++ b/pkg/simple/client/monitoring/prometheus/promql.go @@ -25,7 +25,6 @@ const ( Deployment = "Deployment" ) -//TODO(huanggze): move this part to a ConfigMap var promQLTemplates = map[string]string{ //cluster "cluster_cpu_utilisation": ":node_cpu_utilisation:avg1m", @@ -256,31 +255,33 @@ var promQLTemplates = map[string]string{ "prometheus_tsdb_head_samples_appended_rate": `prometheus:prometheus_tsdb_head_samples_appended:sum_rate`, } -func makeExpression(metric string, opt monitoring.QueryOptions) string { +func makeExpr(metric string, opt monitoring.QueryOptions) string { tmpl := promQLTemplates[metric] switch opt.Level { case monitoring.LevelCluster: + return tmpl case monitoring.LevelNode: - makeNodeMetricExpression(tmpl, opt) + return makeNodeMetricExpr(tmpl, opt) case monitoring.LevelWorkspace: - 
makeWorkspaceMetricExpression(tmpl, opt) + return makeWorkspaceMetricExpr(tmpl, opt) case monitoring.LevelNamespace: - makeNamespaceMetricExpression(tmpl, opt) + return makeNamespaceMetricExpr(tmpl, opt) case monitoring.LevelWorkload: - makeWorkloadMetricExpression(tmpl, opt) + return makeWorkloadMetricExpr(tmpl, opt) case monitoring.LevelPod: - makePodMetricExpression(tmpl, opt) + return makePodMetricExpr(tmpl, opt) case monitoring.LevelContainer: - makeContainerMetricExpression(tmpl, opt) + return makeContainerMetricExpr(tmpl, opt) case monitoring.LevelPVC: - makePVCMetricExpression(tmpl, opt) + return makePVCMetricExpr(tmpl, opt) case monitoring.LevelComponent: + return tmpl default: + return tmpl } - return tmpl } -func makeNodeMetricExpression(tmpl string, o monitoring.QueryOptions) string { +func makeNodeMetricExpr(tmpl string, o monitoring.QueryOptions) string { var nodeSelector string if o.NodeName != "" { nodeSelector = fmt.Sprintf(`node="%s"`, o.NodeName) @@ -290,7 +291,7 @@ func makeNodeMetricExpression(tmpl string, o monitoring.QueryOptions) string { return strings.Replace(tmpl, "$1", nodeSelector, -1) } -func makeWorkspaceMetricExpression(tmpl string, o monitoring.QueryOptions) string { +func makeWorkspaceMetricExpr(tmpl string, o monitoring.QueryOptions) string { var workspaceSelector string if o.WorkspaceName != "" { workspaceSelector = fmt.Sprintf(`label_kubesphere_io_workspace="%s"`, o.WorkspaceName) @@ -300,7 +301,7 @@ func makeWorkspaceMetricExpression(tmpl string, o monitoring.QueryOptions) strin return strings.Replace(tmpl, "$1", workspaceSelector, -1) } -func makeNamespaceMetricExpression(tmpl string, o monitoring.QueryOptions) string { +func makeNamespaceMetricExpr(tmpl string, o monitoring.QueryOptions) string { var namespaceSelector string // For monitoring namespaces in the specific workspace @@ -321,7 +322,7 @@ func makeNamespaceMetricExpression(tmpl string, o monitoring.QueryOptions) strin return strings.Replace(tmpl, "$1", 
namespaceSelector, -1) } -func makeWorkloadMetricExpression(tmpl string, o monitoring.QueryOptions) string { +func makeWorkloadMetricExpr(tmpl string, o monitoring.QueryOptions) string { var kindSelector, workloadSelector string switch o.WorkloadKind { case "deployment": @@ -341,7 +342,7 @@ func makeWorkloadMetricExpression(tmpl string, o monitoring.QueryOptions) string return strings.NewReplacer("$1", workloadSelector, "$2", kindSelector).Replace(tmpl) } -func makePodMetricExpression(tmpl string, o monitoring.QueryOptions) string { +func makePodMetricExpr(tmpl string, o monitoring.QueryOptions) string { var podSelector, workloadSelector string // For monitoriong pods of the specific workload @@ -371,7 +372,7 @@ func makePodMetricExpression(tmpl string, o monitoring.QueryOptions) string { // For monitoring pods on the specific node // GET /nodes/{node}/pods/{pod} - if o.PodName != "" { + if o.NodeName != "" { if o.PodName != "" { podSelector = fmt.Sprintf(`pod="%s", node="%s"`, o.PodName, o.NodeName) } else { @@ -381,7 +382,7 @@ func makePodMetricExpression(tmpl string, o monitoring.QueryOptions) string { return strings.NewReplacer("$1", workloadSelector, "$2", podSelector).Replace(tmpl) } -func makeContainerMetricExpression(tmpl string, o monitoring.QueryOptions) string { +func makeContainerMetricExpr(tmpl string, o monitoring.QueryOptions) string { var containerSelector string if o.ContainerName != "" { containerSelector = fmt.Sprintf(`pod_name="%s", namespace="%s", container_name="%s"`, o.PodName, o.NamespaceName, o.ContainerName) @@ -391,7 +392,7 @@ func makeContainerMetricExpression(tmpl string, o monitoring.QueryOptions) strin return strings.Replace(tmpl, "$1", containerSelector, -1) } -func makePVCMetricExpression(tmpl string, o monitoring.QueryOptions) string { +func makePVCMetricExpr(tmpl string, o monitoring.QueryOptions) string { var pvcSelector string // For monitoring persistentvolumeclaims in the specific namespace diff --git 
a/pkg/simple/client/monitoring/prometheus/promql_test.go b/pkg/simple/client/monitoring/prometheus/promql_test.go new file mode 100644 index 000000000..66293cfeb --- /dev/null +++ b/pkg/simple/client/monitoring/prometheus/promql_test.go @@ -0,0 +1,45 @@ +package prometheus + +import ( + "github.com/google/go-cmp/cmp" + "kubesphere.io/kubesphere/pkg/simple/client/monitoring" + "kubesphere.io/kubesphere/pkg/simple/client/monitoring/prometheus/testdata" + "testing" +) + +func TestMakeExpr(t *testing.T) { + tests := []struct { + name string + opt monitoring.QueryOptions + }{ + {"cluster_cpu_utilisation", monitoring.QueryOptions{Level: monitoring.LevelCluster}}, + {"node_cpu_utilisation", monitoring.QueryOptions{Level: monitoring.LevelNode, NodeName: "i-2dazc1d6"}}, + {"node_cpu_total", monitoring.QueryOptions{Level: monitoring.LevelNode, ResourceFilter: "i-2dazc1d6|i-ezjb7gsk"}}, + {"workspace_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkspace, WorkspaceName: "system-workspace"}}, + {"workspace_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkspace, ResourceFilter: "system-workspace|demo"}}, + {"namespace_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelNamespace, NamespaceName: "kube-system"}}, + {"namespace_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelNamespace, ResourceFilter: "kube-system|default"}}, + {"namespace_memory_usage_wo_cache", monitoring.QueryOptions{Level: monitoring.LevelNamespace, WorkspaceName: "system-workspace", ResourceFilter: "kube-system|default"}}, + {"workload_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelWorkload, WorkloadKind: "deployment", NamespaceName: "default", ResourceFilter: "apiserver|coredns"}}, + {"workload_deployment_replica_available", monitoring.QueryOptions{Level: monitoring.LevelWorkload, WorkloadKind: ".*", NamespaceName: "default", ResourceFilter: "apiserver|coredns"}}, + {"pod_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelPod, 
NamespaceName: "default", WorkloadKind: "deployment", WorkloadName: "elasticsearch", ResourceFilter: "elasticsearch-0"}}, + {"pod_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelPod, NamespaceName: "default", PodName: "elasticsearch-12345"}}, + {"pod_memory_usage_wo_cache", monitoring.QueryOptions{Level: monitoring.LevelPod, NodeName: "i-2dazc1d6", PodName: "elasticsearch-12345"}}, + {"container_cpu_usage", monitoring.QueryOptions{Level: monitoring.LevelContainer, NamespaceName: "default", PodName: "elasticsearch-12345", ContainerName: "syscall"}}, + {"container_memory_usage", monitoring.QueryOptions{Level: monitoring.LevelContainer, NamespaceName: "default", PodName: "elasticsearch-12345", ResourceFilter: "syscall"}}, + {"pvc_inodes_available", monitoring.QueryOptions{Level: monitoring.LevelPVC, NamespaceName: "default", PersistentVolumeClaimName: "db-123"}}, + {"pvc_inodes_used", monitoring.QueryOptions{Level: monitoring.LevelPVC, NamespaceName: "default", ResourceFilter: "db-123"}}, + {"pvc_inodes_total", monitoring.QueryOptions{Level: monitoring.LevelPVC, StorageClassName: "default", ResourceFilter: "db-123"}}, + {"etcd_server_list", monitoring.QueryOptions{Level: monitoring.LevelComponent}}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + expected := testdata.PromQLs[tt.name] + result := makeExpr(tt.name, tt.opt) + if diff := cmp.Diff(result, expected); diff != "" { + t.Fatalf("%T differ (-got, +want): %s", expected, diff) + } + }) + } +} diff --git a/pkg/simple/client/monitoring/prometheus/testdata/metrics-error-prom.json b/pkg/simple/client/monitoring/prometheus/testdata/metrics-error-prom.json new file mode 100644 index 000000000..4a9dddf9b --- /dev/null +++ b/pkg/simple/client/monitoring/prometheus/testdata/metrics-error-prom.json @@ -0,0 +1,5 @@ +{ + "status":"error", + "errorType":"internal", + "error":"inconsistent body for response code" +} \ No newline at end of file diff --git 
a/pkg/simple/client/monitoring/prometheus/testdata/metrics-error-res.json b/pkg/simple/client/monitoring/prometheus/testdata/metrics-error-res.json new file mode 100644 index 000000000..b580a35f2 --- /dev/null +++ b/pkg/simple/client/monitoring/prometheus/testdata/metrics-error-res.json @@ -0,0 +1,6 @@ +[ + { + "metric_name": "cluster_cpu_utilisation", + "error": "inconsistent body for response code" + } +] \ No newline at end of file diff --git a/pkg/simple/client/monitoring/prometheus/testdata/metrics-matrix-type-prom.json b/pkg/simple/client/monitoring/prometheus/testdata/metrics-matrix-type-prom.json new file mode 100644 index 000000000..82ef6cb12 --- /dev/null +++ b/pkg/simple/client/monitoring/prometheus/testdata/metrics-matrix-type-prom.json @@ -0,0 +1,206 @@ +{ + "status":"success", + "data":{ + "resultType":"matrix", + "result":[ + { + "metric":{ + "__name__":"up", + "endpoint":"https", + "instance":"192.168.2.2:9100", + "job":"node-exporter", + "namespace":"kubesphere-monitoring-system", + "pod":"node-exporter-nxpld", + "service":"node-exporter" + }, + "values":[ + [ + 1585743925, + "1.123456" + ], + [ + 1585744045, + "1.123456" + ], + [ + 1585744165, + "1.123456" + ], + [ + 1585744285, + "1.123456" + ], + [ + 1585744405, + "1.123456" + ] + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"https-main", + "instance":"10.233.99.18:8443", + "job":"kube-state-metrics", + "namespace":"kubesphere-monitoring-system", + "pod":"kube-state-metrics-566cdbcb48-98brh", + "service":"kube-state-metrics" + }, + "values":[ + [ + 1585743925, + "1.123456" + ], + [ + 1585744045, + "1.123456" + ], + [ + 1585744165, + "1.123456" + ], + [ + 1585744285, + "1.123456" + ], + [ + 1585744405, + "1.123456" + ] + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"https-metrics", + "instance":"192.168.2.2:10250", + "job":"kubelet", + "namespace":"kube-system", + "node":"ks-allinone", + "service":"kubelet" + }, + "values":[ + [ + 1585743925, + "1.123456" + ], + [ + 
1585744045, + "1.123456" + ], + [ + 1585744165, + "1.123456" + ], + [ + 1585744285, + "1.123456" + ], + [ + 1585744405, + "1.123456" + ] + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"https-self", + "instance":"10.233.99.18:9443", + "job":"kube-state-metrics", + "namespace":"kubesphere-monitoring-system", + "pod":"kube-state-metrics-566cdbcb48-98brh", + "service":"kube-state-metrics" + }, + "values":[ + [ + 1585743925, + "1.123456" + ], + [ + 1585744045, + "1.123456" + ], + [ + 1585744165, + "1.123456" + ], + [ + 1585744285, + "1.123456" + ], + [ + 1585744405, + "1.123456" + ] + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"mysql-exporter", + "instance":"10.233.99.71:9104", + "job":"mysql-sz197k-prometheus-mysql-exporter", + "namespace":"exporter", + "pod":"mysql-sz197k-prometheus-mysql-exporter-5d58bc7d94-dh6r9", + "service":"mysql-sz197k-prometheus-mysql-exporter" + }, + "values":[ + [ + 1585743925, + "1.123456" + ], + [ + 1585744045, + "1.123456" + ], + [ + 1585744165, + "1.123456" + ], + [ + 1585744285, + "1.123456" + ], + [ + 1585744405, + "1.123456" + ] + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"web", + "instance":"10.233.99.22:9090", + "job":"prometheus-k8s-system", + "namespace":"kubesphere-monitoring-system", + "pod":"prometheus-k8s-system-0", + "service":"prometheus-k8s-system" + }, + "values":[ + [ + 1585743925, + "1.123456" + ], + [ + 1585744045, + "1.123456" + ], + [ + 1585744165, + "1.123456" + ], + [ + 1585744285, + "1.123456" + ], + [ + 1585744405, + "1.123456" + ] + ] + } + ] + } +} \ No newline at end of file diff --git a/pkg/simple/client/monitoring/prometheus/testdata/metrics-matrix-type-res.json b/pkg/simple/client/monitoring/prometheus/testdata/metrics-matrix-type-res.json new file mode 100644 index 000000000..12ac864df --- /dev/null +++ b/pkg/simple/client/monitoring/prometheus/testdata/metrics-matrix-type-res.json @@ -0,0 +1,208 @@ +[ + { + "metric_name":"cluster_cpu_utilisation", + "data":{ + 
"resultType":"matrix", + "result":[ + { + "metric":{ + "__name__":"up", + "endpoint":"https", + "instance":"192.168.2.2:9100", + "job":"node-exporter", + "namespace":"kubesphere-monitoring-system", + "pod":"node-exporter-nxpld", + "service":"node-exporter" + }, + "values":[ + [ + 1585743925, + 1.123456 + ], + [ + 1585744045, + 1.123456 + ], + [ + 1585744165, + 1.123456 + ], + [ + 1585744285, + 1.123456 + ], + [ + 1585744405, + 1.123456 + ] + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"https-main", + "instance":"10.233.99.18:8443", + "job":"kube-state-metrics", + "namespace":"kubesphere-monitoring-system", + "pod":"kube-state-metrics-566cdbcb48-98brh", + "service":"kube-state-metrics" + }, + "values":[ + [ + 1585743925, + 1.123456 + ], + [ + 1585744045, + 1.123456 + ], + [ + 1585744165, + 1.123456 + ], + [ + 1585744285, + 1.123456 + ], + [ + 1585744405, + 1.123456 + ] + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"https-metrics", + "instance":"192.168.2.2:10250", + "job":"kubelet", + "namespace":"kube-system", + "node":"ks-allinone", + "service":"kubelet" + }, + "values":[ + [ + 1585743925, + 1.123456 + ], + [ + 1585744045, + 1.123456 + ], + [ + 1585744165, + 1.123456 + ], + [ + 1585744285, + 1.123456 + ], + [ + 1585744405, + 1.123456 + ] + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"https-self", + "instance":"10.233.99.18:9443", + "job":"kube-state-metrics", + "namespace":"kubesphere-monitoring-system", + "pod":"kube-state-metrics-566cdbcb48-98brh", + "service":"kube-state-metrics" + }, + "values":[ + [ + 1585743925, + 1.123456 + ], + [ + 1585744045, + 1.123456 + ], + [ + 1585744165, + 1.123456 + ], + [ + 1585744285, + 1.123456 + ], + [ + 1585744405, + 1.123456 + ] + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"mysql-exporter", + "instance":"10.233.99.71:9104", + "job":"mysql-sz197k-prometheus-mysql-exporter", + "namespace":"exporter", + "pod":"mysql-sz197k-prometheus-mysql-exporter-5d58bc7d94-dh6r9", + 
"service":"mysql-sz197k-prometheus-mysql-exporter" + }, + "values":[ + [ + 1585743925, + 1.123456 + ], + [ + 1585744045, + 1.123456 + ], + [ + 1585744165, + 1.123456 + ], + [ + 1585744285, + 1.123456 + ], + [ + 1585744405, + 1.123456 + ] + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"web", + "instance":"10.233.99.22:9090", + "job":"prometheus-k8s-system", + "namespace":"kubesphere-monitoring-system", + "pod":"prometheus-k8s-system-0", + "service":"prometheus-k8s-system" + }, + "values":[ + [ + 1585743925, + 1.123456 + ], + [ + 1585744045, + 1.123456 + ], + [ + 1585744165, + 1.123456 + ], + [ + 1585744285, + 1.123456 + ], + [ + 1585744405, + 1.123456 + ] + ] + } + ] + } + } +] \ No newline at end of file diff --git a/pkg/simple/client/monitoring/prometheus/testdata/metrics-vector-type-prom.json b/pkg/simple/client/monitoring/prometheus/testdata/metrics-vector-type-prom.json new file mode 100644 index 000000000..9bc79077a --- /dev/null +++ b/pkg/simple/client/monitoring/prometheus/testdata/metrics-vector-type-prom.json @@ -0,0 +1,68 @@ +{ + "status":"success", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "__name__":"up", + "endpoint":"https", + "instance":"192.168.2.2:9100", + "job":"node-exporter", + "namespace":"kubesphere-monitoring-system", + "pod":"node-exporter-nxpld", + "service":"node-exporter" + }, + "value":[ + 1585743854.077, + "1.123456" + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"https-main", + "instance":"10.233.99.18:8443", + "job":"kube-state-metrics", + "namespace":"kubesphere-monitoring-system", + "pod":"kube-state-metrics-566cdbcb48-98brh", + "service":"kube-state-metrics" + }, + "value":[ + 1585743854.077, + "1.123456" + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"https-metrics", + "instance":"192.168.2.2:10250", + "job":"kubelet", + "namespace":"kube-system", + "node":"ks-allinone", + "service":"kubelet" + }, + "value":[ + 1585743854.077, + "1.123456" + ] + }, + { + "metric":{ + 
"__name__":"up", + "endpoint":"https-self", + "instance":"10.233.99.18:9443", + "job":"kube-state-metrics", + "namespace":"kubesphere-monitoring-system", + "pod":"kube-state-metrics-566cdbcb48-98brh", + "service":"kube-state-metrics" + }, + "value":[ + 1585743854.077, + "1.123456" + ] + } + ] + } +} \ No newline at end of file diff --git a/pkg/simple/client/monitoring/prometheus/testdata/metrics-vector-type-res.json b/pkg/simple/client/monitoring/prometheus/testdata/metrics-vector-type-res.json new file mode 100644 index 000000000..ff9e7cdff --- /dev/null +++ b/pkg/simple/client/monitoring/prometheus/testdata/metrics-vector-type-res.json @@ -0,0 +1,70 @@ +[ + { + "metric_name":"cluster_cpu_utilisation", + "data":{ + "resultType":"vector", + "result":[ + { + "metric":{ + "__name__":"up", + "endpoint":"https", + "instance":"192.168.2.2:9100", + "job":"node-exporter", + "namespace":"kubesphere-monitoring-system", + "pod":"node-exporter-nxpld", + "service":"node-exporter" + }, + "value":[ + 1585743854.077, + 1.123456 + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"https-main", + "instance":"10.233.99.18:8443", + "job":"kube-state-metrics", + "namespace":"kubesphere-monitoring-system", + "pod":"kube-state-metrics-566cdbcb48-98brh", + "service":"kube-state-metrics" + }, + "value":[ + 1585743854.077, + 1.123456 + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"https-metrics", + "instance":"192.168.2.2:10250", + "job":"kubelet", + "namespace":"kube-system", + "node":"ks-allinone", + "service":"kubelet" + }, + "value":[ + 1585743854.077, + 1.123456 + ] + }, + { + "metric":{ + "__name__":"up", + "endpoint":"https-self", + "instance":"10.233.99.18:9443", + "job":"kube-state-metrics", + "namespace":"kubesphere-monitoring-system", + "pod":"kube-state-metrics-566cdbcb48-98brh", + "service":"kube-state-metrics" + }, + "value":[ + 1585743854.077, + 1.123456 + ] + } + ] + } + } +] \ No newline at end of file diff --git 
a/pkg/simple/client/monitoring/prometheus/testdata/promqls.go b/pkg/simple/client/monitoring/prometheus/testdata/promqls.go new file mode 100644 index 000000000..66d3fcb57 --- /dev/null +++ b/pkg/simple/client/monitoring/prometheus/testdata/promqls.go @@ -0,0 +1,23 @@ +package testdata + +var PromQLs = map[string]string{ + "cluster_cpu_utilisation": `:node_cpu_utilisation:avg1m`, + "node_cpu_utilisation": `node:node_cpu_utilisation:avg1m{node="i-2dazc1d6"}`, + "node_cpu_total": `node:node_num_cpu:sum{node=~"i-2dazc1d6|i-ezjb7gsk"}`, + "workspace_cpu_usage": `round(sum by (label_kubesphere_io_workspace) (namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", label_kubesphere_io_workspace="system-workspace"}), 0.001)`, + "workspace_memory_usage": `sum by (label_kubesphere_io_workspace) (namespace:container_memory_usage_bytes:sum{namespace!="", label_kubesphere_io_workspace=~"system-workspace|demo", label_kubesphere_io_workspace!=""})`, + "namespace_cpu_usage": `round(namespace:container_cpu_usage_seconds_total:sum_rate{namespace!="", namespace="kube-system"}, 0.001)`, + "namespace_memory_usage": `namespace:container_memory_usage_bytes:sum{namespace!="", namespace=~"kube-system|default"}`, + "namespace_memory_usage_wo_cache": `namespace:container_memory_usage_bytes_wo_cache:sum{namespace!="", label_kubesphere_io_workspace="system-workspace", namespace=~"kube-system|default"}`, + "workload_cpu_usage": `round(namespace:workload_cpu_usage:sum{namespace="default", workload=~"Deployment:apiserver|coredns"}, 0.001)`, + "workload_deployment_replica_available": `label_join(sum (label_join(label_replace(kube_deployment_status_replicas_available{namespace="default"}, "owner_kind", "Deployment", "", ""), "workload", "", "deployment")) by (namespace, owner_kind, workload), "workload", ":", "owner_kind", "workload")`, + "pod_cpu_usage": `round(label_join(sum by (namespace, pod_name) (irate(container_cpu_usage_seconds_total{job="kubelet", pod_name!="", 
image!=""}[5m])), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{owner_kind="ReplicaSet", owner_name=~"^deployment-[^-]{1,10}$"} * on (namespace, pod) group_left(node) kube_pod_info{pod=~"elasticsearch-0", namespace="default"}, 0.001)`, + "pod_memory_usage": `label_join(sum by (namespace, pod_name) (container_memory_usage_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{} * on (namespace, pod) group_left(node) kube_pod_info{pod="elasticsearch-12345", namespace="default"}`, + "pod_memory_usage_wo_cache": `label_join(sum by (namespace, pod_name) (container_memory_working_set_bytes{job="kubelet", pod_name!="", image!=""}), "pod", "", "pod_name") * on (namespace, pod) group_left(owner_kind, owner_name) kube_pod_owner{} * on (namespace, pod) group_left(node) kube_pod_info{pod="elasticsearch-12345", node="i-2dazc1d6"}`, + "container_cpu_usage": `round(sum by (namespace, pod_name, container_name) (irate(container_cpu_usage_seconds_total{job="kubelet", container_name!="POD", container_name!="", image!="", pod_name="elasticsearch-12345", namespace="default", container_name="syscall"}[5m])), 0.001)`, + "container_memory_usage": `sum by (namespace, pod_name, container_name) (container_memory_usage_bytes{job="kubelet", container_name!="POD", container_name!="", image!="", pod_name="elasticsearch-12345", namespace="default", container_name=~"syscall"})`, + "pvc_inodes_available": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_free) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{namespace="default", persistentvolumeclaim="db-123"}`, + "pvc_inodes_used": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes_used) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{namespace="default", 
persistentvolumeclaim=~"db-123"}`, + "pvc_inodes_total": `max by (namespace, persistentvolumeclaim) (kubelet_volume_stats_inodes) * on (namespace, persistentvolumeclaim) group_left (storageclass) kube_persistentvolumeclaim_info{storageclass="default", persistentvolumeclaim=~"db-123"}`, + "etcd_server_list": `label_replace(up{job="etcd"}, "node_ip", "$1", "instance", "(.*):.*")`, +} diff --git a/pkg/simple/client/monitoring/query_options.go b/pkg/simple/client/monitoring/query_options.go index 4acb05f51..6759f4572 100644 --- a/pkg/simple/client/monitoring/query_options.go +++ b/pkg/simple/client/monitoring/query_options.go @@ -1,14 +1,26 @@ package monitoring +type Level int + +const ( + LevelCluster = 1 << iota + LevelNode + LevelWorkspace + LevelNamespace + LevelWorkload + LevelPod + LevelContainer + LevelPVC + LevelComponent +) + type QueryOption interface { Apply(*QueryOptions) } type QueryOptions struct { - Level MonitoringLevel - NamedMetrics []string + Level Level - MetricFilter string ResourceFilter string NodeName string WorkspaceName string @@ -25,44 +37,35 @@ func NewQueryOptions() *QueryOptions { return &QueryOptions{} } -type ClusterOption struct { - MetricFilter string -} +type ClusterOption struct{} -func (co ClusterOption) Apply(o *QueryOptions) { +func (_ ClusterOption) Apply(o *QueryOptions) { o.Level = LevelCluster - o.NamedMetrics = ClusterMetrics } type NodeOption struct { - MetricFilter string ResourceFilter string NodeName string } func (no NodeOption) Apply(o *QueryOptions) { o.Level = LevelNode - o.NamedMetrics = NodeMetrics o.ResourceFilter = no.ResourceFilter o.NodeName = no.NodeName } type WorkspaceOption struct { - MetricFilter string ResourceFilter string WorkspaceName string } func (wo WorkspaceOption) Apply(o *QueryOptions) { o.Level = LevelWorkspace - o.NamedMetrics = WorkspaceMetrics - o.MetricFilter = wo.MetricFilter o.ResourceFilter = wo.ResourceFilter o.WorkspaceName = wo.WorkspaceName } type NamespaceOption struct { - 
MetricFilter string ResourceFilter string WorkspaceName string NamespaceName string @@ -70,33 +73,25 @@ type NamespaceOption struct { func (no NamespaceOption) Apply(o *QueryOptions) { o.Level = LevelNamespace - o.NamedMetrics = NamespaceMetrics - o.MetricFilter = no.MetricFilter o.ResourceFilter = no.ResourceFilter o.WorkspaceName = no.WorkspaceName o.NamespaceName = no.NamespaceName } type WorkloadOption struct { - MetricFilter string ResourceFilter string NamespaceName string WorkloadKind string - WorkloadName string } func (wo WorkloadOption) Apply(o *QueryOptions) { o.Level = LevelWorkload - o.NamedMetrics = WorkspaceMetrics - o.MetricFilter = wo.MetricFilter o.ResourceFilter = wo.ResourceFilter o.NamespaceName = wo.NamespaceName o.WorkloadKind = wo.WorkloadKind - o.WorkloadName = wo.WorkloadName } type PodOption struct { - MetricFilter string ResourceFilter string NodeName string NamespaceName string @@ -107,8 +102,6 @@ type PodOption struct { func (po PodOption) Apply(o *QueryOptions) { o.Level = LevelPod - o.NamedMetrics = PodMetrics - o.MetricFilter = po.MetricFilter o.ResourceFilter = po.ResourceFilter o.NamespaceName = po.NamespaceName o.WorkloadKind = po.WorkloadKind @@ -116,7 +109,6 @@ func (po PodOption) Apply(o *QueryOptions) { } type ContainerOption struct { - MetricFilter string ResourceFilter string NamespaceName string PodName string @@ -125,8 +117,6 @@ type ContainerOption struct { func (co ContainerOption) Apply(o *QueryOptions) { o.Level = LevelContainer - o.NamedMetrics = ContainerMetrics - o.MetricFilter = co.MetricFilter o.ResourceFilter = co.ResourceFilter o.NamespaceName = co.NamespaceName o.PodName = co.PodName @@ -134,7 +124,6 @@ func (co ContainerOption) Apply(o *QueryOptions) { } type PVCOption struct { - MetricFilter string ResourceFilter string NamespaceName string StorageClassName string @@ -143,20 +132,14 @@ type PVCOption struct { func (po PVCOption) Apply(o *QueryOptions) { o.Level = LevelPVC - o.NamedMetrics = PVCMetrics - 
o.MetricFilter = po.MetricFilter o.ResourceFilter = po.ResourceFilter o.NamespaceName = po.NamespaceName o.StorageClassName = po.StorageClassName o.PersistentVolumeClaimName = po.PersistentVolumeClaimName } -type ComponentOption struct { - MetricFilter string -} +type ComponentOption struct{} -func (co ComponentOption) Apply(o *QueryOptions) { +func (_ ComponentOption) Apply(o *QueryOptions) { o.Level = LevelComponent - o.NamedMetrics = ComponentMetrics - o.MetricFilter = co.MetricFilter } diff --git a/pkg/simple/client/monitoring/types.go b/pkg/simple/client/monitoring/types.go new file mode 100644 index 000000000..a9bc915de --- /dev/null +++ b/pkg/simple/client/monitoring/types.go @@ -0,0 +1,33 @@ +package monitoring + +const ( + MetricTypeMatrix = "matrix" + MetricTypeVector = "vector" +) + +type Metric struct { + MetricName string `json:"metric_name,omitempty" description:"metric name, eg. scheduler_up_sum"` + MetricData `json:"data,omitempty" description:"actual metric result"` + Error string `json:"error,omitempty"` +} + +type MetricData struct { + MetricType string `json:"resultType,omitempty" description:"result type, one of matrix, vector"` + MetricValues []MetricValue `json:"result,omitempty" description:"metric data including labels, time series and values"` +} + +type Point [2]float64 + +type MetricValue struct { + Metadata map[string]string `json:"metric,omitempty" description:"time series labels"` + Sample Point `json:"value,omitempty" description:"time series, values of vector type"` + Series []Point `json:"values,omitempty" description:"time series, values of matrix type"` +} + +func (p Point) Timestamp() float64 { + return p[0] +} + +func (p Point) Value() float64 { + return p[1] +} diff --git a/tools/cmd/doc-gen/main.go b/tools/cmd/doc-gen/main.go index edc385952..cb27cfb8b 100644 --- a/tools/cmd/doc-gen/main.go +++ b/tools/cmd/doc-gen/main.go @@ -32,7 +32,7 @@ import ( devopsv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/devops/v1alpha2" 
iamv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/iam/v1alpha2" loggingv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/logging/v1alpha2" - monitoringv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/monitoring/v1alpha2" + monitoringv1alpha3 "kubesphere.io/kubesphere/pkg/kapis/monitoring/v1alpha3" openpitrixv1 "kubesphere.io/kubesphere/pkg/kapis/openpitrix/v1" operationsv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/operations/v1alpha2" resourcesv1alpha2 "kubesphere.io/kubesphere/pkg/kapis/resources/v1alpha2" @@ -61,7 +61,7 @@ func generateSwaggerJson() { urlruntime.Must(devopsv1alpha2.AddToContainer(container, nil, nil, nil, nil, nil, nil)) urlruntime.Must(iamv1alpha2.AddToContainer(container, nil, nil, nil, nil, nil)) urlruntime.Must(loggingv1alpha2.AddToContainer(container, nil, nil)) - urlruntime.Must(monitoringv1alpha2.AddToContainer(container, nil, nil)) + urlruntime.Must(monitoringv1alpha3.AddToContainer(container, nil, nil)) urlruntime.Must(openpitrixv1.AddToContainer(container, nil, nil)) urlruntime.Must(operationsv1alpha2.AddToContainer(container, nil)) urlruntime.Must(resourcesv1alpha2.AddToContainer(container, nil, nil)) diff --git a/vendor/github.com/openshift/api/LICENSE b/vendor/github.com/openshift/api/LICENSE index 5c389317e..8dada3eda 100644 --- a/vendor/github.com/openshift/api/LICENSE +++ b/vendor/github.com/openshift/api/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -176,7 +175,18 @@ END OF TERMS AND CONDITIONS - Copyright 2020 Red Hat, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/openshift/api/apps/v1/consts.go b/vendor/github.com/openshift/api/apps/v1/consts.go deleted file mode 100644 index 212578bcc..000000000 --- a/vendor/github.com/openshift/api/apps/v1/consts.go +++ /dev/null @@ -1,108 +0,0 @@ -package v1 - -const ( - // DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state - // Used for specifying the reason for cancellation or failure of a deployment - // This is on replication controller set by deployer controller. - DeploymentStatusReasonAnnotation = "openshift.io/deployment.status-reason" - - // DeploymentPodAnnotation is an annotation on a deployment (a ReplicationController). The - // annotation value is the name of the deployer Pod which will act upon the ReplicationController - // to implement the deployment behavior. - // This is set on replication controller by deployer controller. - DeploymentPodAnnotation = "openshift.io/deployer-pod.name" - - // DeploymentConfigAnnotation is an annotation name used to correlate a deployment with the - // DeploymentConfig on which the deployment is based. - // This is set on replication controller pod template by deployer controller. - DeploymentConfigAnnotation = "openshift.io/deployment-config.name" - - // DeploymentCancelledAnnotation indicates that the deployment has been cancelled - // The annotation value does not matter and its mere presence indicates cancellation. - // This is set on replication controller by deployment config controller or oc rollout cancel command. 
- DeploymentCancelledAnnotation = "openshift.io/deployment.cancelled" - - // DeploymentEncodedConfigAnnotation is an annotation name used to retrieve specific encoded - // DeploymentConfig on which a given deployment is based. - // This is set on replication controller by deployer controller. - DeploymentEncodedConfigAnnotation = "openshift.io/encoded-deployment-config" - - // DeploymentVersionAnnotation is an annotation on a deployment (a ReplicationController). The - // annotation value is the LatestVersion value of the DeploymentConfig which was the basis for - // the deployment. - // This is set on replication controller pod template by deployment config controller. - DeploymentVersionAnnotation = "openshift.io/deployment-config.latest-version" - - // DeployerPodForDeploymentLabel is a label which groups pods related to a - // deployment. The value is a deployment name. The deployer pod and hook pods - // created by the internal strategies will have this label. Custom - // strategies can apply this label to any pods they create, enabling - // platform-provided cancellation and garbage collection support. - // This is set on deployer pod by deployer controller. - DeployerPodForDeploymentLabel = "openshift.io/deployer-pod-for.name" - - // DeploymentStatusAnnotation is an annotation name used to retrieve the DeploymentPhase of - // a deployment. - // This is set on replication controller by deployer controller. - DeploymentStatusAnnotation = "openshift.io/deployment.phase" -) - -type DeploymentConditionReason string - -var ( - // ReplicationControllerUpdatedReason is added in a deployment config when one of its replication - // controllers is updated as part of the rollout process. - ReplicationControllerUpdatedReason DeploymentConditionReason = "ReplicationControllerUpdated" - - // ReplicationControllerCreateError is added in a deployment config when it cannot create a new replication - // controller. 
- ReplicationControllerCreateErrorReason DeploymentConditionReason = "ReplicationControllerCreateError" - - // ReplicationControllerCreatedReason is added in a deployment config when it creates a new replication - // controller. - NewReplicationControllerCreatedReason DeploymentConditionReason = "NewReplicationControllerCreated" - - // NewReplicationControllerAvailableReason is added in a deployment config when its newest replication controller is made - // available ie. the number of new pods that have passed readiness checks and run for at least - // minReadySeconds is at least the minimum available pods that need to run for the deployment config. - NewReplicationControllerAvailableReason DeploymentConditionReason = "NewReplicationControllerAvailable" - - // ProgressDeadlineExceededReason is added in a deployment config when its newest replication controller fails to show - // any progress within the given deadline (progressDeadlineSeconds). - ProgressDeadlineExceededReason DeploymentConditionReason = "ProgressDeadlineExceeded" - - // DeploymentConfigPausedReason is added in a deployment config when it is paused. Lack of progress shouldn't be - // estimated once a deployment config is paused. - DeploymentConfigPausedReason DeploymentConditionReason = "DeploymentConfigPaused" - - // DeploymentConfigResumedReason is added in a deployment config when it is resumed. Useful for not failing accidentally - // deployment configs that paused amidst a rollout. - DeploymentConfigResumedReason DeploymentConditionReason = "DeploymentConfigResumed" - - // RolloutCancelledReason is added in a deployment config when its newest rollout was - // interrupted by cancellation. - RolloutCancelledReason DeploymentConditionReason = "RolloutCancelled" -) - -// DeploymentStatus describes the possible states a deployment can be in. -type DeploymentStatus string - -var ( - - // DeploymentStatusNew means the deployment has been accepted but not yet acted upon. 
- DeploymentStatusNew DeploymentStatus = "New" - - // DeploymentStatusPending means the deployment been handed over to a deployment strategy, - // but the strategy has not yet declared the deployment to be running. - DeploymentStatusPending DeploymentStatus = "Pending" - - // DeploymentStatusRunning means the deployment strategy has reported the deployment as - // being in-progress. - DeploymentStatusRunning DeploymentStatus = "Running" - - // DeploymentStatusComplete means the deployment finished without an error. - DeploymentStatusComplete DeploymentStatus = "Complete" - - // DeploymentStatusFailed means the deployment finished with an error. - DeploymentStatusFailed DeploymentStatus = "Failed" -) diff --git a/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go b/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go deleted file mode 100644 index 31969786c..000000000 --- a/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go +++ /dev/null @@ -1,38 +0,0 @@ -package v1 - -// This file contains consts that are not shared between components and set just internally. -// They will likely be removed in (near) future. - -const ( - // DeployerPodCreatedAtAnnotation is an annotation on a deployment that - // records the time in RFC3339 format of when the deployer pod for this particular - // deployment was created. - // This is set by deployer controller, but not consumed by any command or internally. - // DEPRECATED: will be removed soon - DeployerPodCreatedAtAnnotation = "openshift.io/deployer-pod.created-at" - - // DeployerPodStartedAtAnnotation is an annotation on a deployment that - // records the time in RFC3339 format of when the deployer pod for this particular - // deployment was started. - // This is set by deployer controller, but not consumed by any command or internally. 
- // DEPRECATED: will be removed soon - DeployerPodStartedAtAnnotation = "openshift.io/deployer-pod.started-at" - - // DeployerPodCompletedAtAnnotation is an annotation on deployment that records - // the time in RFC3339 format of when the deployer pod finished. - // This is set by deployer controller, but not consumed by any command or internally. - // DEPRECATED: will be removed soon - DeployerPodCompletedAtAnnotation = "openshift.io/deployer-pod.completed-at" - - // DesiredReplicasAnnotation represents the desired number of replicas for a - // new deployment. - // This is set by deployer controller, but not consumed by any command or internally. - // DEPRECATED: will be removed soon - DesiredReplicasAnnotation = "kubectl.kubernetes.io/desired-replicas" - - // DeploymentAnnotation is an annotation on a deployer Pod. The annotation value is the name - // of the deployment (a ReplicationController) on which the deployer Pod acts. - // This is set by deployer controller and consumed internally and in oc adm top command. - // DEPRECATED: will be removed soon - DeploymentAnnotation = "openshift.io/deployment.name" -) diff --git a/vendor/github.com/openshift/api/apps/v1/generated.pb.go b/vendor/github.com/openshift/api/apps/v1/generated.pb.go index d54012700..53b61c61a 100644 --- a/vendor/github.com/openshift/api/apps/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/apps/v1/generated.pb.go @@ -1,26 +1,55 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: github.com/openshift/api/apps/v1/generated.proto +// DO NOT EDIT! +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + github.com/openshift/api/apps/v1/generated.proto + + It has these top-level messages: + CustomDeploymentStrategyParams + DeploymentCause + DeploymentCauseImageTrigger + DeploymentCondition + DeploymentConfig + DeploymentConfigList + DeploymentConfigRollback + DeploymentConfigRollbackSpec + DeploymentConfigSpec + DeploymentConfigStatus + DeploymentDetails + DeploymentLog + DeploymentLogOptions + DeploymentRequest + DeploymentStrategy + DeploymentTriggerImageChangeParams + DeploymentTriggerPolicies + DeploymentTriggerPolicy + ExecNewPodHook + LifecycleHook + RecreateDeploymentStrategyParams + RollingDeploymentStrategyParams + TagImageHook +*/ package v1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" - v11 "k8s.io/apimachinery/pkg/apis/meta/v1" +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) +import strings "strings" +import reflect "reflect" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -31,651 +60,117 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CustomDeploymentStrategyParams) Reset() { *m = CustomDeploymentStrategyParams{} } func (*CustomDeploymentStrategyParams) ProtoMessage() {} func (*CustomDeploymentStrategyParams) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{0} -} -func (m *CustomDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CustomDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CustomDeploymentStrategyParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomDeploymentStrategyParams.Merge(m, src) -} -func (m *CustomDeploymentStrategyParams) XXX_Size() int { - return m.Size() -} -func (m *CustomDeploymentStrategyParams) XXX_DiscardUnknown() { - xxx_messageInfo_CustomDeploymentStrategyParams.DiscardUnknown(m) + return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_CustomDeploymentStrategyParams proto.InternalMessageInfo - -func (m *DeploymentCause) Reset() { *m = DeploymentCause{} } -func (*DeploymentCause) ProtoMessage() {} -func (*DeploymentCause) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{1} -} -func (m *DeploymentCause) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentCause) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentCause.Merge(m, src) -} -func (m *DeploymentCause) XXX_Size() int { - return m.Size() -} -func (m *DeploymentCause) XXX_DiscardUnknown() { - 
xxx_messageInfo_DeploymentCause.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentCause proto.InternalMessageInfo +func (m *DeploymentCause) Reset() { *m = DeploymentCause{} } +func (*DeploymentCause) ProtoMessage() {} +func (*DeploymentCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *DeploymentCauseImageTrigger) Reset() { *m = DeploymentCauseImageTrigger{} } func (*DeploymentCauseImageTrigger) ProtoMessage() {} func (*DeploymentCauseImageTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{2} -} -func (m *DeploymentCauseImageTrigger) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentCauseImageTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentCauseImageTrigger) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentCauseImageTrigger.Merge(m, src) -} -func (m *DeploymentCauseImageTrigger) XXX_Size() int { - return m.Size() -} -func (m *DeploymentCauseImageTrigger) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentCauseImageTrigger.DiscardUnknown(m) + return fileDescriptorGenerated, []int{2} } -var xxx_messageInfo_DeploymentCauseImageTrigger proto.InternalMessageInfo +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{3} -} -func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = 
b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentCondition.Merge(m, src) -} -func (m *DeploymentCondition) XXX_Size() int { - return m.Size() -} -func (m *DeploymentCondition) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) -} +func (m *DeploymentConfig) Reset() { *m = DeploymentConfig{} } +func (*DeploymentConfig) ProtoMessage() {} +func (*DeploymentConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo - -func (m *DeploymentConfig) Reset() { *m = DeploymentConfig{} } -func (*DeploymentConfig) ProtoMessage() {} -func (*DeploymentConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{4} -} -func (m *DeploymentConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentConfig.Merge(m, src) -} -func (m *DeploymentConfig) XXX_Size() int { - return m.Size() -} -func (m *DeploymentConfig) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentConfig proto.InternalMessageInfo - -func (m *DeploymentConfigList) Reset() { *m = DeploymentConfigList{} } -func (*DeploymentConfigList) ProtoMessage() {} -func (*DeploymentConfigList) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{5} -} -func (m *DeploymentConfigList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentConfigList) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentConfigList.Merge(m, src) -} -func (m *DeploymentConfigList) XXX_Size() int { - return m.Size() -} -func (m *DeploymentConfigList) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentConfigList.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentConfigList proto.InternalMessageInfo +func (m *DeploymentConfigList) Reset() { *m = DeploymentConfigList{} } +func (*DeploymentConfigList) ProtoMessage() {} +func (*DeploymentConfigList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DeploymentConfigRollback) Reset() { *m = DeploymentConfigRollback{} } func (*DeploymentConfigRollback) ProtoMessage() {} func (*DeploymentConfigRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{6} + return fileDescriptorGenerated, []int{6} } -func (m *DeploymentConfigRollback) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentConfigRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentConfigRollback) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentConfigRollback.Merge(m, src) -} -func (m *DeploymentConfigRollback) XXX_Size() int { - return m.Size() -} -func (m *DeploymentConfigRollback) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentConfigRollback.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentConfigRollback proto.InternalMessageInfo func (m *DeploymentConfigRollbackSpec) Reset() { *m = DeploymentConfigRollbackSpec{} } func (*DeploymentConfigRollbackSpec) ProtoMessage() {} func (*DeploymentConfigRollbackSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{7} -} -func (m *DeploymentConfigRollbackSpec) 
XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentConfigRollbackSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentConfigRollbackSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentConfigRollbackSpec.Merge(m, src) -} -func (m *DeploymentConfigRollbackSpec) XXX_Size() int { - return m.Size() -} -func (m *DeploymentConfigRollbackSpec) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentConfigRollbackSpec.DiscardUnknown(m) + return fileDescriptorGenerated, []int{7} } -var xxx_messageInfo_DeploymentConfigRollbackSpec proto.InternalMessageInfo +func (m *DeploymentConfigSpec) Reset() { *m = DeploymentConfigSpec{} } +func (*DeploymentConfigSpec) ProtoMessage() {} +func (*DeploymentConfigSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -func (m *DeploymentConfigSpec) Reset() { *m = DeploymentConfigSpec{} } -func (*DeploymentConfigSpec) ProtoMessage() {} -func (*DeploymentConfigSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{8} -} -func (m *DeploymentConfigSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentConfigSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentConfigSpec.Merge(m, src) -} -func (m *DeploymentConfigSpec) XXX_Size() int { - return m.Size() -} -func (m *DeploymentConfigSpec) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentConfigSpec.DiscardUnknown(m) -} +func (m *DeploymentConfigStatus) Reset() { *m = DeploymentConfigStatus{} } +func (*DeploymentConfigStatus) ProtoMessage() {} +func (*DeploymentConfigStatus) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{9} } -var xxx_messageInfo_DeploymentConfigSpec proto.InternalMessageInfo +func (m *DeploymentDetails) Reset() { *m = DeploymentDetails{} } +func (*DeploymentDetails) ProtoMessage() {} +func (*DeploymentDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -func (m *DeploymentConfigStatus) Reset() { *m = DeploymentConfigStatus{} } -func (*DeploymentConfigStatus) ProtoMessage() {} -func (*DeploymentConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{9} -} -func (m *DeploymentConfigStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentConfigStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentConfigStatus.Merge(m, src) -} -func (m *DeploymentConfigStatus) XXX_Size() int { - return m.Size() -} -func (m *DeploymentConfigStatus) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentConfigStatus.DiscardUnknown(m) -} +func (m *DeploymentLog) Reset() { *m = DeploymentLog{} } +func (*DeploymentLog) ProtoMessage() {} +func (*DeploymentLog) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } -var xxx_messageInfo_DeploymentConfigStatus proto.InternalMessageInfo +func (m *DeploymentLogOptions) Reset() { *m = DeploymentLogOptions{} } +func (*DeploymentLogOptions) ProtoMessage() {} +func (*DeploymentLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } -func (m *DeploymentDetails) Reset() { *m = DeploymentDetails{} } -func (*DeploymentDetails) ProtoMessage() {} -func (*DeploymentDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{10} -} -func (m *DeploymentDetails) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*DeploymentDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentDetails) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentDetails.Merge(m, src) -} -func (m *DeploymentDetails) XXX_Size() int { - return m.Size() -} -func (m *DeploymentDetails) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentDetails.DiscardUnknown(m) -} +func (m *DeploymentRequest) Reset() { *m = DeploymentRequest{} } +func (*DeploymentRequest) ProtoMessage() {} +func (*DeploymentRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } -var xxx_messageInfo_DeploymentDetails proto.InternalMessageInfo - -func (m *DeploymentLog) Reset() { *m = DeploymentLog{} } -func (*DeploymentLog) ProtoMessage() {} -func (*DeploymentLog) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{11} -} -func (m *DeploymentLog) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentLog) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentLog.Merge(m, src) -} -func (m *DeploymentLog) XXX_Size() int { - return m.Size() -} -func (m *DeploymentLog) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentLog.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentLog proto.InternalMessageInfo - -func (m *DeploymentLogOptions) Reset() { *m = DeploymentLogOptions{} } -func (*DeploymentLogOptions) ProtoMessage() {} -func (*DeploymentLogOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{12} -} -func (m *DeploymentLogOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentLogOptions) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentLogOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentLogOptions.Merge(m, src) -} -func (m *DeploymentLogOptions) XXX_Size() int { - return m.Size() -} -func (m *DeploymentLogOptions) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentLogOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentLogOptions proto.InternalMessageInfo - -func (m *DeploymentRequest) Reset() { *m = DeploymentRequest{} } -func (*DeploymentRequest) ProtoMessage() {} -func (*DeploymentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{13} -} -func (m *DeploymentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentRequest.Merge(m, src) -} -func (m *DeploymentRequest) XXX_Size() int { - return m.Size() -} -func (m *DeploymentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentRequest proto.InternalMessageInfo - -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{14} -} -func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_DeploymentStrategy.Merge(m, src) -} -func (m *DeploymentStrategy) XXX_Size() int { - return m.Size() -} -func (m *DeploymentStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *DeploymentTriggerImageChangeParams) Reset() { *m = DeploymentTriggerImageChangeParams{} } func (*DeploymentTriggerImageChangeParams) ProtoMessage() {} func (*DeploymentTriggerImageChangeParams) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{15} + return fileDescriptorGenerated, []int{15} } -func (m *DeploymentTriggerImageChangeParams) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentTriggerImageChangeParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentTriggerImageChangeParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentTriggerImageChangeParams.Merge(m, src) -} -func (m *DeploymentTriggerImageChangeParams) XXX_Size() int { - return m.Size() -} -func (m *DeploymentTriggerImageChangeParams) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentTriggerImageChangeParams.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentTriggerImageChangeParams proto.InternalMessageInfo func (m *DeploymentTriggerPolicies) Reset() { *m = DeploymentTriggerPolicies{} } func (*DeploymentTriggerPolicies) ProtoMessage() {} func (*DeploymentTriggerPolicies) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{16} + return fileDescriptorGenerated, []int{16} } -func (m *DeploymentTriggerPolicies) XXX_Unmarshal(b []byte) error { 
- return m.Unmarshal(b) -} -func (m *DeploymentTriggerPolicies) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentTriggerPolicies) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentTriggerPolicies.Merge(m, src) -} -func (m *DeploymentTriggerPolicies) XXX_Size() int { - return m.Size() -} -func (m *DeploymentTriggerPolicies) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentTriggerPolicies.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentTriggerPolicies proto.InternalMessageInfo func (m *DeploymentTriggerPolicy) Reset() { *m = DeploymentTriggerPolicy{} } func (*DeploymentTriggerPolicy) ProtoMessage() {} func (*DeploymentTriggerPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{17} -} -func (m *DeploymentTriggerPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentTriggerPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentTriggerPolicy.Merge(m, src) -} -func (m *DeploymentTriggerPolicy) XXX_Size() int { - return m.Size() -} -func (m *DeploymentTriggerPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentTriggerPolicy.DiscardUnknown(m) + return fileDescriptorGenerated, []int{17} } -var xxx_messageInfo_DeploymentTriggerPolicy proto.InternalMessageInfo +func (m *ExecNewPodHook) Reset() { *m = ExecNewPodHook{} } +func (*ExecNewPodHook) ProtoMessage() {} +func (*ExecNewPodHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } -func (m *ExecNewPodHook) Reset() { *m = ExecNewPodHook{} } -func (*ExecNewPodHook) ProtoMessage() {} -func (*ExecNewPodHook) Descriptor() ([]byte, []int) { - return 
fileDescriptor_8f1b1bee37da74c1, []int{18} -} -func (m *ExecNewPodHook) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecNewPodHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExecNewPodHook) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecNewPodHook.Merge(m, src) -} -func (m *ExecNewPodHook) XXX_Size() int { - return m.Size() -} -func (m *ExecNewPodHook) XXX_DiscardUnknown() { - xxx_messageInfo_ExecNewPodHook.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecNewPodHook proto.InternalMessageInfo - -func (m *LifecycleHook) Reset() { *m = LifecycleHook{} } -func (*LifecycleHook) ProtoMessage() {} -func (*LifecycleHook) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{19} -} -func (m *LifecycleHook) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LifecycleHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LifecycleHook) XXX_Merge(src proto.Message) { - xxx_messageInfo_LifecycleHook.Merge(m, src) -} -func (m *LifecycleHook) XXX_Size() int { - return m.Size() -} -func (m *LifecycleHook) XXX_DiscardUnknown() { - xxx_messageInfo_LifecycleHook.DiscardUnknown(m) -} - -var xxx_messageInfo_LifecycleHook proto.InternalMessageInfo +func (m *LifecycleHook) Reset() { *m = LifecycleHook{} } +func (*LifecycleHook) ProtoMessage() {} +func (*LifecycleHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *RecreateDeploymentStrategyParams) Reset() { *m = RecreateDeploymentStrategyParams{} } func (*RecreateDeploymentStrategyParams) ProtoMessage() {} func (*RecreateDeploymentStrategyParams) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{20} + return 
fileDescriptorGenerated, []int{20} } -func (m *RecreateDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RecreateDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RecreateDeploymentStrategyParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_RecreateDeploymentStrategyParams.Merge(m, src) -} -func (m *RecreateDeploymentStrategyParams) XXX_Size() int { - return m.Size() -} -func (m *RecreateDeploymentStrategyParams) XXX_DiscardUnknown() { - xxx_messageInfo_RecreateDeploymentStrategyParams.DiscardUnknown(m) -} - -var xxx_messageInfo_RecreateDeploymentStrategyParams proto.InternalMessageInfo func (m *RollingDeploymentStrategyParams) Reset() { *m = RollingDeploymentStrategyParams{} } func (*RollingDeploymentStrategyParams) ProtoMessage() {} func (*RollingDeploymentStrategyParams) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{21} -} -func (m *RollingDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RollingDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RollingDeploymentStrategyParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollingDeploymentStrategyParams.Merge(m, src) -} -func (m *RollingDeploymentStrategyParams) XXX_Size() int { - return m.Size() -} -func (m *RollingDeploymentStrategyParams) XXX_DiscardUnknown() { - xxx_messageInfo_RollingDeploymentStrategyParams.DiscardUnknown(m) + return fileDescriptorGenerated, []int{21} } -var xxx_messageInfo_RollingDeploymentStrategyParams proto.InternalMessageInfo - -func (m *TagImageHook) Reset() { *m = TagImageHook{} } -func (*TagImageHook) ProtoMessage() {} -func 
(*TagImageHook) Descriptor() ([]byte, []int) { - return fileDescriptor_8f1b1bee37da74c1, []int{22} -} -func (m *TagImageHook) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TagImageHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TagImageHook) XXX_Merge(src proto.Message) { - xxx_messageInfo_TagImageHook.Merge(m, src) -} -func (m *TagImageHook) XXX_Size() int { - return m.Size() -} -func (m *TagImageHook) XXX_DiscardUnknown() { - xxx_messageInfo_TagImageHook.DiscardUnknown(m) -} - -var xxx_messageInfo_TagImageHook proto.InternalMessageInfo +func (m *TagImageHook) Reset() { *m = TagImageHook{} } +func (*TagImageHook) ProtoMessage() {} +func (*TagImageHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func init() { proto.RegisterType((*CustomDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.CustomDeploymentStrategyParams") @@ -685,18 +180,14 @@ func init() { proto.RegisterType((*DeploymentConfig)(nil), "github.com.openshift.api.apps.v1.DeploymentConfig") proto.RegisterType((*DeploymentConfigList)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigList") proto.RegisterType((*DeploymentConfigRollback)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback") - proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback.UpdatedAnnotationsEntry") proto.RegisterType((*DeploymentConfigRollbackSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollbackSpec") proto.RegisterType((*DeploymentConfigSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec") - proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec.SelectorEntry") proto.RegisterType((*DeploymentConfigStatus)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigStatus") 
proto.RegisterType((*DeploymentDetails)(nil), "github.com.openshift.api.apps.v1.DeploymentDetails") proto.RegisterType((*DeploymentLog)(nil), "github.com.openshift.api.apps.v1.DeploymentLog") proto.RegisterType((*DeploymentLogOptions)(nil), "github.com.openshift.api.apps.v1.DeploymentLogOptions") proto.RegisterType((*DeploymentRequest)(nil), "github.com.openshift.api.apps.v1.DeploymentRequest") proto.RegisterType((*DeploymentStrategy)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy") - proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.AnnotationsEntry") - proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.LabelsEntry") proto.RegisterType((*DeploymentTriggerImageChangeParams)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerImageChangeParams") proto.RegisterType((*DeploymentTriggerPolicies)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicies") proto.RegisterType((*DeploymentTriggerPolicy)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicy") @@ -706,177 +197,10 @@ func init() { proto.RegisterType((*RollingDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RollingDeploymentStrategyParams") proto.RegisterType((*TagImageHook)(nil), "github.com.openshift.api.apps.v1.TagImageHook") } - -func init() { - proto.RegisterFile("github.com/openshift/api/apps/v1/generated.proto", fileDescriptor_8f1b1bee37da74c1) -} - -var fileDescriptor_8f1b1bee37da74c1 = []byte{ - // 2520 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xcd, 0x6f, 0x5b, 0x59, - 0x15, 0xcf, 0x8b, 0xed, 0xc4, 0x3e, 0xf9, 0x6a, 0x6e, 0xfa, 0xe1, 0xc9, 0xa0, 0x38, 0xf2, 0x68, - 0x86, 0x00, 0x83, 0x3d, 0xcd, 0x94, 0xd1, 0xb4, 0xd5, 0x0c, 0xc4, 0x69, 0x3a, 0x93, 0xca, 0x69, - 0xc3, 0x4d, 0xda, 0xd2, 0x0a, 0x41, 0x6f, 0x9e, 0x6f, 0x9c, 0x3b, 0x79, 0xef, 0x5d, 0xf3, 0xde, - 0xb5, 0x5b, 0x23, 0x84, 
0x66, 0x03, 0x12, 0xd2, 0x2c, 0x58, 0xc2, 0x06, 0xb1, 0x60, 0x0b, 0x62, - 0xc1, 0x1e, 0xb1, 0x40, 0xea, 0x02, 0xa4, 0x91, 0x90, 0x60, 0x84, 0x50, 0x34, 0x0d, 0x3b, 0xfe, - 0x84, 0xae, 0xd0, 0xfd, 0x78, 0x5f, 0xfe, 0x68, 0xe2, 0xb4, 0x3b, 0xbf, 0xf3, 0xf1, 0x3b, 0xe7, - 0x9e, 0x7b, 0xce, 0xb9, 0xe7, 0x5e, 0xc3, 0x3b, 0x4d, 0x26, 0x0e, 0xda, 0x7b, 0x15, 0x9b, 0xbb, - 0x55, 0xde, 0xa2, 0x5e, 0x70, 0xc0, 0xf6, 0x45, 0x95, 0xb4, 0x58, 0x95, 0xb4, 0x5a, 0x41, 0xb5, - 0x73, 0xb9, 0xda, 0xa4, 0x1e, 0xf5, 0x89, 0xa0, 0x8d, 0x4a, 0xcb, 0xe7, 0x82, 0xa3, 0xe5, 0x58, - 0xa3, 0x12, 0x69, 0x54, 0x48, 0x8b, 0x55, 0xa4, 0x46, 0xa5, 0x73, 0x79, 0xf1, 0x9b, 0x09, 0xcc, - 0x26, 0x6f, 0xf2, 0xaa, 0x52, 0xdc, 0x6b, 0xef, 0xab, 0x2f, 0xf5, 0xa1, 0x7e, 0x69, 0xc0, 0xc5, - 0xf2, 0xe1, 0xfb, 0x41, 0x85, 0x71, 0x65, 0xd4, 0xe6, 0x3e, 0x1d, 0x60, 0x74, 0xf1, 0x4a, 0x2c, - 0xe3, 0x12, 0xfb, 0x80, 0x79, 0xd4, 0xef, 0x56, 0x5b, 0x87, 0x4d, 0x49, 0x08, 0xaa, 0x2e, 0x15, - 0x64, 0x90, 0xd6, 0x7b, 0xc3, 0xb4, 0xfc, 0xb6, 0x27, 0x98, 0x4b, 0xab, 0x81, 0x7d, 0x40, 0x5d, - 0xd2, 0xa7, 0xf7, 0xee, 0x30, 0xbd, 0xb6, 0x60, 0x4e, 0x95, 0x79, 0x22, 0x10, 0x7e, 0xaf, 0x52, - 0xf9, 0xcf, 0x16, 0x2c, 0xad, 0xb7, 0x03, 0xc1, 0xdd, 0x1b, 0xb4, 0xe5, 0xf0, 0xae, 0x4b, 0x3d, - 0xb1, 0x23, 0xa4, 0x44, 0xb3, 0xbb, 0x4d, 0x7c, 0xe2, 0x06, 0xe8, 0x0d, 0xc8, 0x31, 0x97, 0x34, - 0x69, 0xd1, 0x5a, 0xb6, 0x56, 0x0a, 0xb5, 0x99, 0xa7, 0x47, 0xa5, 0xb1, 0xe3, 0xa3, 0x52, 0x6e, - 0x53, 0x12, 0xb1, 0xe6, 0xa1, 0xef, 0xc2, 0x14, 0xf5, 0x3a, 0xcc, 0xe7, 0x9e, 0x44, 0x28, 0x8e, - 0x2f, 0x67, 0x56, 0xa6, 0x56, 0x17, 0x2b, 0xda, 0x25, 0x15, 0x67, 0x19, 0xa4, 0x4a, 0xe7, 0x72, - 0x65, 0xc3, 0xeb, 0xdc, 0x23, 0x7e, 0x6d, 0xc1, 0xc0, 0x4c, 0x6d, 0xc4, 0x6a, 0x38, 0x89, 0x81, - 0xde, 0x84, 0x49, 0x9b, 0xbb, 0x2e, 0xf1, 0x1a, 0xc5, 0xcc, 0x72, 0x66, 0xa5, 0x50, 0x9b, 0x3a, - 0x3e, 0x2a, 0x4d, 0xae, 0x6b, 0x12, 0x0e, 0x79, 0xe5, 0xbf, 0x58, 0x30, 0x17, 0xfb, 0xbe, 0x4e, - 0xda, 0x01, 0x45, 0x57, 0x21, 0x2b, 0xba, 0xad, 0xd0, 0xe3, 
0x37, 0x8d, 0xa9, 0xec, 0x6e, 0xb7, - 0x45, 0x9f, 0x1f, 0x95, 0x2e, 0xc4, 0xe2, 0xbb, 0x3e, 0x6b, 0x36, 0xa9, 0x2f, 0x19, 0x58, 0xa9, - 0xa0, 0x00, 0xa6, 0xd5, 0x8a, 0x0c, 0xa7, 0x38, 0xbe, 0x6c, 0xad, 0x4c, 0xad, 0x7e, 0x50, 0x39, - 0x29, 0x7f, 0x2a, 0x3d, 0x3e, 0x6c, 0x26, 0x40, 0x6a, 0xe7, 0x8e, 0x8f, 0x4a, 0xd3, 0x49, 0x0a, - 0x4e, 0x19, 0x29, 0x37, 0xe0, 0xf5, 0x17, 0xa8, 0xa3, 0x0d, 0xc8, 0xee, 0xfb, 0xdc, 0x55, 0xcb, - 0x99, 0x5a, 0x7d, 0x63, 0x50, 0x54, 0xef, 0xec, 0x7d, 0x42, 0x6d, 0x81, 0xe9, 0x3e, 0xf5, 0xa9, - 0x67, 0xd3, 0xda, 0x74, 0xb8, 0xe6, 0x9b, 0x3e, 0x77, 0xb1, 0x52, 0x2f, 0xff, 0x2b, 0x03, 0x0b, - 0x09, 0x33, 0xdc, 0x6b, 0x30, 0xc1, 0xb8, 0x87, 0xae, 0xa7, 0xa2, 0xf5, 0xd5, 0x9e, 0x68, 0x5d, - 0x1a, 0xa0, 0x92, 0x88, 0x57, 0x1d, 0x26, 0x02, 0x41, 0x44, 0x3b, 0x50, 0x91, 0x2a, 0xd4, 0xae, - 0x18, 0xf5, 0x89, 0x1d, 0x45, 0x7d, 0x7e, 0x54, 0x1a, 0x50, 0x29, 0x95, 0x08, 0x49, 0x4b, 0x61, - 0x83, 0x81, 0x3e, 0x81, 0x59, 0x87, 0x04, 0xe2, 0x6e, 0xab, 0x41, 0x04, 0xdd, 0x65, 0x2e, 0x2d, - 0x4e, 0xa8, 0x35, 0x7f, 0x3d, 0xb1, 0xe6, 0x28, 0xb9, 0x2b, 0xad, 0xc3, 0xa6, 0x24, 0x04, 0x15, - 0x59, 0x4a, 0x32, 0x0a, 0x52, 0xa3, 0x76, 0xd1, 0x78, 0x30, 0x5b, 0x4f, 0x21, 0xe1, 0x1e, 0x64, - 0xd4, 0x01, 0x24, 0x29, 0xbb, 0x3e, 0xf1, 0x02, 0xbd, 0x2a, 0x69, 0x2f, 0x33, 0xb2, 0xbd, 0x45, - 0x63, 0x0f, 0xd5, 0xfb, 0xd0, 0xf0, 0x00, 0x0b, 0xe8, 0x2d, 0x98, 0xf0, 0x29, 0x09, 0xb8, 0x57, - 0xcc, 0xaa, 0x88, 0xcd, 0x86, 0x11, 0xc3, 0x8a, 0x8a, 0x0d, 0x17, 0x7d, 0x0d, 0x26, 0x5d, 0x1a, - 0x04, 0xb2, 0xf2, 0x72, 0x4a, 0x70, 0xce, 0x08, 0x4e, 0x6e, 0x69, 0x32, 0x0e, 0xf9, 0xe5, 0x3f, - 0x8e, 0xc3, 0xb9, 0xd4, 0x36, 0xed, 0xb3, 0x26, 0x7a, 0x04, 0x79, 0xe9, 0x67, 0x83, 0x08, 0x62, - 0x32, 0xe7, 0x9d, 0xd3, 0xad, 0x4a, 0xe7, 0xd2, 0x16, 0x15, 0xa4, 0x86, 0x8c, 0x49, 0x88, 0x69, - 0x38, 0x42, 0x45, 0xdf, 0x83, 0x6c, 0xd0, 0xa2, 0xb6, 0xa9, 0x91, 0xf7, 0x46, 0xaa, 0x11, 0xe5, - 0xe3, 0x4e, 0x8b, 0xda, 0x71, 0xaa, 0xca, 0x2f, 0xac, 0x10, 0xd1, 0xa3, 0x28, 0xab, 0xf4, 0x7e, - 
0xbc, 0x7f, 0x06, 0x6c, 0xa5, 0x1f, 0x47, 0x37, 0x9d, 0x69, 0xe5, 0xbf, 0x5b, 0x70, 0xbe, 0x57, - 0xa5, 0xce, 0x02, 0x81, 0xbe, 0xdf, 0x17, 0xb6, 0xca, 0xe9, 0xc2, 0x26, 0xb5, 0x55, 0xd0, 0xce, - 0x19, 0x93, 0xf9, 0x90, 0x92, 0x08, 0xd9, 0x7d, 0xc8, 0x31, 0x41, 0xdd, 0xc0, 0x74, 0xc8, 0xd5, - 0xd1, 0xd7, 0x95, 0x68, 0xc0, 0x12, 0x08, 0x6b, 0xbc, 0xf2, 0xcf, 0x33, 0x50, 0xec, 0x15, 0xc5, - 0xdc, 0x71, 0xf6, 0x88, 0x7d, 0x88, 0x96, 0x21, 0xeb, 0x11, 0x37, 0xac, 0xf0, 0x28, 0xe0, 0xb7, - 0x89, 0x4b, 0xb1, 0xe2, 0xa0, 0xdf, 0x58, 0x80, 0xda, 0xaa, 0x36, 0x1a, 0x6b, 0x9e, 0xc7, 0x05, - 0x91, 0xe9, 0x1a, 0x7a, 0x89, 0x47, 0xf7, 0x32, 0x34, 0x5d, 0xb9, 0xdb, 0x07, 0xba, 0xe1, 0x09, - 0xbf, 0x1b, 0x57, 0x4d, 0xbf, 0x00, 0x1e, 0xe0, 0x09, 0x7a, 0x64, 0x72, 0x4d, 0xe7, 0xc3, 0x87, - 0x67, 0xf7, 0x68, 0x58, 0xce, 0x2d, 0x6e, 0xc0, 0xa5, 0x21, 0xce, 0xa2, 0x73, 0x90, 0x39, 0xa4, - 0x5d, 0x1d, 0x3e, 0x2c, 0x7f, 0xa2, 0xf3, 0x90, 0xeb, 0x10, 0xa7, 0x4d, 0x75, 0xd7, 0xc3, 0xfa, - 0xe3, 0xda, 0xf8, 0xfb, 0x56, 0xf9, 0x4f, 0x19, 0xf8, 0xca, 0x8b, 0x6c, 0xbf, 0xa2, 0x6e, 0x8e, - 0xde, 0x86, 0xbc, 0x4f, 0x3b, 0x2c, 0x60, 0xdc, 0x53, 0x4e, 0x64, 0xe2, 0xbc, 0xc3, 0x86, 0x8e, - 0x23, 0x09, 0xb4, 0x06, 0x73, 0xcc, 0xb3, 0x9d, 0x76, 0x23, 0x3c, 0x54, 0x74, 0x65, 0xe5, 0x6b, - 0x97, 0x8c, 0xd2, 0xdc, 0x66, 0x9a, 0x8d, 0x7b, 0xe5, 0x93, 0x10, 0xd4, 0x6d, 0x39, 0x44, 0x50, - 0xd5, 0xc0, 0x06, 0x40, 0x18, 0x36, 0xee, 0x95, 0x47, 0xf7, 0xe0, 0xa2, 0x21, 0x61, 0xda, 0x72, - 0x98, 0xad, 0x62, 0x2c, 0x2b, 0x44, 0x75, 0xb8, 0x7c, 0x6d, 0xc9, 0x20, 0x5d, 0xdc, 0x1c, 0x28, - 0x85, 0x87, 0x68, 0x27, 0x5c, 0x0b, 0x67, 0x17, 0x75, 0x6e, 0xf4, 0xbb, 0x16, 0xb2, 0x71, 0xaf, - 0x7c, 0xf9, 0x7f, 0xb9, 0xfe, 0x7e, 0xa0, 0xb6, 0x6b, 0x0f, 0xf2, 0x41, 0x08, 0xaa, 0xb7, 0xec, - 0xca, 0x28, 0xc9, 0x17, 0x1a, 0x88, 0x77, 0x27, 0xf2, 0x21, 0xc2, 0x95, 0xfe, 0xbb, 0xcc, 0xc3, - 0x94, 0x34, 0xba, 0x3b, 0xd4, 0xe6, 0x5e, 0x23, 0x28, 0x16, 0x96, 0xad, 0x95, 0x5c, 0xec, 0xff, - 0x56, 0x9a, 0x8d, 0x7b, 0xe5, 0x11, 
0x85, 0xbc, 0x08, 0x77, 0x56, 0xf7, 0xe3, 0xeb, 0xa3, 0xb8, - 0x69, 0x76, 0x79, 0x9b, 0x3b, 0xcc, 0x66, 0x34, 0xa8, 0x4d, 0x4b, 0x4f, 0xa3, 0x5c, 0x88, 0xa0, - 0x75, 0xd6, 0xa9, 0xe0, 0xeb, 0x04, 0xca, 0x25, 0xb3, 0x4e, 0xd3, 0x71, 0x24, 0x81, 0xea, 0x70, - 0x3e, 0xcc, 0xc0, 0x8f, 0x59, 0x20, 0xb8, 0xdf, 0xad, 0x33, 0x97, 0x09, 0x95, 0x37, 0xb9, 0x5a, - 0xf1, 0xf8, 0xa8, 0x74, 0x1e, 0x0f, 0xe0, 0xe3, 0x81, 0x5a, 0xb2, 0x8b, 0x09, 0x1a, 0x08, 0x93, - 0x2b, 0x51, 0x4d, 0xec, 0xd2, 0x40, 0x60, 0xc5, 0x91, 0x47, 0x6b, 0x4b, 0x4e, 0x4f, 0x0d, 0xb3, - 0xfd, 0x51, 0xf3, 0xdf, 0x56, 0x54, 0x6c, 0xb8, 0xc8, 0x87, 0x7c, 0x40, 0x1d, 0x6a, 0x0b, 0xee, - 0x17, 0x27, 0x55, 0x8b, 0xbb, 0x71, 0xb6, 0xc3, 0xab, 0xb2, 0x63, 0x60, 0x74, 0x53, 0x8b, 0xf7, - 0xd8, 0x90, 0x71, 0x64, 0x07, 0x6d, 0x41, 0x5e, 0x84, 0x75, 0x93, 0x1f, 0x5e, 0xfa, 0xdb, 0xbc, - 0x11, 0x96, 0x8b, 0xee, 0x54, 0x6a, 0x23, 0xc2, 0x8a, 0x8a, 0x20, 0x16, 0xaf, 0xc3, 0x4c, 0xca, - 0xf6, 0x48, 0x3d, 0xea, 0x0f, 0x39, 0xb8, 0x38, 0xf8, 0xbc, 0x44, 0xd7, 0x61, 0x46, 0xe2, 0x07, - 0xe2, 0x1e, 0xf5, 0x55, 0x6f, 0xb1, 0x54, 0x6f, 0xb9, 0x60, 0x56, 0x36, 0x53, 0x4f, 0x32, 0x71, - 0x5a, 0x16, 0xdd, 0x02, 0xc4, 0xf7, 0x02, 0xea, 0x77, 0x68, 0xe3, 0x23, 0x7d, 0xd1, 0x88, 0xbb, - 0x53, 0xd4, 0xf0, 0xef, 0xf4, 0x49, 0xe0, 0x01, 0x5a, 0x23, 0x66, 0xda, 0x1a, 0xcc, 0x99, 0x43, - 0x23, 0x64, 0x9a, 0x24, 0x8b, 0x2a, 0xe8, 0x6e, 0x9a, 0x8d, 0x7b, 0xe5, 0xd1, 0x47, 0x30, 0x4f, - 0x3a, 0x84, 0x39, 0x64, 0xcf, 0xa1, 0x11, 0x48, 0x4e, 0x81, 0xbc, 0x66, 0x40, 0xe6, 0xd7, 0x7a, - 0x05, 0x70, 0xbf, 0x0e, 0xda, 0x82, 0x85, 0xb6, 0xd7, 0x0f, 0x35, 0xa1, 0xa0, 0x5e, 0x37, 0x50, - 0x0b, 0x77, 0xfb, 0x45, 0xf0, 0x20, 0x3d, 0xf4, 0x10, 0x26, 0x1b, 0x54, 0x10, 0xe6, 0x04, 0xc5, - 0x49, 0x95, 0x37, 0xef, 0x8e, 0x92, 0xab, 0x37, 0xb4, 0xaa, 0xbe, 0x3c, 0x99, 0x0f, 0x1c, 0x02, - 0x22, 0x06, 0x60, 0x87, 0xa3, 0x78, 0x50, 0xcc, 0xab, 0x52, 0xf8, 0xd6, 0x88, 0xa5, 0xa0, 0xb5, - 0xe3, 0x51, 0x31, 0x22, 0x05, 0x38, 0x01, 0x2e, 0x13, 0xcb, 0x97, 0x0d, 
0x2b, 0x8a, 0x87, 0xee, - 0x70, 0x51, 0x62, 0xe1, 0x24, 0x13, 0xa7, 0x65, 0xcb, 0xbf, 0xb6, 0x60, 0xbe, 0x6f, 0x4d, 0xc9, - 0x09, 0xd9, 0x7a, 0xf1, 0x84, 0x8c, 0x1e, 0xc0, 0x84, 0x2d, 0x6b, 0x3f, 0x1c, 0x69, 0x2e, 0x8f, - 0x7c, 0xa1, 0x8b, 0x9b, 0x89, 0xfa, 0x0c, 0xb0, 0x01, 0x2c, 0xcf, 0xc1, 0x4c, 0x2c, 0x5a, 0xe7, - 0xcd, 0xf2, 0x67, 0xd9, 0xe4, 0x51, 0x52, 0xe7, 0xcd, 0x3b, 0x2d, 0x1d, 0x82, 0x2a, 0x14, 0x6c, - 0xee, 0x09, 0x22, 0x07, 0x48, 0xe3, 0xf1, 0xbc, 0x01, 0x2d, 0xac, 0x87, 0x0c, 0x1c, 0xcb, 0xc8, - 0x7e, 0xb6, 0xcf, 0x1d, 0x87, 0x3f, 0x56, 0x35, 0x94, 0xe8, 0x67, 0x37, 0x15, 0x15, 0x1b, 0xae, - 0xac, 0x95, 0x96, 0x6c, 0x99, 0xbc, 0x1d, 0x1e, 0xeb, 0x51, 0xad, 0x6c, 0x1b, 0x3a, 0x8e, 0x24, - 0xd0, 0x15, 0x98, 0x0e, 0x98, 0x67, 0xd3, 0xf0, 0xa8, 0xc9, 0xea, 0xe9, 0x41, 0xde, 0x51, 0x77, - 0x12, 0x74, 0x9c, 0x92, 0x42, 0xf7, 0xa1, 0xa0, 0xbe, 0xd5, 0x2d, 0x29, 0x37, 0xf2, 0x2d, 0x69, - 0x46, 0x2e, 0x72, 0x27, 0x04, 0xc0, 0x31, 0x16, 0x5a, 0x05, 0x10, 0xcc, 0xa5, 0x81, 0x20, 0x6e, - 0x2b, 0x30, 0x8d, 0x3b, 0x4a, 0xa6, 0xdd, 0x88, 0x83, 0x13, 0x52, 0xe8, 0x1b, 0x50, 0x90, 0x29, - 0x50, 0x67, 0x1e, 0xd5, 0x55, 0x91, 0xd1, 0x06, 0x76, 0x43, 0x22, 0x8e, 0xf9, 0xa8, 0x02, 0xe0, - 0xc8, 0x03, 0xa4, 0xd6, 0x15, 0x34, 0x50, 0xbd, 0x37, 0x53, 0x9b, 0x95, 0xe0, 0xf5, 0x88, 0x8a, - 0x13, 0x12, 0x32, 0xea, 0x1e, 0x7f, 0x4c, 0x98, 0x50, 0x29, 0x9a, 0x88, 0xfa, 0x6d, 0x7e, 0x9f, - 0x30, 0x81, 0x0d, 0x17, 0xbd, 0x09, 0x93, 0x1d, 0xd3, 0x24, 0x41, 0x81, 0xaa, 0x1a, 0x0b, 0x5b, - 0x63, 0xc8, 0x2b, 0xff, 0x3b, 0x95, 0xbb, 0x98, 0xfe, 0xa8, 0x2d, 0x8f, 0xaa, 0x93, 0x47, 0xf2, - 0xb7, 0x60, 0x42, 0x77, 0xd7, 0xde, 0xcd, 0xd7, 0x2d, 0x18, 0x1b, 0x2e, 0x7a, 0x03, 0x72, 0xfb, - 0xdc, 0xb7, 0xa9, 0xd9, 0xf9, 0xe8, 0x7a, 0x70, 0x53, 0x12, 0xb1, 0xe6, 0xa1, 0x7b, 0x30, 0x47, - 0x9f, 0xa4, 0xe7, 0xbf, 0xac, 0x7a, 0x54, 0x79, 0x5b, 0xf6, 0xc6, 0x8d, 0x34, 0x6b, 0xf8, 0x1b, - 0x49, 0x2f, 0x48, 0xf9, 0x1f, 0x93, 0x80, 0xfa, 0x87, 0x1d, 0x74, 0x2d, 0xf5, 0xa4, 0xf0, 0x56, - 0xcf, 0x93, 
0xc2, 0xc5, 0x7e, 0x8d, 0xc4, 0x8b, 0x42, 0x07, 0xa6, 0x6d, 0xf5, 0x22, 0xa5, 0xdf, - 0x9f, 0xcc, 0x34, 0xf3, 0x9d, 0x93, 0x0b, 0xf6, 0xc5, 0xef, 0x58, 0x3a, 0xc1, 0xd7, 0x13, 0xc8, - 0x38, 0x65, 0x07, 0xfd, 0x14, 0x66, 0x7d, 0x6a, 0xfb, 0x94, 0x08, 0x6a, 0x2c, 0xeb, 0xbb, 0x46, - 0xed, 0x64, 0xcb, 0xd8, 0xe8, 0x0d, 0xb5, 0x8d, 0x8e, 0x8f, 0x4a, 0xb3, 0x38, 0x85, 0x8e, 0x7b, - 0xac, 0xa1, 0x1f, 0xc3, 0x8c, 0xcf, 0x1d, 0x87, 0x79, 0x4d, 0x63, 0x3e, 0xab, 0xcc, 0xaf, 0x9d, - 0xc2, 0xbc, 0x56, 0x1b, 0x6a, 0x7d, 0x5e, 0xf5, 0xd7, 0x24, 0x36, 0x4e, 0x9b, 0x42, 0x0f, 0xa0, - 0xe0, 0xd3, 0x80, 0xb7, 0x7d, 0x9b, 0x06, 0xa6, 0xb8, 0x57, 0x06, 0x4d, 0x27, 0xd8, 0x08, 0xc9, - 0x2c, 0x66, 0x3e, 0x95, 0xb6, 0x82, 0xb8, 0x87, 0x85, 0xdc, 0x00, 0xc7, 0x68, 0xe8, 0x40, 0xa6, - 0xf1, 0x1e, 0x75, 0x64, 0x69, 0x67, 0x4e, 0xb7, 0x91, 0xfd, 0x0b, 0xa9, 0xd4, 0x15, 0x84, 0x9e, - 0xb2, 0x12, 0x85, 0x20, 0x89, 0xd8, 0xe0, 0xa3, 0x9f, 0xc0, 0x14, 0x49, 0xdc, 0x5d, 0xf5, 0x60, - 0xb7, 0x71, 0x26, 0x73, 0x7d, 0xd7, 0xd5, 0xe8, 0xb9, 0x32, 0x79, 0x4f, 0x4d, 0x9a, 0x43, 0x77, - 0xe0, 0x02, 0xb1, 0x05, 0xeb, 0xd0, 0x1b, 0x94, 0x34, 0x1c, 0xe6, 0x45, 0xed, 0x55, 0x37, 0x9c, - 0xd7, 0x8e, 0x8f, 0x4a, 0x17, 0xd6, 0x06, 0x09, 0xe0, 0xc1, 0x7a, 0x8b, 0x57, 0x61, 0x2a, 0xb1, - 0xea, 0x51, 0xe6, 0xbb, 0xc5, 0x0f, 0xe1, 0xdc, 0x4b, 0xdd, 0x61, 0x7f, 0x37, 0x0e, 0xe5, 0xbe, - 0x06, 0xa0, 0x9e, 0x24, 0xd7, 0x0f, 0x88, 0xd7, 0x0c, 0x33, 0xb6, 0x0a, 0x05, 0xd2, 0x16, 0xdc, - 0x25, 0x82, 0xd9, 0x0a, 0x38, 0x1f, 0xe7, 0xc2, 0x5a, 0xc8, 0xc0, 0xb1, 0x0c, 0xba, 0x06, 0xb3, - 0xd1, 0xe1, 0x26, 0x3b, 0x9d, 0x3e, 0x8d, 0x0b, 0xba, 0x3c, 0xd6, 0x53, 0x1c, 0xdc, 0x23, 0x19, - 0x5d, 0x9b, 0x33, 0x2f, 0x77, 0x6d, 0xbe, 0x15, 0xbe, 0xfa, 0xa9, 0x35, 0xd1, 0x86, 0x5a, 0x95, - 0x79, 0x89, 0xeb, 0x79, 0xc9, 0x4b, 0x4a, 0xe0, 0x01, 0x5a, 0xe5, 0x9f, 0x59, 0xf0, 0xda, 0xd0, - 0x2b, 0x14, 0xfa, 0x41, 0xf8, 0xd4, 0x63, 0xa9, 0x44, 0xbc, 0x7a, 0xd6, 0xeb, 0x58, 0x77, 0xf0, - 0x8b, 0xcf, 0xb5, 0xfc, 0xaf, 0x7e, 0x5b, 0x1a, 
0xfb, 0xf4, 0x3f, 0xcb, 0x63, 0xe5, 0x2f, 0x2d, - 0xb8, 0x34, 0x44, 0xf7, 0x65, 0x9e, 0xc2, 0x7f, 0x61, 0xc1, 0x3c, 0xeb, 0xdd, 0x74, 0xd3, 0x8e, - 0x6f, 0x9c, 0x61, 0x35, 0x7d, 0x09, 0x54, 0xbb, 0x20, 0x67, 0xea, 0x3e, 0x32, 0xee, 0xb7, 0x5a, - 0xfe, 0xa7, 0x05, 0xb3, 0x1b, 0x4f, 0xa8, 0x7d, 0x9b, 0x3e, 0xde, 0xe6, 0x8d, 0x8f, 0x39, 0x3f, - 0x4c, 0xfe, 0x3f, 0x60, 0x0d, 0xff, 0x7f, 0x00, 0x5d, 0x85, 0x0c, 0xf5, 0x3a, 0xa7, 0xf8, 0x47, - 0x62, 0xca, 0xc4, 0x26, 0xb3, 0xe1, 0x75, 0xb0, 0xd4, 0x91, 0x23, 0x6b, 0x2a, 0x09, 0x55, 0xee, - 0x15, 0xe2, 0x91, 0x35, 0x95, 0xb1, 0x38, 0x2d, 0xab, 0xa6, 0x03, 0xee, 0xb4, 0x65, 0x92, 0x67, - 0x63, 0xf7, 0xee, 0x69, 0x12, 0x0e, 0x79, 0xe5, 0xdf, 0x8f, 0xc3, 0x4c, 0x9d, 0xed, 0x53, 0xbb, - 0x6b, 0x3b, 0x54, 0xad, 0xeb, 0x01, 0xcc, 0xec, 0x13, 0xe6, 0xb4, 0x7d, 0xaa, 0xb7, 0xd0, 0x6c, - 0xdd, 0xbb, 0xa1, 0xd5, 0x9b, 0x49, 0xe6, 0xf3, 0xa3, 0xd2, 0x62, 0x4a, 0x3d, 0xc5, 0xc5, 0x69, - 0x24, 0xf4, 0x08, 0x80, 0x46, 0x41, 0x34, 0x3b, 0xf9, 0xce, 0xc9, 0x3b, 0x99, 0x0e, 0xbc, 0x9e, - 0x9d, 0x62, 0x1a, 0x4e, 0x60, 0xa2, 0x1f, 0xca, 0xc1, 0xac, 0xa9, 0xb6, 0x34, 0x50, 0x7f, 0xdb, - 0x4c, 0xad, 0x56, 0x4e, 0x36, 0xb0, 0x6b, 0x54, 0x14, 0x7c, 0xd4, 0x42, 0x42, 0xaa, 0x1a, 0xe6, - 0xcc, 0xcf, 0xf2, 0x5f, 0xc7, 0x61, 0xf9, 0xa4, 0xe3, 0x56, 0xf6, 0x19, 0x39, 0x2c, 0xf2, 0xb6, - 0x08, 0x9b, 0xb0, 0xbe, 0xc5, 0xaa, 0x3e, 0xb3, 0x9b, 0xe2, 0xe0, 0x1e, 0x49, 0x74, 0x0b, 0x32, - 0x2d, 0x9f, 0x9a, 0xe0, 0x54, 0x4f, 0xf6, 0x3d, 0x15, 0xfd, 0xda, 0xa4, 0x4c, 0xa0, 0x6d, 0x9f, - 0x62, 0x09, 0x22, 0xb1, 0x5c, 0xd6, 0x30, 0x2d, 0xeb, 0x6c, 0x58, 0x5b, 0xac, 0x81, 0x25, 0x08, - 0xda, 0x82, 0x6c, 0x8b, 0x07, 0xc2, 0x4c, 0x05, 0x23, 0x83, 0xe5, 0x65, 0xd5, 0x6f, 0xf3, 0x40, - 0x60, 0x05, 0x53, 0xfe, 0x5b, 0x16, 0x4a, 0x27, 0xcc, 0x0d, 0x68, 0x13, 0x16, 0xf4, 0x25, 0x79, - 0x9b, 0xfa, 0x8c, 0x37, 0xd2, 0xb1, 0xbc, 0xa4, 0x2e, 0xb1, 0xfd, 0x6c, 0x3c, 0x48, 0x07, 0x7d, - 0x00, 0x73, 0xcc, 0x13, 0xd4, 0xef, 0x10, 0x27, 0x84, 0xd1, 0xcf, 0x02, 0x0b, 0xfa, 
0x75, 0x2e, - 0xc5, 0xc2, 0xbd, 0xb2, 0x03, 0x36, 0x34, 0x73, 0xea, 0x0d, 0x75, 0x60, 0xd6, 0x25, 0x4f, 0x12, - 0xd7, 0x6d, 0x13, 0xc2, 0xe1, 0xff, 0x86, 0xb4, 0x05, 0x73, 0x2a, 0xfa, 0x0f, 0xd3, 0xca, 0xa6, - 0x27, 0xee, 0xf8, 0x3b, 0xc2, 0x67, 0x5e, 0x53, 0x5b, 0xdb, 0x4a, 0x61, 0xe1, 0x1e, 0x6c, 0xf4, - 0x10, 0xf2, 0x2e, 0x79, 0xb2, 0xd3, 0xf6, 0x9b, 0xe1, 0x2d, 0x69, 0x74, 0x3b, 0xea, 0xcd, 0x67, - 0xcb, 0xa0, 0xe0, 0x08, 0x2f, 0x4c, 0xcd, 0xc9, 0x57, 0x91, 0x9a, 0x61, 0x3a, 0xe5, 0x5f, 0x4d, - 0x3a, 0x7d, 0x66, 0xc1, 0x74, 0xb2, 0x8a, 0xfb, 0x7b, 0xa7, 0x35, 0x42, 0xef, 0xfc, 0x36, 0x8c, - 0x0b, 0x6e, 0x4a, 0xf0, 0x54, 0x27, 0x3d, 0x18, 0xd8, 0xf1, 0x5d, 0x8e, 0xc7, 0x05, 0xaf, 0xad, - 0x3c, 0x7d, 0xb6, 0x34, 0xf6, 0xf9, 0xb3, 0xa5, 0xb1, 0x2f, 0x9e, 0x2d, 0x8d, 0x7d, 0x7a, 0xbc, - 0x64, 0x3d, 0x3d, 0x5e, 0xb2, 0x3e, 0x3f, 0x5e, 0xb2, 0xbe, 0x38, 0x5e, 0xb2, 0xbe, 0x3c, 0x5e, - 0xb2, 0x7e, 0xf9, 0xdf, 0xa5, 0xb1, 0x87, 0xe3, 0x9d, 0xcb, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, - 0x19, 0x34, 0x0f, 0xd6, 0x4b, 0x20, 0x00, 0x00, -} - func (m *CustomDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -884,50 +208,48 @@ func (m *CustomDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { } func (m *CustomDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CustomDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Command) > 0 { - for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Command[iNdEx]) - copy(dAtA[i:], m.Command[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Environment) > 0 { - for iNdEx := len(m.Environment) - 1; iNdEx >= 0; 
iNdEx-- { - { - size, err := m.Environment[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Image) - copy(dAtA[i:], m.Image) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) + if len(m.Environment) > 0 { + for _, msg := range m.Environment { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Command) > 0 { + for _, s := range m.Command { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil } func (m *DeploymentCause) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -935,39 +257,31 @@ func (m *DeploymentCause) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCause) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.ImageTrigger != nil { - { - size, err := m.ImageTrigger.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if 
m.ImageTrigger != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ImageTrigger.Size())) + n1, err := m.ImageTrigger.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil } func (m *DeploymentCauseImageTrigger) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -975,32 +289,25 @@ func (m *DeploymentCauseImageTrigger) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCauseImageTrigger) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentCauseImageTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) + n2, err := m.From.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1008,62 +315,49 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 
0x32 - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n3, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n4, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil } func (m *DeploymentConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1071,52 +365,41 @@ func (m *DeploymentConfig) Marshal() (dAtA []byte, err error) { } func (m *DeploymentConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n6, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n7, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil } func (m *DeploymentConfigList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1124,46 +407,37 @@ func (m *DeploymentConfigList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentConfigList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } func (m *DeploymentConfigRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1171,61 +445,51 @@ func (m *DeploymentConfigRollback) Marshal() (dAtA []byte, err error) { } func (m *DeploymentConfigRollback) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentConfigRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) if len(m.UpdatedAnnotations) > 0 { keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) for k := range m.UpdatedAnnotations { keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for 
iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { - v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- + for _, k := range keysForUpdatedAnnotations { dAtA[i] = 0x12 - i -= len(keysForUpdatedAnnotations[iNdEx]) - copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) - i-- + i++ + v := m.UpdatedAnnotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n9, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + return i, nil } func (m *DeploymentConfigRollbackSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1233,67 +497,60 @@ func (m *DeploymentConfigRollbackSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentConfigRollbackSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentConfigRollbackSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i-- - if m.IncludeStrategy { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0xa + 
i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) + n10, err := m.From.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x30 - i-- - if m.IncludeReplicationMeta { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - i-- - if m.IncludeTemplate { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - i-- + i += n10 + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + dAtA[i] = 0x18 + i++ if m.IncludeTriggers { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x10 - { - size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + dAtA[i] = 0x20 + i++ + if m.IncludeTemplate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + dAtA[i] = 0x28 + i++ + if m.IncludeReplicationMeta { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x30 + i++ + if m.IncludeStrategy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil } func (m *DeploymentConfigSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1301,107 +558,94 @@ func (m *DeploymentConfigSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentConfigSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x48 - if m.Template != nil { - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n11, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n11 + if m.Triggers != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Triggers.Size())) + n12, err := m.Triggers.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + dAtA[i] = 0x28 + i++ + if m.Test { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x30 + i++ + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.Selector[string(keysForSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForSelector[iNdEx]) - copy(dAtA[i:], keysForSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + for _, k := range keysForSelector { dAtA[i] = 0x3a + i++ + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - i-- - if 
m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - i-- - if m.Test { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - if m.RevisionHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - i-- - dAtA[i] = 0x20 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x18 - if m.Triggers != nil { - { - size, err := m.Triggers.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if m.Template != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n13, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n13 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil } func (m *DeploymentConfigStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1409,69 +653,60 @@ func (m *DeploymentConfigStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentConfigStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x48 - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, 
i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LatestVersion)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) if m.Details != nil { - { - size, err := m.Details.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) + n14, err := m.Details.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x3a } - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) - i-- - dAtA[i] = 0x30 - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.LatestVersion)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + return i, nil } func (m *DeploymentDetails) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != 
nil { return nil, err } @@ -1479,41 +714,33 @@ func (m *DeploymentDetails) Marshal() (dAtA []byte, err error) { } func (m *DeploymentDetails) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) if len(m.Causes) > 0 { - for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Causes { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *DeploymentLog) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1521,22 +748,17 @@ func (m *DeploymentLog) Marshal() (dAtA []byte, err error) { } func (m *DeploymentLog) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - return len(dAtA) - i, nil + return i, nil } func (m *DeploymentLogOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1544,91 +766,83 @@ func (m 
*DeploymentLogOptions) Marshal() (dAtA []byte, err error) { } func (m *DeploymentLogOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.Version != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Version)) - i-- - dAtA[i] = 0x50 - } - i-- - if m.NoWait { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - if m.LimitBytes != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) - i-- - dAtA[i] = 0x40 - } - if m.TailLines != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) - i-- - dAtA[i] = 0x38 - } - i-- - if m.Timestamps { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - if m.SinceTime != nil { - { - size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.SinceSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) - i-- - dAtA[i] = 0x20 - } - i-- - if m.Previous { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i-- + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i += copy(dAtA[i:], m.Container) + dAtA[i] = 0x10 + i++ if m.Follow { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x10 - i -= len(m.Container) - copy(dAtA[i:], m.Container) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + dAtA[i] = 0x18 + i++ + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SinceSeconds != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) + n15, 
err := m.SinceTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + dAtA[i] = 0x30 + i++ + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.TailLines != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + } + dAtA[i] = 0x48 + i++ + if m.NoWait { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.Version != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Version)) + } + return i, nil } func (m *DeploymentRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1636,52 +850,52 @@ func (m *DeploymentRequest) Marshal() (dAtA []byte, err error) { } func (m *DeploymentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.ExcludeTriggers) > 0 { - for iNdEx := len(m.ExcludeTriggers) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ExcludeTriggers[iNdEx]) - copy(dAtA[i:], m.ExcludeTriggers[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExcludeTriggers[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - i-- - if m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i-- + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ if m.Latest { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x10 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + dAtA[i] = 0x18 + i++ + if m.Force { + dAtA[i] = 1 + } 
else { + dAtA[i] = 0 + } + i++ + if len(m.ExcludeTriggers) > 0 { + for _, s := range m.ExcludeTriggers { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1689,19 +903,73 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.ActiveDeadlineSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) - i-- - dAtA[i] = 0x40 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.CustomParams != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CustomParams.Size())) + n16, err := m.CustomParams.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.RecreateParams != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RecreateParams.Size())) + n17, err := m.RecreateParams.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.RollingParams != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingParams.Size())) + n18, err := m.RollingParams.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) + n19, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + if len(m.Labels) > 0 
{ + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for _, k := range keysForLabels { + dAtA[i] = 0x32 + i++ + v := m.Labels[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } } if len(m.Annotations) > 0 { keysForAnnotations := make([]string, 0, len(m.Annotations)) @@ -1709,106 +977,34 @@ func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { keysForAnnotations = append(keysForAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { - v := m.Annotations[string(keysForAnnotations[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForAnnotations[iNdEx]) - copy(dAtA[i:], keysForAnnotations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + for _, k := range keysForAnnotations { dAtA[i] = 0x3a - } - } - if len(m.Labels) > 0 { - keysForLabels := make([]string, 0, len(m.Labels)) - for k := range m.Labels { - keysForLabels = append(keysForLabels, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { - v := m.Labels[string(keysForLabels[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForLabels[iNdEx]) - 
copy(dAtA[i:], keysForLabels[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) - i-- + i++ + v := m.Annotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.ActiveDeadlineSeconds != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) } - i-- - dAtA[i] = 0x2a - if m.RollingParams != nil { - { - size, err := m.RollingParams.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.RecreateParams != nil { - { - size, err := m.RecreateParams.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.CustomParams != nil { - { - size, err := m.CustomParams.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *DeploymentTriggerImageChangeParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ 
-1816,54 +1012,52 @@ func (m *DeploymentTriggerImageChangeParams) Marshal() (dAtA []byte, err error) } func (m *DeploymentTriggerImageChangeParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentTriggerImageChangeParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.LastTriggeredImage) - copy(dAtA[i:], m.LastTriggeredImage) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImage))) - i-- - dAtA[i] = 0x22 - { - size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.ContainerNames) > 0 { - for iNdEx := len(m.ContainerNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ContainerNames[iNdEx]) - copy(dAtA[i:], m.ContainerNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerNames[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i-- + dAtA[i] = 0x8 + i++ if m.Automatic { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + if len(m.ContainerNames) > 0 { + for _, s := range m.ContainerNames { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) + n20, err := m.From.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImage))) + i += copy(dAtA[i:], m.LastTriggeredImage) + return i, nil } func (m DeploymentTriggerPolicies) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1871,36 +1065,29 @@ 
func (m DeploymentTriggerPolicies) Marshal() (dAtA []byte, err error) { } func (m DeploymentTriggerPolicies) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m DeploymentTriggerPolicies) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m) > 0 { - for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m { dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *DeploymentTriggerPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1908,39 +1095,31 @@ func (m *DeploymentTriggerPolicy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentTriggerPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentTriggerPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.ImageChangeParams != nil { - { - size, err := m.ImageChangeParams.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.ImageChangeParams != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.ImageChangeParams.Size())) + n21, err := m.ImageChangeParams.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + return i, nil } func (m *ExecNewPodHook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1948,59 +1127,63 @@ func (m *ExecNewPodHook) Marshal() (dAtA []byte, err error) { } func (m *ExecNewPodHook) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecNewPodHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Volumes[iNdEx]) - copy(dAtA[i:], m.Volumes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - i -= len(m.ContainerName) - copy(dAtA[i:], m.ContainerName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) - i-- - dAtA[i] = 0x1a - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } if len(m.Command) > 0 { - for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Command[iNdEx]) - copy(dAtA[i:], m.Command[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) - i-- + for _, s := range m.Command { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + if len(m.Env) > 0 { + for _, msg := range m.Env { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i += copy(dAtA[i:], m.ContainerName) + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil } func (m *LifecycleHook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2008,53 +1191,43 @@ func (m *LifecycleHook) Marshal() (dAtA []byte, err error) { } func (m *LifecycleHook) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LifecycleHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.TagImages) > 0 { - for iNdEx := len(m.TagImages) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.TagImages[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FailurePolicy))) + i += copy(dAtA[i:], m.FailurePolicy) if m.ExecNewPod != nil { - { - size, err := m.ExecNewPod.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ExecNewPod.Size())) + n22, err := m.ExecNewPod.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if len(m.TagImages) > 0 { + for _, msg := range m.TagImages { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n 
} - i-- - dAtA[i] = 0x12 } - i -= len(m.FailurePolicy) - copy(dAtA[i:], m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FailurePolicy))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RecreateDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2062,63 +1235,52 @@ func (m *RecreateDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { } func (m *RecreateDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RecreateDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.Post != nil { - { - size, err := m.Post.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Mid != nil { - { - size, err := m.Mid.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + if m.TimeoutSeconds != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) } if m.Pre != nil { - { - size, err := m.Pre.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Pre.Size())) + n23, err := m.Pre.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 } - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x8 + if m.Mid != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Mid.Size())) + n24, err := 
m.Mid.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 } - return len(dAtA) - i, nil + if m.Post != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Post.Size())) + n25, err := m.Post.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil } func (m *RollingDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2126,85 +1288,72 @@ func (m *RollingDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { } func (m *RollingDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.Post != nil { - { - size, err := m.Post.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.Pre != nil { - { - size, err := m.Pre.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x18 + if m.UpdatePeriodSeconds != nil { + dAtA[i] = 0x8 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(*m.UpdatePeriodSeconds)) } if m.IntervalSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.IntervalSeconds)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntervalSeconds)) } - if m.UpdatePeriodSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.UpdatePeriodSeconds)) - i-- - dAtA[i] = 0x8 + if m.TimeoutSeconds != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) } - return len(dAtA) - i, nil + if m.MaxUnavailable != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n26, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.MaxSurge != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n27, err := m.MaxSurge.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if m.Pre != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Pre.Size())) + n28, err := m.Pre.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.Post != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Post.Size())) + n29, err := m.Post.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil } func (m *TagImageHook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2212,48 +1361,53 @@ func (m *TagImageHook) Marshal() (dAtA []byte, err error) { } func (m *TagImageHook) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TagImageHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) - if err != 
nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.ContainerName) - copy(dAtA[i:], m.ContainerName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i += copy(dAtA[i:], m.ContainerName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.To.Size())) + n30, err := m.To.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *CustomDeploymentStrategyParams) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Image) @@ -2274,9 +1428,6 @@ func (m *CustomDeploymentStrategyParams) Size() (n int) { } func (m *DeploymentCause) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2289,9 +1440,6 @@ func (m *DeploymentCause) Size() (n int) { } func (m *DeploymentCauseImageTrigger) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.From.Size() @@ -2300,9 +1448,6 @@ func (m *DeploymentCauseImageTrigger) Size() (n int) { } func (m 
*DeploymentCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2321,9 +1466,6 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentConfig) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2336,9 +1478,6 @@ func (m *DeploymentConfig) Size() (n int) { } func (m *DeploymentConfigList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -2353,9 +1492,6 @@ func (m *DeploymentConfigList) Size() (n int) { } func (m *DeploymentConfigRollback) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -2374,9 +1510,6 @@ func (m *DeploymentConfigRollback) Size() (n int) { } func (m *DeploymentConfigRollbackSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.From.Size() @@ -2390,9 +1523,6 @@ func (m *DeploymentConfigRollbackSpec) Size() (n int) { } func (m *DeploymentConfigSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Strategy.Size() @@ -2424,9 +1554,6 @@ func (m *DeploymentConfigSpec) Size() (n int) { } func (m *DeploymentConfigStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.LatestVersion)) @@ -2450,9 +1577,6 @@ func (m *DeploymentConfigStatus) Size() (n int) { } func (m *DeploymentDetails) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Message) @@ -2467,18 +1591,12 @@ func (m *DeploymentDetails) Size() (n int) { } func (m *DeploymentLog) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l return n } func (m *DeploymentLogOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Container) @@ -2507,9 +1625,6 @@ func (m *DeploymentLogOptions) Size() (n int) { } func (m *DeploymentRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -2526,9 +1641,6 @@ func (m *DeploymentRequest) Size() (n int) { } func (m 
*DeploymentStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2570,9 +1682,6 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *DeploymentTriggerImageChangeParams) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 2 @@ -2590,9 +1699,6 @@ func (m *DeploymentTriggerImageChangeParams) Size() (n int) { } func (m DeploymentTriggerPolicies) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m) > 0 { @@ -2605,9 +1711,6 @@ func (m DeploymentTriggerPolicies) Size() (n int) { } func (m *DeploymentTriggerPolicy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2620,9 +1723,6 @@ func (m *DeploymentTriggerPolicy) Size() (n int) { } func (m *ExecNewPodHook) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Command) > 0 { @@ -2649,9 +1749,6 @@ func (m *ExecNewPodHook) Size() (n int) { } func (m *LifecycleHook) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.FailurePolicy) @@ -2670,9 +1767,6 @@ func (m *LifecycleHook) Size() (n int) { } func (m *RecreateDeploymentStrategyParams) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.TimeoutSeconds != nil { @@ -2694,9 +1788,6 @@ func (m *RecreateDeploymentStrategyParams) Size() (n int) { } func (m *RollingDeploymentStrategyParams) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.UpdatePeriodSeconds != nil { @@ -2728,9 +1819,6 @@ func (m *RollingDeploymentStrategyParams) Size() (n int) { } func (m *TagImageHook) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.ContainerName) @@ -2741,7 +1829,14 @@ func (m *TagImageHook) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2750,14 +1845,9 
@@ func (this *CustomDeploymentStrategyParams) String() string { if this == nil { return "nil" } - repeatedStringForEnvironment := "[]EnvVar{" - for _, f := range this.Environment { - repeatedStringForEnvironment += fmt.Sprintf("%v", f) + "," - } - repeatedStringForEnvironment += "}" s := strings.Join([]string{`&CustomDeploymentStrategyParams{`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Environment:` + repeatedStringForEnvironment + `,`, + `Environment:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Environment), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, `Command:` + fmt.Sprintf("%v", this.Command) + `,`, `}`, }, "") @@ -2769,7 +1859,7 @@ func (this *DeploymentCause) String() string { } s := strings.Join([]string{`&DeploymentCause{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ImageTrigger:` + strings.Replace(this.ImageTrigger.String(), "DeploymentCauseImageTrigger", "DeploymentCauseImageTrigger", 1) + `,`, + `ImageTrigger:` + strings.Replace(fmt.Sprintf("%v", this.ImageTrigger), "DeploymentCauseImageTrigger", "DeploymentCauseImageTrigger", 1) + `,`, `}`, }, "") return s @@ -2779,7 +1869,7 @@ func (this *DeploymentCauseImageTrigger) String() string { return "nil" } s := strings.Join([]string{`&DeploymentCauseImageTrigger{`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2791,10 +1881,10 @@ func (this *DeploymentCondition) String() string { s := strings.Join([]string{`&DeploymentCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + 
strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2804,7 +1894,7 @@ func (this *DeploymentConfig) String() string { return "nil" } s := strings.Join([]string{`&DeploymentConfig{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentConfigSpec", "DeploymentConfigSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentConfigStatus", "DeploymentConfigStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2815,14 +1905,9 @@ func (this *DeploymentConfigList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]DeploymentConfig{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeploymentConfig", "DeploymentConfig", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentConfigList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DeploymentConfig", "DeploymentConfig", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2854,7 +1939,7 @@ func (this *DeploymentConfigRollbackSpec) String() string { return "nil" } s := strings.Join([]string{`&DeploymentConfigRollbackSpec{`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `IncludeTriggers:` + fmt.Sprintf("%v", this.IncludeTriggers) + `,`, `IncludeTemplate:` + fmt.Sprintf("%v", this.IncludeTemplate) + `,`, @@ -2886,7 +1971,7 @@ func (this *DeploymentConfigSpec) String() string { `Test:` + fmt.Sprintf("%v", this.Test) + `,`, `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, `Selector:` + mapStringForSelector + `,`, - `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1) + `,`, + `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -2896,11 +1981,6 @@ func (this *DeploymentConfigStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]DeploymentCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentConfigStatus{`, `LatestVersion:` + fmt.Sprintf("%v", this.LatestVersion) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, @@ -2908,8 
+1988,8 @@ func (this *DeploymentConfigStatus) String() string { `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Details:` + strings.Replace(this.Details.String(), "DeploymentDetails", "DeploymentDetails", 1) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "DeploymentDetails", "DeploymentDetails", 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `}`, }, "") @@ -2919,14 +1999,9 @@ func (this *DeploymentDetails) String() string { if this == nil { return "nil" } - repeatedStringForCauses := "[]DeploymentCause{" - for _, f := range this.Causes { - repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "DeploymentCause", "DeploymentCause", 1), `&`, ``, 1) + "," - } - repeatedStringForCauses += "}" s := strings.Join([]string{`&DeploymentDetails{`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Causes:` + repeatedStringForCauses + `,`, + `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "DeploymentCause", "DeploymentCause", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2949,7 +2024,7 @@ func (this *DeploymentLogOptions) String() string { `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, - `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v11.Time", 1) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, `TailLines:` + 
valueToStringGenerated(this.TailLines) + `,`, `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, @@ -2998,10 +2073,10 @@ func (this *DeploymentStrategy) String() string { mapStringForAnnotations += "}" s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `CustomParams:` + strings.Replace(this.CustomParams.String(), "CustomDeploymentStrategyParams", "CustomDeploymentStrategyParams", 1) + `,`, - `RecreateParams:` + strings.Replace(this.RecreateParams.String(), "RecreateDeploymentStrategyParams", "RecreateDeploymentStrategyParams", 1) + `,`, - `RollingParams:` + strings.Replace(this.RollingParams.String(), "RollingDeploymentStrategyParams", "RollingDeploymentStrategyParams", 1) + `,`, - `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `CustomParams:` + strings.Replace(fmt.Sprintf("%v", this.CustomParams), "CustomDeploymentStrategyParams", "CustomDeploymentStrategyParams", 1) + `,`, + `RecreateParams:` + strings.Replace(fmt.Sprintf("%v", this.RecreateParams), "RecreateDeploymentStrategyParams", "RecreateDeploymentStrategyParams", 1) + `,`, + `RollingParams:` + strings.Replace(fmt.Sprintf("%v", this.RollingParams), "RollingDeploymentStrategyParams", "RollingDeploymentStrategyParams", 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "k8s_io_api_core_v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, `Labels:` + mapStringForLabels + `,`, `Annotations:` + mapStringForAnnotations + `,`, `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, @@ -3016,7 +2091,7 @@ func (this *DeploymentTriggerImageChangeParams) String() string { s := strings.Join([]string{`&DeploymentTriggerImageChangeParams{`, `Automatic:` + fmt.Sprintf("%v", this.Automatic) + `,`, `ContainerNames:` + fmt.Sprintf("%v", this.ContainerNames) + `,`, - `From:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, `LastTriggeredImage:` + fmt.Sprintf("%v", this.LastTriggeredImage) + `,`, `}`, }, "") @@ -3028,7 +2103,7 @@ func (this *DeploymentTriggerPolicy) String() string { } s := strings.Join([]string{`&DeploymentTriggerPolicy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ImageChangeParams:` + strings.Replace(this.ImageChangeParams.String(), "DeploymentTriggerImageChangeParams", "DeploymentTriggerImageChangeParams", 1) + `,`, + `ImageChangeParams:` + strings.Replace(fmt.Sprintf("%v", this.ImageChangeParams), "DeploymentTriggerImageChangeParams", "DeploymentTriggerImageChangeParams", 1) + `,`, `}`, }, "") return s @@ -3037,14 +2112,9 @@ func (this *ExecNewPodHook) String() string { if this == nil { return "nil" } - repeatedStringForEnv := "[]EnvVar{" - for _, f := range this.Env { - repeatedStringForEnv += fmt.Sprintf("%v", f) + "," - } - repeatedStringForEnv += "}" s := strings.Join([]string{`&ExecNewPodHook{`, `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `Env:` + repeatedStringForEnv + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, `}`, @@ -3055,15 +2125,10 @@ func (this *LifecycleHook) String() string { if this == nil { return "nil" } - repeatedStringForTagImages := "[]TagImageHook{" - for _, f := range this.TagImages { - repeatedStringForTagImages += strings.Replace(strings.Replace(f.String(), "TagImageHook", "TagImageHook", 1), `&`, ``, 1) + "," - } - repeatedStringForTagImages += "}" s := strings.Join([]string{`&LifecycleHook{`, `FailurePolicy:` + fmt.Sprintf("%v", this.FailurePolicy) 
+ `,`, - `ExecNewPod:` + strings.Replace(this.ExecNewPod.String(), "ExecNewPodHook", "ExecNewPodHook", 1) + `,`, - `TagImages:` + repeatedStringForTagImages + `,`, + `ExecNewPod:` + strings.Replace(fmt.Sprintf("%v", this.ExecNewPod), "ExecNewPodHook", "ExecNewPodHook", 1) + `,`, + `TagImages:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TagImages), "TagImageHook", "TagImageHook", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3074,9 +2139,9 @@ func (this *RecreateDeploymentStrategyParams) String() string { } s := strings.Join([]string{`&RecreateDeploymentStrategyParams{`, `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `Pre:` + strings.Replace(this.Pre.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, - `Mid:` + strings.Replace(this.Mid.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, - `Post:` + strings.Replace(this.Post.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Pre:` + strings.Replace(fmt.Sprintf("%v", this.Pre), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Mid:` + strings.Replace(fmt.Sprintf("%v", this.Mid), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Post:` + strings.Replace(fmt.Sprintf("%v", this.Post), "LifecycleHook", "LifecycleHook", 1) + `,`, `}`, }, "") return s @@ -3089,10 +2154,10 @@ func (this *RollingDeploymentStrategyParams) String() string { `UpdatePeriodSeconds:` + valueToStringGenerated(this.UpdatePeriodSeconds) + `,`, `IntervalSeconds:` + valueToStringGenerated(this.IntervalSeconds) + `,`, `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, - `Pre:` + strings.Replace(this.Pre.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, - `Post:` + strings.Replace(this.Post.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + 
`MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `Pre:` + strings.Replace(fmt.Sprintf("%v", this.Pre), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Post:` + strings.Replace(fmt.Sprintf("%v", this.Post), "LifecycleHook", "LifecycleHook", 1) + `,`, `}`, }, "") return s @@ -3103,7 +2168,7 @@ func (this *TagImageHook) String() string { } s := strings.Join([]string{`&TagImageHook{`, `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, - `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `To:` + strings.Replace(strings.Replace(this.To.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3131,7 +2196,7 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3159,7 +2224,7 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3169,9 +2234,6 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3191,7 +2253,7 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3200,13 +2262,10 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA 
[]byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Environment = append(m.Environment, v1.EnvVar{}) + m.Environment = append(m.Environment, k8s_io_api_core_v1.EnvVar{}) if err := m.Environment[len(m.Environment)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -3225,7 +2284,7 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3235,9 +2294,6 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3252,9 +2308,6 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3282,7 +2335,7 @@ func (m *DeploymentCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3310,7 +2363,7 @@ func (m *DeploymentCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3320,9 +2373,6 @@ func (m *DeploymentCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3342,7 +2392,7 @@ func (m *DeploymentCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= 
(int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3351,9 +2401,6 @@ func (m *DeploymentCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3373,9 +2420,6 @@ func (m *DeploymentCause) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3403,7 +2447,7 @@ func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3431,7 +2475,7 @@ func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3440,9 +2484,6 @@ func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3459,9 +2500,6 @@ func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3489,7 +2527,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3517,7 +2555,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3527,9 +2565,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3549,7 +2584,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3559,9 +2594,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3581,7 +2613,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3590,9 +2622,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3614,7 +2643,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3624,9 +2653,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3646,7 +2672,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3656,9 +2682,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3678,7 +2701,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3687,9 +2710,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3706,9 +2726,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3736,7 +2753,7 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3764,7 +2781,7 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3773,9 +2790,6 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3797,7 +2811,7 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3806,9 +2820,6 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3830,7 +2841,7 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3839,9 +2850,6 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3858,9 +2866,6 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3888,7 +2893,7 @@ func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3916,7 +2921,7 @@ func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3925,9 +2930,6 @@ func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3949,7 +2951,7 @@ func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3958,9 +2960,6 @@ func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3978,9 +2977,6 @@ func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4008,7 +3004,7 @@ func 
(m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4036,7 +3032,7 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4046,9 +3042,6 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4068,7 +3061,7 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4077,20 +3070,54 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4100,86 +3127,41 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + 
intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.UpdatedAnnotations[mapkey] = mapvalue + } else { + var mapvalue string + m.UpdatedAnnotations[mapkey] = mapvalue } - m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -4195,7 +3177,7 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4204,9 +3186,6 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4223,9 +3202,6 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4253,7 +3229,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } 
@@ -4281,7 +3257,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4290,9 +3266,6 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4314,7 +3287,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4333,7 +3306,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4353,7 +3326,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4373,7 +3346,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4393,7 +3366,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4408,9 +3381,6 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4438,7 +3408,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4466,7 +3436,7 @@ func 
(m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4475,9 +3445,6 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4499,7 +3466,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4508,9 +3475,6 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4535,7 +3499,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4554,7 +3518,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4574,7 +3538,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4594,7 +3558,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4614,7 +3578,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4623,20 +3587,54 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4646,86 +3644,41 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue } - m.Selector[mapkey] = mapvalue iNdEx = 
postIndex case 8: if wireType != 2 { @@ -4741,7 +3694,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4750,14 +3703,11 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Template == nil { - m.Template = &v1.PodTemplateSpec{} + m.Template = &k8s_io_api_core_v1.PodTemplateSpec{} } if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4777,7 +3727,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift + m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4791,9 +3741,6 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4821,7 +3768,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4849,7 +3796,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.LatestVersion |= int64(b&0x7F) << shift + m.LatestVersion |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4868,7 +3815,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4887,7 +3834,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift 
+ m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4906,7 +3853,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4925,7 +3872,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4944,7 +3891,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= int32(b&0x7F) << shift + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4963,7 +3910,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4972,9 +3919,6 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4999,7 +3943,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5008,9 +3952,6 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5033,7 +3974,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5047,9 +3988,6 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) 
error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5077,7 +4015,7 @@ func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5105,7 +4043,7 @@ func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5115,9 +4053,6 @@ func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5137,7 +4072,7 @@ func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5146,9 +4081,6 @@ func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5166,9 +4098,6 @@ func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5196,7 +4125,7 @@ func (m *DeploymentLog) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5219,9 +4148,6 @@ func (m *DeploymentLog) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -5249,7 +4175,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5277,7 +4203,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5287,9 +4213,6 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5309,7 +4232,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5329,7 +4252,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5349,7 +4272,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5369,7 +4292,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5378,14 +4301,11 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.SinceTime == nil { - m.SinceTime = &v11.Time{} + m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5405,7 +4325,7 @@ func (m *DeploymentLogOptions) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5425,7 +4345,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5445,7 +4365,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5465,7 +4385,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5485,7 +4405,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5500,9 +4420,6 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5530,7 +4447,7 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5558,7 +4475,7 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5568,9 +4485,6 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5590,7 +4504,7 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= 
(int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5610,7 +4524,7 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5630,7 +4544,7 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5640,9 +4554,6 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5657,9 +4568,6 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5687,7 +4595,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5715,7 +4623,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5725,9 +4633,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5747,7 +4652,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5756,9 +4661,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 
0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5783,7 +4685,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5792,9 +4694,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5819,7 +4718,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5828,9 +4727,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5855,7 +4751,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5864,9 +4760,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5888,7 +4781,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5897,20 +4790,54 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5920,86 +4847,41 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if 
postStringIndexmapkey > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue } - m.Labels[mapkey] = mapvalue iNdEx = postIndex case 7: if wireType != 2 { @@ -6015,7 +4897,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6024,20 +4906,54 @@ func (m 
*DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6047,86 +4963,41 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var stringLenmapvalue uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Annotations[mapkey] = mapvalue + } else { + var mapvalue string + m.Annotations[mapkey] 
= mapvalue } - m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 8: if wireType != 0 { @@ -6142,7 +5013,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6157,9 +5028,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6187,7 +5055,7 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6215,7 +5083,7 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6235,7 +5103,7 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6245,9 +5113,6 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6267,7 +5132,7 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6276,9 +5141,6 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6300,7 +5162,7 @@ func (m 
*DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6310,9 +5172,6 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6327,9 +5186,6 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6357,7 +5213,7 @@ func (m *DeploymentTriggerPolicies) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6385,7 +5241,7 @@ func (m *DeploymentTriggerPolicies) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6394,9 +5250,6 @@ func (m *DeploymentTriggerPolicies) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6414,9 +5267,6 @@ func (m *DeploymentTriggerPolicies) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6444,7 +5294,7 @@ func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6472,7 +5322,7 @@ func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6482,9 +5332,6 @@ func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6504,7 +5351,7 @@ func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6513,9 +5360,6 @@ func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6535,9 +5379,6 @@ func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6565,7 +5406,7 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6593,7 +5434,7 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6603,9 +5444,6 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6625,7 +5463,7 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6634,13 +5472,10 @@ func (m 
*ExecNewPodHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, v1.EnvVar{}) + m.Env = append(m.Env, k8s_io_api_core_v1.EnvVar{}) if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -6659,7 +5494,7 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6669,9 +5504,6 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6691,7 +5523,7 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6701,9 +5533,6 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6718,9 +5547,6 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6748,7 +5574,7 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6776,7 +5602,7 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6786,9 
+5612,6 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6808,7 +5631,7 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6817,9 +5640,6 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6844,7 +5664,7 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6853,9 +5673,6 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6873,9 +5690,6 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6903,7 +5717,7 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6931,7 +5745,7 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6951,7 +5765,7 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift 
if b < 0x80 { break } @@ -6960,9 +5774,6 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6987,7 +5798,7 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6996,9 +5807,6 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7023,7 +5831,7 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7032,9 +5840,6 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7054,9 +5859,6 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7084,7 +5886,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7112,7 +5914,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7132,7 +5934,7 @@ func (m 
*RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7152,7 +5954,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7172,7 +5974,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7181,14 +5983,11 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -7208,7 +6007,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7217,14 +6016,11 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -7244,7 +6040,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break 
} @@ -7253,9 +6049,6 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7280,7 +6073,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7289,9 +6082,6 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7311,9 +6101,6 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7341,7 +6128,7 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7369,7 +6156,7 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7379,9 +6166,6 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7401,7 +6185,7 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7410,9 +6194,6 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7429,9 +6210,6 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7447,7 +6225,6 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -7479,8 +6256,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -7497,34 +6276,219 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found 
during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("github.com/openshift/api/apps/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 2517 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xcb, 0x6f, 0x1c, 0x49, + 0x19, 0x77, 0x7b, 0x66, 0xec, 0x99, 0xcf, 0xaf, 0xb8, 0x9c, 0xc7, 0xac, 0x17, 0xd9, 0xd6, 0xac, + 0x36, 0x18, 0x58, 0x66, 0x12, 0x27, 0xac, 0xf2, 0xd0, 0x2e, 0x78, 0x1c, 0x67, 0xd7, 0xd1, 0x38, + 0x31, 0x65, 0x27, 0x21, 0x11, 0x82, 0x94, 0x7b, 0xca, 0xe3, 0x5a, 0x77, 0x77, 0x0d, 0xdd, 0x35, + 0x93, 0x0c, 0x42, 0x68, 0x2f, 0x20, 0x21, 0xed, 0x81, 0x23, 0x5c, 0x10, 0x07, 0xae, 0x20, 0x0e, + 0xdc, 0x11, 0x07, 0xa4, 0x1c, 0x40, 0x5a, 0x09, 0x09, 0x56, 0x08, 0x59, 0x1b, 0x73, 0xe3, 0x4f, + 0xc8, 0x09, 0xd5, 0xa3, 0x5f, 0xf3, 0x88, 0x3d, 0x4e, 0x6e, 0xd3, 0xdf, 0xe3, 0xf7, 0x55, 0x7d, + 0xaf, 0xfa, 0xaa, 0x06, 0x2e, 0x35, 0x98, 0xd8, 0x6f, 0xed, 0x96, 0x6d, 0xee, 0x56, 0x78, 0x93, + 0x7a, 0xc1, 0x3e, 0xdb, 0x13, 0x15, 0xd2, 0x64, 0x15, 0xd2, 0x6c, 0x06, 0x95, 0xf6, 0xe5, 0x4a, + 0x83, 0x7a, 0xd4, 0x27, 0x82, 0xd6, 0xcb, 0x4d, 0x9f, 0x0b, 0x8e, 0x96, 0x62, 0x8d, 0x72, 0xa4, + 0x51, 0x26, 0x4d, 0x56, 0x96, 0x1a, 0xe5, 0xf6, 0xe5, 0xf9, 0x6f, 0x26, 0x30, 0x1b, 0xbc, 0xc1, + 0x2b, 0x4a, 0x71, 0xb7, 0xb5, 0xa7, 0xbe, 0xd4, 0x87, 0xfa, 0xa5, 0x01, 0xe7, 0x4b, 0x07, 0xd7, + 0x82, 0x32, 0xe3, 0xca, 0xa8, 0xcd, 0x7d, 0xda, 0xc7, 0xe8, 0xfc, 0xd5, 0x58, 0xc6, 0x25, 0xf6, + 0x3e, 0xf3, 0xa8, 0xdf, 0xa9, 0x34, 0x0f, 0x1a, 0x92, 0x10, 0x54, 0x5c, 0x2a, 0x48, 0x3f, 0xad, + 0xca, 0x20, 0x2d, 0xbf, 0xe5, 0x09, 0xe6, 0xd2, 0x1e, 0x85, 0xf7, 0x8f, 0x53, 0x08, 
0xec, 0x7d, + 0xea, 0x92, 0x1e, 0xbd, 0x2b, 0x83, 0xf4, 0x5a, 0x82, 0x39, 0x15, 0xe6, 0x89, 0x40, 0xf8, 0xdd, + 0x4a, 0xa5, 0x3f, 0x5b, 0xb0, 0xb0, 0xd6, 0x0a, 0x04, 0x77, 0x6f, 0xd1, 0xa6, 0xc3, 0x3b, 0x2e, + 0xf5, 0xc4, 0xb6, 0x90, 0x12, 0x8d, 0xce, 0x16, 0xf1, 0x89, 0x1b, 0xa0, 0x77, 0x20, 0xc7, 0x5c, + 0xd2, 0xa0, 0x45, 0x6b, 0xc9, 0x5a, 0x2e, 0x54, 0xa7, 0x9e, 0x1f, 0x2e, 0x8e, 0x1c, 0x1d, 0x2e, + 0xe6, 0x36, 0x24, 0x11, 0x6b, 0x1e, 0xfa, 0x2e, 0x4c, 0x50, 0xaf, 0xcd, 0x7c, 0xee, 0x49, 0x84, + 0xe2, 0xe8, 0x52, 0x66, 0x79, 0x62, 0x65, 0xbe, 0xac, 0x97, 0xa4, 0x02, 0x23, 0xbd, 0x5a, 0x6e, + 0x5f, 0x2e, 0xaf, 0x7b, 0xed, 0x07, 0xc4, 0xaf, 0xce, 0x19, 0x98, 0x89, 0xf5, 0x58, 0x0d, 0x27, + 0x31, 0xd0, 0xbb, 0x30, 0x6e, 0x73, 0xd7, 0x25, 0x5e, 0xbd, 0x98, 0x59, 0xca, 0x2c, 0x17, 0xaa, + 0x13, 0x47, 0x87, 0x8b, 0xe3, 0x6b, 0x9a, 0x84, 0x43, 0x5e, 0xe9, 0x2f, 0x16, 0xcc, 0xc4, 0x6b, + 0x5f, 0x23, 0xad, 0x80, 0xa2, 0xeb, 0x90, 0x15, 0x9d, 0x66, 0xb8, 0xe2, 0x77, 0x8d, 0xa9, 0xec, + 0x4e, 0xa7, 0x49, 0x5f, 0x1e, 0x2e, 0x9e, 0x8b, 0xc5, 0x77, 0x7c, 0xd6, 0x68, 0x50, 0x5f, 0x32, + 0xb0, 0x52, 0x41, 0x01, 0x4c, 0xaa, 0x1d, 0x19, 0x4e, 0x71, 0x74, 0xc9, 0x5a, 0x9e, 0x58, 0xf9, + 0xa0, 0x7c, 0x5c, 0xc2, 0x95, 0xbb, 0xd6, 0xb0, 0x91, 0x00, 0xa9, 0x9e, 0x39, 0x3a, 0x5c, 0x9c, + 0x4c, 0x52, 0x70, 0xca, 0x48, 0xa9, 0x0e, 0x6f, 0xbf, 0x42, 0x1d, 0xad, 0x43, 0x76, 0xcf, 0xe7, + 0xae, 0xda, 0xce, 0xc4, 0xca, 0x3b, 0xfd, 0xbc, 0x7a, 0x6f, 0xf7, 0x13, 0x6a, 0x0b, 0x4c, 0xf7, + 0xa8, 0x4f, 0x3d, 0x9b, 0x56, 0x27, 0xc3, 0x3d, 0xdf, 0xf6, 0xb9, 0x8b, 0x95, 0x7a, 0xe9, 0x5f, + 0x19, 0x98, 0x4b, 0x98, 0xe1, 0x5e, 0x9d, 0x09, 0xc6, 0x3d, 0x74, 0x33, 0xe5, 0xad, 0xaf, 0x76, + 0x79, 0xeb, 0x42, 0x1f, 0x95, 0x84, 0xbf, 0x6a, 0x30, 0x16, 0x08, 0x22, 0x5a, 0x81, 0xf2, 0x54, + 0xa1, 0x7a, 0xd5, 0xa8, 0x8f, 0x6d, 0x2b, 0xea, 0xcb, 0xc3, 0xc5, 0x3e, 0xa5, 0x55, 0x8e, 0x90, + 0xb4, 0x14, 0x36, 0x18, 0xa8, 0x0d, 0xc8, 0x21, 0x81, 0xd8, 0xf1, 0x89, 0x17, 0x68, 0x4b, 0xcc, + 0xa5, 0xc5, 0x8c, 0xda, 
0xf7, 0xd7, 0x13, 0xfb, 0x8e, 0x12, 0xbc, 0xdc, 0x3c, 0x68, 0x48, 0x42, + 0x50, 0x96, 0xf5, 0x27, 0x3d, 0x21, 0x35, 0xaa, 0xf3, 0x66, 0x15, 0xa8, 0xd6, 0x83, 0x86, 0xfb, + 0x58, 0x40, 0x17, 0x61, 0xcc, 0xa7, 0x24, 0xe0, 0x5e, 0x31, 0xab, 0x76, 0x31, 0x1d, 0xee, 0x02, + 0x2b, 0x2a, 0x36, 0x5c, 0xf4, 0x35, 0x18, 0x77, 0x69, 0x10, 0xc8, 0x6a, 0xc8, 0x29, 0xc1, 0x19, + 0x23, 0x38, 0xbe, 0xa9, 0xc9, 0x38, 0xe4, 0xa3, 0x4f, 0x60, 0x5a, 0x1a, 0xba, 0xdf, 0xac, 0x13, + 0x41, 0xd5, 0x36, 0xc6, 0x86, 0xde, 0xc6, 0x79, 0x83, 0x3e, 0x5d, 0x4b, 0x21, 0xe1, 0x2e, 0xe4, + 0xd2, 0x1f, 0x47, 0xe1, 0x4c, 0x2a, 0x4c, 0x7b, 0xac, 0x81, 0x9e, 0x40, 0x5e, 0x82, 0xd5, 0x89, + 0x20, 0x26, 0x73, 0x2e, 0x9d, 0xcc, 0xb4, 0xce, 0xa5, 0x4d, 0x2a, 0x48, 0x15, 0x99, 0x05, 0x40, + 0x4c, 0xc3, 0x11, 0x2a, 0xfa, 0x1e, 0x64, 0x83, 0x26, 0xb5, 0x4d, 0x8d, 0xbc, 0x3f, 0x54, 0x8d, + 0xa8, 0x35, 0x6e, 0x37, 0xa9, 0x1d, 0xa7, 0xaa, 0xfc, 0xc2, 0x0a, 0x11, 0x3d, 0x89, 0xb2, 0x4a, + 0xc7, 0xfe, 0xda, 0x29, 0xb0, 0x95, 0x7e, 0x1c, 0xc9, 0x74, 0xa6, 0x95, 0xfe, 0x6e, 0xc1, 0xd9, + 0x6e, 0x95, 0x1a, 0x0b, 0x04, 0xfa, 0x7e, 0x8f, 0xdb, 0xca, 0x27, 0x73, 0x9b, 0xd4, 0x56, 0x4e, + 0x3b, 0x63, 0x4c, 0xe6, 0x43, 0x4a, 0xc2, 0x65, 0x0f, 0x21, 0xc7, 0x04, 0x75, 0x03, 0xd3, 0x21, + 0x57, 0x86, 0xdf, 0x57, 0xa2, 0x01, 0x4b, 0x20, 0xac, 0xf1, 0x4a, 0x3f, 0xcf, 0x40, 0xb1, 0x5b, + 0x14, 0x73, 0xc7, 0xd9, 0x25, 0xf6, 0x01, 0x5a, 0x82, 0xac, 0x47, 0xdc, 0xb0, 0xc2, 0x23, 0x87, + 0xdf, 0x25, 0x2e, 0xc5, 0x8a, 0x83, 0x7e, 0x63, 0x01, 0x6a, 0xa9, 0x84, 0xaa, 0xaf, 0x7a, 0x1e, + 0x17, 0x44, 0x96, 0x46, 0xb8, 0x4a, 0x3c, 0xfc, 0x2a, 0x43, 0xd3, 0xe5, 0xfb, 0x3d, 0xa0, 0xeb, + 0x9e, 0xf0, 0x3b, 0x71, 0x85, 0xf6, 0x0a, 0xe0, 0x3e, 0x2b, 0x41, 0x4f, 0x4c, 0xae, 0xe9, 0x7c, + 0xf8, 0xf0, 0xf4, 0x2b, 0x1a, 0x94, 0x73, 0xf3, 0xeb, 0x70, 0x61, 0xc0, 0x62, 0xd1, 0x19, 0xc8, + 0x1c, 0xd0, 0x8e, 0x76, 0x1f, 0x96, 0x3f, 0xd1, 0x59, 0xc8, 0xb5, 0x89, 0xd3, 0xa2, 0xba, 0xeb, + 0x61, 0xfd, 0x71, 0x63, 0xf4, 0x9a, 0x55, 0xfa, 0x53, 0x06, 
0xbe, 0xf2, 0x2a, 0xdb, 0x6f, 0xa8, + 0x9b, 0xa3, 0xf7, 0x20, 0xef, 0xd3, 0x36, 0x0b, 0x18, 0xf7, 0xd4, 0x22, 0x32, 0x71, 0xde, 0x61, + 0x43, 0xc7, 0x91, 0x04, 0x5a, 0x85, 0x19, 0xe6, 0xd9, 0x4e, 0xab, 0x1e, 0x1e, 0x2a, 0xba, 0xb2, + 0xf2, 0xd5, 0x0b, 0x46, 0x69, 0x66, 0x23, 0xcd, 0xc6, 0xdd, 0xf2, 0x49, 0x08, 0xea, 0x36, 0x1d, + 0x22, 0xa8, 0x6a, 0x96, 0x7d, 0x20, 0x0c, 0x1b, 0x77, 0xcb, 0xa3, 0x07, 0x70, 0xde, 0x90, 0x30, + 0x6d, 0x3a, 0xcc, 0x56, 0x3e, 0x96, 0x15, 0xa2, 0xba, 0x69, 0xbe, 0xba, 0x60, 0x90, 0xce, 0x6f, + 0xf4, 0x95, 0xc2, 0x03, 0xb4, 0x13, 0x4b, 0x0b, 0x67, 0x17, 0xd5, 0x6c, 0x7b, 0x97, 0x16, 0xb2, + 0x71, 0xb7, 0x7c, 0xe9, 0x7f, 0xb9, 0xde, 0x7e, 0xa0, 0xc2, 0xb5, 0x0b, 0xf9, 0x20, 0x04, 0xd5, + 0x21, 0xbb, 0x3a, 0x4c, 0xf2, 0x85, 0x06, 0xe2, 0xe8, 0x44, 0x6b, 0x88, 0x70, 0x11, 0x85, 0xbc, + 0x08, 0xc3, 0xa2, 0x9b, 0xe9, 0xcd, 0x61, 0x6c, 0x98, 0x10, 0x6d, 0x71, 0x87, 0xd9, 0x8c, 0x06, + 0xd5, 0x49, 0x69, 0x26, 0x0a, 0x64, 0x04, 0xad, 0x53, 0x46, 0x79, 0x4e, 0x47, 0x3f, 0x97, 0x4c, + 0x19, 0x4d, 0xc7, 0x91, 0x04, 0xaa, 0xc1, 0xd9, 0x30, 0x7d, 0x3e, 0x66, 0x81, 0xe0, 0x7e, 0xa7, + 0xc6, 0x5c, 0x26, 0x54, 0xd0, 0x73, 0xd5, 0xe2, 0xd1, 0xe1, 0xe2, 0x59, 0xdc, 0x87, 0x8f, 0xfb, + 0x6a, 0xc9, 0x16, 0x24, 0x68, 0x20, 0x4c, 0xa0, 0xa3, 0x84, 0xde, 0xa1, 0x81, 0xc0, 0x8a, 0x23, + 0xcf, 0xe0, 0xa6, 0x1c, 0x7d, 0xea, 0x26, 0x76, 0x51, 0xe7, 0xde, 0x52, 0x54, 0x6c, 0xb8, 0xc8, + 0x87, 0x7c, 0x40, 0x1d, 0x6a, 0x0b, 0xee, 0x17, 0xc7, 0x55, 0x7f, 0xba, 0x75, 0xba, 0x93, 0xa7, + 0xbc, 0x6d, 0x60, 0x74, 0x47, 0x8a, 0x03, 0x64, 0xc8, 0x38, 0xb2, 0x83, 0x36, 0x21, 0x2f, 0xc2, + 0xa4, 0xcf, 0x0f, 0xae, 0xdb, 0x2d, 0x5e, 0x0f, 0x73, 0x5d, 0xb7, 0x19, 0x15, 0x88, 0xb0, 0x1c, + 0x22, 0x08, 0x99, 0xaf, 0x2e, 0xf3, 0x30, 0x25, 0xf5, 0xce, 0x36, 0xb5, 0xb9, 0x57, 0x0f, 0x8a, + 0x05, 0xe5, 0xd5, 0x28, 0x5f, 0x37, 0xd3, 0x6c, 0xdc, 0x2d, 0x3f, 0x7f, 0x13, 0xa6, 0x52, 0xcb, + 0x1f, 0xaa, 0x47, 0xfd, 0x21, 0x07, 0xe7, 0xfb, 0x9f, 0x97, 0xe8, 0x26, 0x4c, 0xc9, 0x25, 0x06, + 
0xe2, 0x01, 0xf5, 0x55, 0x6f, 0xb1, 0x54, 0x6f, 0x39, 0x67, 0x16, 0x36, 0x55, 0x4b, 0x32, 0x71, + 0x5a, 0x16, 0xdd, 0x01, 0xc4, 0x77, 0x03, 0xea, 0xb7, 0x69, 0xfd, 0x23, 0x7d, 0xd1, 0x88, 0xbb, + 0x53, 0xd4, 0xf0, 0xef, 0xf5, 0x48, 0xe0, 0x3e, 0x5a, 0x43, 0x26, 0xeb, 0x2a, 0xcc, 0x98, 0x43, + 0x23, 0x64, 0x9a, 0x3c, 0x8d, 0x3c, 0x7a, 0x3f, 0xcd, 0xc6, 0xdd, 0xf2, 0xe8, 0x23, 0x98, 0x25, + 0x6d, 0xc2, 0x1c, 0xb2, 0xeb, 0xd0, 0x08, 0x24, 0xa7, 0x40, 0xde, 0x32, 0x20, 0xb3, 0xab, 0xdd, + 0x02, 0xb8, 0x57, 0x07, 0x6d, 0xc2, 0x5c, 0xcb, 0xeb, 0x85, 0x1a, 0x53, 0x50, 0x6f, 0x1b, 0xa8, + 0xb9, 0xfb, 0xbd, 0x22, 0xb8, 0x9f, 0x1e, 0x7a, 0x0c, 0xe3, 0x75, 0x2a, 0x08, 0x73, 0x82, 0xe2, + 0xb8, 0x4a, 0xbd, 0x2b, 0xc3, 0xa4, 0xfb, 0x2d, 0xad, 0xaa, 0x2f, 0x4f, 0xe6, 0x03, 0x87, 0x80, + 0x88, 0x01, 0xd8, 0xe1, 0x28, 0x1e, 0x14, 0xf3, 0xaa, 0x9a, 0xbe, 0x35, 0x64, 0x35, 0x69, 0xed, + 0x78, 0x54, 0x8c, 0x48, 0x01, 0x4e, 0x80, 0xcb, 0xc4, 0xf2, 0x65, 0x02, 0x47, 0xfe, 0xd0, 0x19, + 0x1f, 0x25, 0x16, 0x4e, 0x32, 0x71, 0x5a, 0xb6, 0xf4, 0x6b, 0x0b, 0x66, 0x7b, 0xf6, 0x94, 0x9c, + 0xc6, 0xad, 0x63, 0xa6, 0xf1, 0x47, 0x30, 0x66, 0xcb, 0xf6, 0x11, 0x8e, 0x34, 0x97, 0x87, 0xbe, + 0xd0, 0xc5, 0xfd, 0x48, 0x7d, 0x06, 0xd8, 0x00, 0x96, 0x66, 0x60, 0x2a, 0x16, 0xad, 0xf1, 0x46, + 0xe9, 0xb3, 0x6c, 0xf2, 0x28, 0xa9, 0xf1, 0xc6, 0xbd, 0xa6, 0x76, 0x41, 0x05, 0x0a, 0x36, 0xf7, + 0x04, 0x91, 0x03, 0xa4, 0x59, 0xf1, 0xac, 0x01, 0x2d, 0xac, 0x85, 0x0c, 0x1c, 0xcb, 0xc8, 0x96, + 0xb8, 0xc7, 0x1d, 0x87, 0x3f, 0x55, 0x35, 0x94, 0x68, 0x89, 0xb7, 0x15, 0x15, 0x1b, 0xae, 0xac, + 0x95, 0xa6, 0xec, 0xba, 0xbc, 0x15, 0x1e, 0xeb, 0x51, 0xad, 0x6c, 0x19, 0x3a, 0x8e, 0x24, 0xd0, + 0x55, 0x98, 0x0c, 0x98, 0x67, 0xd3, 0xb0, 0xf5, 0x64, 0xf5, 0xf4, 0x20, 0xef, 0xa8, 0xdb, 0x09, + 0x3a, 0x4e, 0x49, 0xa1, 0x87, 0x50, 0x50, 0xdf, 0xea, 0x2a, 0x93, 0x1b, 0xfa, 0x2a, 0x33, 0x25, + 0x37, 0xb9, 0x1d, 0x02, 0xe0, 0x18, 0x0b, 0xad, 0x00, 0x08, 0xe6, 0xd2, 0x40, 0x10, 0xb7, 0x19, + 0x98, 0xde, 0x1f, 0x25, 0xd3, 0x4e, 
0xc4, 0xc1, 0x09, 0x29, 0xf4, 0x0d, 0x28, 0xc8, 0x14, 0xa8, + 0x31, 0x8f, 0xea, 0xaa, 0xc8, 0x68, 0x03, 0x3b, 0x21, 0x11, 0xc7, 0x7c, 0x54, 0x06, 0x70, 0xe4, + 0x19, 0x54, 0xed, 0x08, 0x1a, 0xa8, 0xf6, 0x9d, 0xa9, 0x4e, 0x4b, 0xf0, 0x5a, 0x44, 0xc5, 0x09, + 0x09, 0xe9, 0x75, 0x8f, 0x3f, 0x25, 0x4c, 0xa8, 0x14, 0x4d, 0x78, 0xfd, 0x2e, 0x7f, 0x48, 0x98, + 0xc0, 0x86, 0x8b, 0xde, 0x85, 0xf1, 0xb6, 0x69, 0x92, 0xa0, 0x40, 0x55, 0x8d, 0x85, 0xad, 0x31, + 0xe4, 0x95, 0xfe, 0x9d, 0xca, 0x5d, 0x4c, 0x7f, 0xd4, 0x92, 0xa7, 0xdd, 0xf1, 0x23, 0xf9, 0x45, + 0x18, 0xd3, 0xdd, 0xb5, 0x3b, 0xf8, 0xba, 0x05, 0x63, 0xc3, 0x45, 0xef, 0x40, 0x6e, 0x8f, 0xfb, + 0x36, 0x35, 0x91, 0x8f, 0xae, 0x07, 0xb7, 0x25, 0x11, 0x6b, 0x1e, 0x7a, 0x00, 0x33, 0xf4, 0x59, + 0x7a, 0xfe, 0xcb, 0xaa, 0x47, 0x95, 0xf7, 0x64, 0x6f, 0x5c, 0x4f, 0xb3, 0x06, 0xbf, 0x91, 0x74, + 0x83, 0x94, 0xfe, 0x31, 0x0e, 0xa8, 0x77, 0xd8, 0x41, 0x37, 0x52, 0x4f, 0x0a, 0x17, 0xbb, 0x9e, + 0x14, 0xce, 0xf7, 0x6a, 0x24, 0x5e, 0x14, 0xda, 0x30, 0x69, 0xab, 0x17, 0x29, 0xfd, 0xfe, 0x64, + 0x06, 0xa2, 0xef, 0x1c, 0x5f, 0xb0, 0xaf, 0x7e, 0xc7, 0xd2, 0x09, 0xbe, 0x96, 0x40, 0xc6, 0x29, + 0x3b, 0xe8, 0xa7, 0x30, 0xed, 0x53, 0xdb, 0xa7, 0x44, 0x50, 0x63, 0x59, 0xdf, 0x35, 0xaa, 0xc7, + 0x5b, 0xc6, 0x46, 0x6f, 0xa0, 0x6d, 0x24, 0x2f, 0xf1, 0x38, 0x85, 0x8e, 0xbb, 0xac, 0xa1, 0x1f, + 0xc3, 0x94, 0xcf, 0x1d, 0x87, 0x79, 0x0d, 0x63, 0x3e, 0xab, 0xcc, 0xaf, 0x9e, 0xc0, 0xbc, 0x56, + 0x1b, 0x68, 0x7d, 0x56, 0xf5, 0xd7, 0x24, 0x36, 0x4e, 0x9b, 0x42, 0x8f, 0xa0, 0xe0, 0xd3, 0x80, + 0xb7, 0x7c, 0x9b, 0x06, 0xa6, 0xb8, 0x97, 0xfb, 0x0d, 0x38, 0xd8, 0x08, 0xc9, 0x2c, 0x66, 0x3e, + 0x95, 0xb6, 0x82, 0xb8, 0x87, 0x85, 0xdc, 0x00, 0xc7, 0x68, 0x68, 0x5f, 0xa6, 0xf1, 0x2e, 0x75, + 0x64, 0x69, 0x67, 0x4e, 0x16, 0xc8, 0xde, 0x8d, 0x94, 0x6b, 0x0a, 0x42, 0x0f, 0x6a, 0x89, 0x42, + 0x90, 0x44, 0x6c, 0xf0, 0xd1, 0x4f, 0x60, 0x82, 0x24, 0xee, 0xae, 0x7a, 0x36, 0x5c, 0x3f, 0x95, + 0xb9, 0x9e, 0xeb, 0x6a, 0xf4, 0x5c, 0x99, 0xbc, 0xa7, 0x26, 0xcd, 0xa1, 
0x7b, 0x70, 0x8e, 0xd8, + 0x82, 0xb5, 0xe9, 0x2d, 0x4a, 0xea, 0x0e, 0xf3, 0xa2, 0xf6, 0xaa, 0x1b, 0xce, 0x5b, 0x47, 0x87, + 0x8b, 0xe7, 0x56, 0xfb, 0x09, 0xe0, 0xfe, 0x7a, 0xf3, 0xd7, 0x61, 0x22, 0xb1, 0xeb, 0x61, 0xe6, + 0xbb, 0xf9, 0x0f, 0xe1, 0xcc, 0x6b, 0xdd, 0x61, 0x7f, 0x37, 0x0a, 0xa5, 0x9e, 0x06, 0xa0, 0x9e, + 0x24, 0xd7, 0xf6, 0x89, 0xd7, 0x08, 0x33, 0xb6, 0x02, 0x05, 0xd2, 0x12, 0xdc, 0x25, 0x82, 0xd9, + 0x0a, 0x38, 0x1f, 0xe7, 0xc2, 0x6a, 0xc8, 0xc0, 0xb1, 0x0c, 0xba, 0x01, 0xd3, 0xd1, 0xe1, 0x26, + 0x3b, 0x9d, 0x3e, 0x8d, 0x0b, 0xba, 0x3c, 0xd6, 0x52, 0x1c, 0xdc, 0x25, 0x19, 0x5d, 0x9b, 0x33, + 0xaf, 0x77, 0x6d, 0xbe, 0x13, 0xbe, 0x30, 0xaa, 0x3d, 0xd1, 0xba, 0xda, 0x95, 0x79, 0xf5, 0xeb, + 0x7a, 0x35, 0x4c, 0x4a, 0xe0, 0x3e, 0x5a, 0xa5, 0x9f, 0x59, 0xf0, 0xd6, 0xc0, 0x5b, 0x18, 0xfa, + 0x41, 0xf8, 0xd4, 0x63, 0xa9, 0x44, 0xbc, 0x7e, 0xda, 0x1b, 0x5d, 0xa7, 0xff, 0x8b, 0xcf, 0x8d, + 0xfc, 0xaf, 0x7e, 0xbb, 0x38, 0xf2, 0xe9, 0x7f, 0x96, 0x46, 0x4a, 0x5f, 0x5a, 0x70, 0x61, 0x80, + 0xee, 0xeb, 0x3c, 0x85, 0xff, 0xc2, 0x82, 0x59, 0xd6, 0x1d, 0x74, 0xd3, 0x8e, 0x6f, 0x9d, 0x62, + 0x37, 0x3d, 0x09, 0x54, 0x3d, 0x27, 0x67, 0xea, 0x1e, 0x32, 0xee, 0xb5, 0x5a, 0xfa, 0xa7, 0x05, + 0xd3, 0xeb, 0xcf, 0xa8, 0x7d, 0x97, 0x3e, 0xdd, 0xe2, 0xf5, 0x8f, 0x39, 0x3f, 0x48, 0xfe, 0x3f, + 0x60, 0x0d, 0xfe, 0x7f, 0x00, 0x5d, 0x87, 0x0c, 0xf5, 0xda, 0x27, 0xf8, 0x47, 0x62, 0xc2, 0xf8, + 0x26, 0xb3, 0xee, 0xb5, 0xb1, 0xd4, 0x91, 0x23, 0x6b, 0x2a, 0x09, 0x55, 0xee, 0x15, 0xe2, 0x91, + 0x35, 0x95, 0xb1, 0x38, 0x2d, 0xab, 0xa6, 0x03, 0xee, 0xb4, 0x64, 0x92, 0x67, 0xe3, 0xe5, 0x3d, + 0xd0, 0x24, 0x1c, 0xf2, 0x4a, 0xbf, 0x1f, 0x85, 0xa9, 0x1a, 0xdb, 0xa3, 0x76, 0xc7, 0x76, 0xa8, + 0xda, 0xd7, 0x23, 0x98, 0xda, 0x23, 0xcc, 0x69, 0xf9, 0x54, 0x87, 0xd0, 0x84, 0xee, 0x4a, 0x68, + 0xf5, 0x76, 0x92, 0xf9, 0xf2, 0x70, 0x71, 0x3e, 0xa5, 0x9e, 0xe2, 0xe2, 0x34, 0x12, 0x7a, 0x02, + 0x40, 0x23, 0x27, 0x9a, 0x48, 0x5e, 0x3a, 0x3e, 0x92, 0x69, 0xc7, 0xeb, 0xd9, 0x29, 0xa6, 0xe1, + 0x04, 0x26, 
0xfa, 0xa1, 0x1c, 0xcc, 0x1a, 0x2a, 0xa4, 0x81, 0xfa, 0xdb, 0x66, 0x62, 0xa5, 0x7c, + 0xbc, 0x81, 0x1d, 0xa3, 0xa2, 0xe0, 0xa3, 0x16, 0x12, 0x52, 0xd5, 0x30, 0x67, 0x7e, 0x96, 0xfe, + 0x3a, 0x0a, 0x4b, 0xc7, 0x1d, 0xb7, 0xb2, 0xcf, 0xc8, 0x61, 0x91, 0xb7, 0x44, 0xd8, 0x84, 0xf5, + 0x2d, 0x56, 0xf5, 0x99, 0x9d, 0x14, 0x07, 0x77, 0x49, 0xa2, 0x3b, 0x90, 0x69, 0xfa, 0xd4, 0x38, + 0xa7, 0x72, 0xfc, 0xda, 0x53, 0xde, 0xaf, 0x8e, 0xcb, 0x04, 0xda, 0xf2, 0x29, 0x96, 0x20, 0x12, + 0xcb, 0x65, 0x75, 0xd3, 0xb2, 0x4e, 0x87, 0xb5, 0xc9, 0xea, 0x58, 0x82, 0xa0, 0x4d, 0xc8, 0x36, + 0x79, 0x20, 0xcc, 0x54, 0x30, 0x34, 0x58, 0x5e, 0x56, 0xfd, 0x16, 0x0f, 0x04, 0x56, 0x30, 0xa5, + 0xbf, 0x65, 0x61, 0xf1, 0x98, 0xb9, 0x01, 0x6d, 0xc0, 0x9c, 0xbe, 0x24, 0x6f, 0x51, 0x9f, 0xf1, + 0x7a, 0xda, 0x97, 0x17, 0xd4, 0x25, 0xb6, 0x97, 0x8d, 0xfb, 0xe9, 0xa0, 0x0f, 0x60, 0x86, 0x79, + 0x82, 0xfa, 0x6d, 0xe2, 0x84, 0x30, 0xfa, 0x59, 0x60, 0x4e, 0xbf, 0xce, 0xa5, 0x58, 0xb8, 0x5b, + 0xb6, 0x4f, 0x40, 0x33, 0x27, 0x0e, 0xa8, 0x03, 0xd3, 0x2e, 0x79, 0x96, 0xb8, 0x6e, 0x1b, 0x17, + 0x0e, 0xfe, 0x37, 0xa4, 0x25, 0x98, 0x53, 0xd6, 0x7f, 0x98, 0x96, 0x37, 0x3c, 0x71, 0xcf, 0xdf, + 0x16, 0x3e, 0xf3, 0x1a, 0xda, 0xda, 0x66, 0x0a, 0x0b, 0x77, 0x61, 0xa3, 0xc7, 0x90, 0x77, 0xc9, + 0xb3, 0xed, 0x96, 0xdf, 0x08, 0x6f, 0x49, 0xc3, 0xdb, 0x51, 0xcf, 0x46, 0x9b, 0x06, 0x05, 0x47, + 0x78, 0x61, 0x6a, 0x8e, 0xbf, 0x89, 0xd4, 0x0c, 0xd3, 0x29, 0xff, 0x66, 0xd2, 0xe9, 0x33, 0x0b, + 0x26, 0x93, 0x55, 0xdc, 0xdb, 0x3b, 0xad, 0x21, 0x7a, 0xe7, 0xb7, 0x61, 0x54, 0x70, 0x53, 0x82, + 0x27, 0x3a, 0xe9, 0xc1, 0xc0, 0x8e, 0xee, 0x70, 0x3c, 0x2a, 0x78, 0x75, 0xf9, 0xf9, 0x8b, 0x85, + 0x91, 0xcf, 0x5f, 0x2c, 0x8c, 0x7c, 0xf1, 0x62, 0x61, 0xe4, 0xd3, 0xa3, 0x05, 0xeb, 0xf9, 0xd1, + 0x82, 0xf5, 0xf9, 0xd1, 0x82, 0xf5, 0xc5, 0xd1, 0x82, 0xf5, 0xe5, 0xd1, 0x82, 0xf5, 0xcb, 0xff, + 0x2e, 0x8c, 0x3c, 0x1e, 0x6d, 0x5f, 0xfe, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xb6, 0x32, + 0x5f, 0x7c, 0x20, 0x00, 0x00, +} diff --git 
a/vendor/github.com/openshift/api/apps/v1/generated.proto b/vendor/github.com/openshift/api/apps/v1/generated.proto index d15f20c0d..1648f9461 100644 --- a/vendor/github.com/openshift/api/apps/v1/generated.proto +++ b/vendor/github.com/openshift/api/apps/v1/generated.proto @@ -7,6 +7,7 @@ package github.com.openshift.api.apps.v1; import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; @@ -15,7 +16,7 @@ option go_package = "v1"; // CustomDeploymentStrategyParams are the input to the Custom deployment strategy. message CustomDeploymentStrategyParams { - // Image specifies a container image which can carry out a deployment. + // Image specifies a Docker image which can carry out a deployment. optional string image = 1; // Environment holds the environment which will be given to the container for Image. @@ -67,24 +68,25 @@ message DeploymentCondition { // A single deployment configuration is usually analogous to a single micro-service. Can support many different // deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as // well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller. -// +// // A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed. // Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment // is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment // is triggered by any means. message DeploymentConfig { + // Standard object's metadata. 
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec represents a desired deployment state and how to deploy to it. optional DeploymentConfigSpec spec = 2; // Status represents the current deployment state. - // +optional optional DeploymentConfigStatus status = 3; } // DeploymentConfigList is a collection of deployment configs. message DeploymentConfigList { + // Standard object's metadata. optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of deployment configs @@ -360,7 +362,7 @@ message ExecNewPodHook { repeated k8s.io.api.core.v1.EnvVar env = 2; // ContainerName is the name of a container in the deployment pod template - // whose container image will be used for the hook pod's container. + // whose Docker image will be used for the hook pod's container. optional string containerName = 3; // Volumes is a list of named volumes from the pod template which should be @@ -420,9 +422,9 @@ message RollingDeploymentStrategyParams { // during the update. Value can be an absolute number (ex: 5) or a // percentage of total pods at the start of update (ex: 10%). Absolute // number is calculated from percentage by rounding down. - // + // // This cannot be 0 if MaxSurge is 0. By default, 25% is used. - // + // // Example: when this is set to 30%, the old RC can be scaled down by 30% // immediately when the rolling update starts. Once new pods are ready, old // RC can be scaled down further, followed by scaling up the new RC, @@ -434,9 +436,9 @@ message RollingDeploymentStrategyParams { // original number of pods. Value can be an absolute number (ex: 5) or a // percentage of total pods at the start of the update (ex: 10%). Absolute // number is calculated from percentage by rounding up. - // + // // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used. - // + // // Example: when this is set to 30%, the new RC can be scaled up by 30% // immediately when the rolling update starts. 
Once old pods have been // killed, new RC can be scaled up further, ensuring that total number of diff --git a/vendor/github.com/openshift/api/apps/v1/legacy.go b/vendor/github.com/openshift/api/apps/v1/legacy.go deleted file mode 100644 index c8fa0ed99..000000000 --- a/vendor/github.com/openshift/api/apps/v1/legacy.go +++ /dev/null @@ -1,28 +0,0 @@ -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} - legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) - DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme -) - -func addLegacyKnownTypes(scheme *runtime.Scheme) error { - types := []runtime.Object{ - &DeploymentConfig{}, - &DeploymentConfigList{}, - &DeploymentConfigRollback{}, - &DeploymentRequest{}, - &DeploymentLog{}, - &DeploymentLogOptions{}, - &extensionsv1beta1.Scale{}, - } - scheme.AddKnownTypes(legacyGroupVersion, types...) 
- return nil -} diff --git a/vendor/github.com/openshift/api/apps/v1/register.go b/vendor/github.com/openshift/api/apps/v1/register.go index 0c1e47e6d..88e40b4d4 100644 --- a/vendor/github.com/openshift/api/apps/v1/register.go +++ b/vendor/github.com/openshift/api/apps/v1/register.go @@ -1,37 +1,50 @@ package v1 import ( - corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -var ( - GroupName = "apps.openshift.io" - GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) - // Install is a function which adds this version to a scheme - Install = schemeBuilder.AddToScheme - - // SchemeGroupVersion generated code relies on this name - // Deprecated - SchemeGroupVersion = GroupVersion - // AddToScheme exists solely to keep the old generators creating valid code - // DEPRECATED - AddToScheme = schemeBuilder.AddToScheme +const ( + LegacyGroupName = "" + GroupName = "apps.openshift.io" +) + +var ( + SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "v1"} + + LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme + + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme ) -// Resource generated code relies on this being here, but it logically belongs to the group -// DEPRECATED func Resource(resource string) schema.GroupResource { - return schema.GroupResource{Group: GroupName, Resource: resource} + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. 
+func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &DeploymentConfig{}, + &DeploymentConfigList{}, + &DeploymentConfigRollback{}, + &DeploymentRequest{}, + &DeploymentLog{}, + &DeploymentLogOptions{}, + &extensionsv1beta1.Scale{}, + } + scheme.AddKnownTypes(LegacySchemeGroupVersion, types...) + return nil } // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(GroupVersion, + scheme.AddKnownTypes(SchemeGroupVersion, &DeploymentConfig{}, &DeploymentConfigList{}, &DeploymentConfigRollback{}, @@ -40,6 +53,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &DeploymentLogOptions{}, &extensionsv1beta1.Scale{}, ) - metav1.AddToGroupVersion(scheme, GroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/github.com/openshift/api/apps/v1/types.go b/vendor/github.com/openshift/api/apps/v1/types.go index ed147807d..4f70b3d40 100644 --- a/vendor/github.com/openshift/api/apps/v1/types.go +++ b/vendor/github.com/openshift/api/apps/v1/types.go @@ -25,14 +25,14 @@ import ( // is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment // is triggered by any means. type DeploymentConfig struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec represents a desired deployment state and how to deploy to it. Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // Status represents the current deployment state. - // +optional Status DeploymentConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -120,7 +120,7 @@ const ( // CustomDeploymentStrategyParams are the input to the Custom deployment strategy. 
type CustomDeploymentStrategyParams struct { - // Image specifies a container image which can carry out a deployment. + // Image specifies a Docker image which can carry out a deployment. Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` // Environment holds the environment which will be given to the container for Image. Environment []corev1.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"` @@ -225,7 +225,7 @@ type ExecNewPodHook struct { // Env is a set of environment variables to supply to the hook pod's container. Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` // ContainerName is the name of a container in the deployment pod template - // whose container image will be used for the hook pod's container. + // whose Docker image will be used for the hook pod's container. ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"` // Volumes is a list of named volumes from the pod template which should be // copied to the hook pod. Volumes names not found in pod spec are ignored. @@ -264,7 +264,7 @@ type DeploymentTriggerType string const ( // DeploymentTriggerOnImageChange will create new deployments in response to updated tags from - // a container image repository. + // a Docker image repository. DeploymentTriggerOnImageChange DeploymentTriggerType = "ImageChange" // DeploymentTriggerOnConfigChange will create new deployments in response to changes to // the ControllerTemplate of a DeploymentConfig. @@ -389,6 +389,7 @@ type DeploymentCondition struct { // DeploymentConfigList is a collection of deployment configs. type DeploymentConfigList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of deployment configs diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apps/v1/types_swagger_doc_generated.go similarity index 92% rename from vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go rename to vendor/github.com/openshift/api/apps/v1/types_swagger_doc_generated.go index 9e3a07e8f..78fe5ddc0 100644 --- a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/apps/v1/types_swagger_doc_generated.go @@ -8,12 +8,12 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-swagger-docs.sh +// Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE var map_CustomDeploymentStrategyParams = map[string]string{ "": "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.", - "image": "Image specifies a container image which can carry out a deployment.", + "image": "Image specifies a Docker image which can carry out a deployment.", "environment": "Environment holds the environment which will be given to the container for Image.", "command": "Command is optional and overrides CMD in the container Image.", } @@ -56,9 +56,10 @@ func (DeploymentCondition) SwaggerDoc() map[string]string { } var map_DeploymentConfig = map[string]string{ - "": "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. 
Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.", - "spec": "Spec represents a desired deployment state and how to deploy to it.", - "status": "Status represents the current deployment state.", + "": "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. 
The `latestVersion` field is updated when a new deployment is triggered by any means.", + "metadata": "Standard object's metadata.", + "spec": "Spec represents a desired deployment state and how to deploy to it.", + "status": "Status represents the current deployment state.", } func (DeploymentConfig) SwaggerDoc() map[string]string { @@ -66,8 +67,9 @@ func (DeploymentConfig) SwaggerDoc() map[string]string { } var map_DeploymentConfigList = map[string]string{ - "": "DeploymentConfigList is a collection of deployment configs.", - "items": "Items is a list of deployment configs", + "": "DeploymentConfigList is a collection of deployment configs.", + "metadata": "Standard object's metadata.", + "items": "Items is a list of deployment configs", } func (DeploymentConfigList) SwaggerDoc() map[string]string { @@ -223,7 +225,7 @@ var map_ExecNewPodHook = map[string]string{ "": "ExecNewPodHook is a hook implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.", "command": "Command is the action command and its arguments.", "env": "Env is a set of environment variables to supply to the hook pod's container.", - "containerName": "ContainerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.", + "containerName": "ContainerName is the name of a container in the deployment pod template whose Docker image will be used for the hook pod's container.", "volumes": "Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. 
An empty list means no volumes will be copied.", } @@ -255,7 +257,7 @@ func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string { } var map_RollingDeploymentStrategyParams = map[string]string{ - "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.", + "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.", "updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", "intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go index f6ab2fd48..383c2f516 100644 --- a/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go @@ -1,13 +1,15 @@ // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! package v1 import ( - corev1 "k8s.io/api/core/v1" + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" + unsafe "unsafe" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -15,7 +17,7 @@ func (in *CustomDeploymentStrategyParams) DeepCopyInto(out *CustomDeploymentStra *out = *in if in.Environment != nil { in, out := &in.Environment, &out.Environment - *out = make([]corev1.EnvVar, len(*in)) + *out = make([]core_v1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -43,8 +45,12 @@ func (in *DeploymentCause) DeepCopyInto(out *DeploymentCause) { *out = *in if in.ImageTrigger != nil { in, out := &in.ImageTrigger, &out.ImageTrigger - *out = new(DeploymentCauseImageTrigger) - **out = **in + if *in == nil { + *out = nil + } else { + *out = new(DeploymentCauseImageTrigger) + **out = **in + } } return } @@ -94,6 +100,26 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConditionType) DeepCopyInto(out *DeploymentConditionType) { + { + in := (*string)(unsafe.Pointer(in)) + out := (*string)(unsafe.Pointer(out)) + *out = *in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConditionType. +func (in *DeploymentConditionType) DeepCopy() *DeploymentConditionType { + if in == nil { + return nil + } + out := new(DeploymentConditionType) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DeploymentConfig) DeepCopyInto(out *DeploymentConfig) { *out = *in @@ -118,15 +144,16 @@ func (in *DeploymentConfig) DeepCopy() *DeploymentConfig { func (in *DeploymentConfig) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c + } else { + return nil } - return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DeploymentConfigList) DeepCopyInto(out *DeploymentConfigList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DeploymentConfig, len(*in)) @@ -151,8 +178,9 @@ func (in *DeploymentConfigList) DeepCopy() *DeploymentConfigList { func (in *DeploymentConfigList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c + } else { + return nil } - return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -184,8 +212,9 @@ func (in *DeploymentConfigRollback) DeepCopy() *DeploymentConfigRollback { func (in *DeploymentConfigRollback) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c + } else { + return nil } - return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -218,8 +247,12 @@ func (in *DeploymentConfigSpec) DeepCopyInto(out *DeploymentConfigSpec) { } if in.RevisionHistoryLimit != nil { in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } } if in.Selector != nil { in, out := &in.Selector, &out.Selector @@ -230,8 +263,12 @@ func (in *DeploymentConfigSpec) DeepCopyInto(out *DeploymentConfigSpec) { } if in.Template != nil { in, out := &in.Template, &out.Template - *out = new(corev1.PodTemplateSpec) - (*in).DeepCopyInto(*out) + if *in == nil { + *out = nil + } else { + *out = new(core_v1.PodTemplateSpec) + (*in).DeepCopyInto(*out) + } } return } @@ -251,8 +288,12 @@ func (in *DeploymentConfigStatus) DeepCopyInto(out *DeploymentConfigStatus) { *out = *in if in.Details != nil { in, out := &in.Details, &out.Details - *out = new(DeploymentDetails) - (*in).DeepCopyInto(*out) + if *in == nil { + *out = nil + } else { + *out = 
new(DeploymentDetails) + (*in).DeepCopyInto(*out) + } } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions @@ -318,8 +359,9 @@ func (in *DeploymentLog) DeepCopy() *DeploymentLog { func (in *DeploymentLog) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c + } else { + return nil } - return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -328,27 +370,48 @@ func (in *DeploymentLogOptions) DeepCopyInto(out *DeploymentLogOptions) { out.TypeMeta = in.TypeMeta if in.SinceSeconds != nil { in, out := &in.SinceSeconds, &out.SinceSeconds - *out = new(int64) - **out = **in + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } } if in.SinceTime != nil { in, out := &in.SinceTime, &out.SinceTime - *out = (*in).DeepCopy() + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.Time) + (*in).DeepCopyInto(*out) + } } if in.TailLines != nil { in, out := &in.TailLines, &out.TailLines - *out = new(int64) - **out = **in + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } } if in.LimitBytes != nil { in, out := &in.LimitBytes, &out.LimitBytes - *out = new(int64) - **out = **in + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } } if in.Version != nil { in, out := &in.Version, &out.Version - *out = new(int64) - **out = **in + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } } return } @@ -367,8 +430,9 @@ func (in *DeploymentLogOptions) DeepCopy() *DeploymentLogOptions { func (in *DeploymentLogOptions) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c + } else { + return nil } - return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -397,8 +461,9 @@ func (in *DeploymentRequest) DeepCopy() *DeploymentRequest { func (in *DeploymentRequest) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c + } else { + return nil } - return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -406,18 +471,30 @@ func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { *out = *in if in.CustomParams != nil { in, out := &in.CustomParams, &out.CustomParams - *out = new(CustomDeploymentStrategyParams) - (*in).DeepCopyInto(*out) + if *in == nil { + *out = nil + } else { + *out = new(CustomDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } } if in.RecreateParams != nil { in, out := &in.RecreateParams, &out.RecreateParams - *out = new(RecreateDeploymentStrategyParams) - (*in).DeepCopyInto(*out) + if *in == nil { + *out = nil + } else { + *out = new(RecreateDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } } if in.RollingParams != nil { in, out := &in.RollingParams, &out.RollingParams - *out = new(RollingDeploymentStrategyParams) - (*in).DeepCopyInto(*out) + if *in == nil { + *out = nil + } else { + *out = new(RollingDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } } in.Resources.DeepCopyInto(&out.Resources) if in.Labels != nil { @@ -436,8 +513,12 @@ func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { } if in.ActiveDeadlineSeconds != nil { in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = **in + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } } return } @@ -452,6 +533,26 @@ func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentStrategyType) DeepCopyInto(out *DeploymentStrategyType) { + { + in := (*string)(unsafe.Pointer(in)) + out := (*string)(unsafe.Pointer(out)) + *out = *in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategyType. +func (in *DeploymentStrategyType) DeepCopy() *DeploymentStrategyType { + if in == nil { + return nil + } + out := new(DeploymentStrategyType) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DeploymentTriggerImageChangeParams) DeepCopyInto(out *DeploymentTriggerImageChangeParams) { *out = *in @@ -475,25 +576,26 @@ func (in *DeploymentTriggerImageChangeParams) DeepCopy() *DeploymentTriggerImage } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in DeploymentTriggerPolicies) DeepCopyInto(out *DeploymentTriggerPolicies) { +func (in *DeploymentTriggerPolicies) DeepCopyInto(out *DeploymentTriggerPolicies) { { - in := &in - *out = make(DeploymentTriggerPolicies, len(*in)) + in := (*[]DeploymentTriggerPolicy)(unsafe.Pointer(in)) + out := (*[]DeploymentTriggerPolicy)(unsafe.Pointer(out)) + *out = make([]DeploymentTriggerPolicy, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } - return } + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicies. -func (in DeploymentTriggerPolicies) DeepCopy() DeploymentTriggerPolicies { +func (in *DeploymentTriggerPolicies) DeepCopy() *DeploymentTriggerPolicies { if in == nil { return nil } out := new(DeploymentTriggerPolicies) in.DeepCopyInto(out) - return *out + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -501,8 +603,12 @@ func (in *DeploymentTriggerPolicy) DeepCopyInto(out *DeploymentTriggerPolicy) { *out = *in if in.ImageChangeParams != nil { in, out := &in.ImageChangeParams, &out.ImageChangeParams - *out = new(DeploymentTriggerImageChangeParams) - (*in).DeepCopyInto(*out) + if *in == nil { + *out = nil + } else { + *out = new(DeploymentTriggerImageChangeParams) + (*in).DeepCopyInto(*out) + } } return } @@ -517,6 +623,26 @@ func (in *DeploymentTriggerPolicy) DeepCopy() *DeploymentTriggerPolicy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTriggerType) DeepCopyInto(out *DeploymentTriggerType) { + { + in := (*string)(unsafe.Pointer(in)) + out := (*string)(unsafe.Pointer(out)) + *out = *in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerType. +func (in *DeploymentTriggerType) DeepCopy() *DeploymentTriggerType { + if in == nil { + return nil + } + out := new(DeploymentTriggerType) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ExecNewPodHook) DeepCopyInto(out *ExecNewPodHook) { *out = *in @@ -527,7 +653,7 @@ func (in *ExecNewPodHook) DeepCopyInto(out *ExecNewPodHook) { } if in.Env != nil { in, out := &in.Env, &out.Env - *out = make([]corev1.EnvVar, len(*in)) + *out = make([]core_v1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -555,8 +681,12 @@ func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) { *out = *in if in.ExecNewPod != nil { in, out := &in.ExecNewPod, &out.ExecNewPod - *out = new(ExecNewPodHook) - (*in).DeepCopyInto(*out) + if *in == nil { + *out = nil + } else { + *out = new(ExecNewPodHook) + (*in).DeepCopyInto(*out) + } } if in.TagImages != nil { in, out := &in.TagImages, &out.TagImages @@ -576,28 +706,64 @@ func (in *LifecycleHook) DeepCopy() *LifecycleHook { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleHookFailurePolicy) DeepCopyInto(out *LifecycleHookFailurePolicy) { + { + in := (*string)(unsafe.Pointer(in)) + out := (*string)(unsafe.Pointer(out)) + *out = *in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHookFailurePolicy. +func (in *LifecycleHookFailurePolicy) DeepCopy() *LifecycleHookFailurePolicy { + if in == nil { + return nil + } + out := new(LifecycleHookFailurePolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RecreateDeploymentStrategyParams) DeepCopyInto(out *RecreateDeploymentStrategyParams) { *out = *in if in.TimeoutSeconds != nil { in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int64) - **out = **in + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } } if in.Pre != nil { in, out := &in.Pre, &out.Pre - *out = new(LifecycleHook) - (*in).DeepCopyInto(*out) + if *in == nil { + *out = nil + } else { + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } } if in.Mid != nil { in, out := &in.Mid, &out.Mid - *out = new(LifecycleHook) - (*in).DeepCopyInto(*out) + if *in == nil { + *out = nil + } else { + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } } if in.Post != nil { in, out := &in.Post, &out.Post - *out = new(LifecycleHook) - (*in).DeepCopyInto(*out) + if *in == nil { + *out = nil + } else { + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } } return } @@ -617,38 +783,66 @@ func (in *RollingDeploymentStrategyParams) DeepCopyInto(out *RollingDeploymentSt *out = *in if in.UpdatePeriodSeconds != nil { in, out := &in.UpdatePeriodSeconds, &out.UpdatePeriodSeconds - *out = new(int64) - **out = **in + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } } if in.IntervalSeconds != nil { in, out := &in.IntervalSeconds, &out.IntervalSeconds - *out = new(int64) - **out = **in + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } } if in.TimeoutSeconds != nil { in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int64) - **out = **in + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } } if in.MaxUnavailable != nil { in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } } if in.MaxSurge != nil { in, out := &in.MaxSurge, &out.MaxSurge - *out = new(intstr.IntOrString) - **out = 
**in + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } } if in.Pre != nil { in, out := &in.Pre, &out.Pre - *out = new(LifecycleHook) - (*in).DeepCopyInto(*out) + if *in == nil { + *out = nil + } else { + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } } if in.Post != nil { in, out := &in.Post, &out.Post - *out = new(LifecycleHook) - (*in).DeepCopyInto(*out) + if *in == nil { + *out = nil + } else { + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } } return } diff --git a/vendor/github.com/openshift/api/project/v1/generated.pb.go b/vendor/github.com/openshift/api/project/v1/generated.pb.go index 35cbc2284..07836449a 100644 --- a/vendor/github.com/openshift/api/project/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/project/v1/generated.pb.go @@ -1,22 +1,32 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: github.com/openshift/api/project/v1/generated.proto +// DO NOT EDIT! +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + github.com/openshift/api/project/v1/generated.proto + + It has these top-level messages: + Project + ProjectList + ProjectRequest + ProjectSpec + ProjectStatus +*/ package v1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" - proto "github.com/gogo/protobuf/proto" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v11 "k8s.io/api/core/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -27,147 +37,27 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. 
// A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Project) Reset() { *m = Project{} } -func (*Project) ProtoMessage() {} -func (*Project) Descriptor() ([]byte, []int) { - return fileDescriptor_fbf46eaac05029bf, []int{0} -} -func (m *Project) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Project) XXX_Merge(src proto.Message) { - xxx_messageInfo_Project.Merge(m, src) -} -func (m *Project) XXX_Size() int { - return m.Size() -} -func (m *Project) XXX_DiscardUnknown() { - xxx_messageInfo_Project.DiscardUnknown(m) -} +func (m *Project) Reset() { *m = Project{} } +func (*Project) ProtoMessage() {} +func (*Project) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_Project proto.InternalMessageInfo +func (m *ProjectList) Reset() { *m = ProjectList{} } +func (*ProjectList) ProtoMessage() {} +func (*ProjectList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *ProjectList) Reset() { *m = ProjectList{} } -func (*ProjectList) ProtoMessage() {} -func (*ProjectList) Descriptor() ([]byte, []int) { - return fileDescriptor_fbf46eaac05029bf, []int{1} -} -func (m *ProjectList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProjectList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ProjectList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProjectList.Merge(m, src) -} -func (m *ProjectList) XXX_Size() int { 
- return m.Size() -} -func (m *ProjectList) XXX_DiscardUnknown() { - xxx_messageInfo_ProjectList.DiscardUnknown(m) -} +func (m *ProjectRequest) Reset() { *m = ProjectRequest{} } +func (*ProjectRequest) ProtoMessage() {} +func (*ProjectRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -var xxx_messageInfo_ProjectList proto.InternalMessageInfo +func (m *ProjectSpec) Reset() { *m = ProjectSpec{} } +func (*ProjectSpec) ProtoMessage() {} +func (*ProjectSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *ProjectRequest) Reset() { *m = ProjectRequest{} } -func (*ProjectRequest) ProtoMessage() {} -func (*ProjectRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fbf46eaac05029bf, []int{2} -} -func (m *ProjectRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ProjectRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProjectRequest.Merge(m, src) -} -func (m *ProjectRequest) XXX_Size() int { - return m.Size() -} -func (m *ProjectRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ProjectRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ProjectRequest proto.InternalMessageInfo - -func (m *ProjectSpec) Reset() { *m = ProjectSpec{} } -func (*ProjectSpec) ProtoMessage() {} -func (*ProjectSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_fbf46eaac05029bf, []int{3} -} -func (m *ProjectSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProjectSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ProjectSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProjectSpec.Merge(m, src) -} -func (m 
*ProjectSpec) XXX_Size() int { - return m.Size() -} -func (m *ProjectSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ProjectSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ProjectSpec proto.InternalMessageInfo - -func (m *ProjectStatus) Reset() { *m = ProjectStatus{} } -func (*ProjectStatus) ProtoMessage() {} -func (*ProjectStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_fbf46eaac05029bf, []int{4} -} -func (m *ProjectStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProjectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ProjectStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProjectStatus.Merge(m, src) -} -func (m *ProjectStatus) XXX_Size() int { - return m.Size() -} -func (m *ProjectStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ProjectStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ProjectStatus proto.InternalMessageInfo +func (m *ProjectStatus) Reset() { *m = ProjectStatus{} } +func (*ProjectStatus) ProtoMessage() {} +func (*ProjectStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func init() { proto.RegisterType((*Project)(nil), "github.com.openshift.api.project.v1.Project") @@ -176,55 +66,10 @@ func init() { proto.RegisterType((*ProjectSpec)(nil), "github.com.openshift.api.project.v1.ProjectSpec") proto.RegisterType((*ProjectStatus)(nil), "github.com.openshift.api.project.v1.ProjectStatus") } - -func init() { - proto.RegisterFile("github.com/openshift/api/project/v1/generated.proto", fileDescriptor_fbf46eaac05029bf) -} - -var fileDescriptor_fbf46eaac05029bf = []byte{ - // 570 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x93, 0x3d, 0x8f, 0xd3, 0x30, - 0x18, 0xc7, 0x9b, 0xf6, 0x7a, 0x5c, 0x5d, 0xee, 0x84, 0xc2, 0x52, 0x75, 0x48, 0x4b, 0x90, 0x50, - 0x07, 0x70, 0x68, 
0x79, 0x11, 0x73, 0x40, 0x08, 0x24, 0x5e, 0x0e, 0xb3, 0x55, 0x0c, 0xb8, 0xa9, - 0x9b, 0x9a, 0x5e, 0x62, 0x13, 0xbb, 0x95, 0x8e, 0x89, 0x8f, 0xc0, 0xce, 0xe7, 0x60, 0x65, 0xee, - 0x78, 0xe3, 0x4d, 0xd5, 0x35, 0x7c, 0x8b, 0x9b, 0x90, 0x1d, 0x37, 0x09, 0x5c, 0x91, 0xee, 0x16, - 0xb6, 0xfa, 0xc9, 0xff, 0xf7, 0xb3, 0xfd, 0x3c, 0x2e, 0x78, 0x10, 0x52, 0x39, 0x9d, 0x8f, 0x60, - 0xc0, 0x22, 0x8f, 0x71, 0x12, 0x8b, 0x29, 0x9d, 0x48, 0x0f, 0x73, 0xea, 0xf1, 0x84, 0x7d, 0x22, - 0x81, 0xf4, 0x16, 0x7d, 0x2f, 0x24, 0x31, 0x49, 0xb0, 0x24, 0x63, 0xc8, 0x13, 0x26, 0x99, 0x7d, - 0xbb, 0x80, 0x60, 0x0e, 0x41, 0xcc, 0x29, 0x34, 0x10, 0x5c, 0xf4, 0xdb, 0xf7, 0x4a, 0xe6, 0x90, - 0x85, 0xcc, 0xd3, 0xec, 0x68, 0x3e, 0xd1, 0x2b, 0xbd, 0xd0, 0xbf, 0x32, 0x67, 0xdb, 0x9d, 0x3d, - 0x11, 0x90, 0x32, 0xbd, 0x75, 0xc0, 0x12, 0xb2, 0x65, 0xdf, 0xf6, 0xc3, 0x22, 0x13, 0xe1, 0x60, - 0x4a, 0x63, 0x92, 0x1c, 0x7b, 0x7c, 0x16, 0xaa, 0x82, 0xf0, 0x22, 0x22, 0xf1, 0x36, 0xea, 0xf1, - 0xbf, 0xa8, 0x64, 0x1e, 0x4b, 0x1a, 0x11, 0x4f, 0x04, 0x53, 0x12, 0xe1, 0xbf, 0x39, 0xf7, 0x7b, - 0x15, 0x5c, 0x3b, 0xcc, 0xee, 0x63, 0x7f, 0x04, 0x7b, 0x4a, 0x3f, 0xc6, 0x12, 0xb7, 0xac, 0xae, - 0xd5, 0x6b, 0x0e, 0xee, 0xc3, 0x4c, 0x0b, 0xcb, 0x5a, 0xc8, 0x67, 0xa1, 0x2a, 0x08, 0xa8, 0xd2, - 0x70, 0xd1, 0x87, 0x6f, 0x47, 0x8a, 0x7f, 0x4d, 0x24, 0xf6, 0xed, 0xe5, 0xaa, 0x53, 0x49, 0x57, - 0x1d, 0x50, 0xd4, 0x50, 0x6e, 0xb5, 0x11, 0xd8, 0x11, 0x9c, 0x04, 0xad, 0xaa, 0xb1, 0x5f, 0xa2, - 0xc5, 0xd0, 0x9c, 0xee, 0x3d, 0x27, 0x81, 0x7f, 0xdd, 0xd8, 0x77, 0xd4, 0x0a, 0x69, 0x97, 0x3d, - 0x04, 0xbb, 0x42, 0x62, 0x39, 0x17, 0xad, 0x9a, 0xb6, 0x0e, 0xae, 0x64, 0xd5, 0xa4, 0x7f, 0x60, - 0xbc, 0xbb, 0xd9, 0x1a, 0x19, 0xa3, 0xfb, 0xd3, 0x02, 0x4d, 0x93, 0x7c, 0x45, 0x85, 0xb4, 0x3f, - 0x5c, 0xe8, 0x10, 0xbc, 0x5c, 0x87, 0x14, 0xad, 0xfb, 0x73, 0xc3, 0xec, 0xb4, 0xb7, 0xa9, 0x94, - 0xba, 0xf3, 0x0e, 0xd4, 0xa9, 0x24, 0x91, 0x68, 0x55, 0xbb, 0xb5, 0x5e, 0x73, 0x70, 0xf7, 0x2a, - 0x17, 0xf1, 0xf7, 0x8d, 0xb8, 0xfe, 0x52, 0x29, 0x50, 
0x66, 0x72, 0xcf, 0x2c, 0x70, 0x60, 0x12, - 0x88, 0x7c, 0x9e, 0x13, 0xf1, 0x3f, 0xa6, 0xfc, 0x08, 0x34, 0xc7, 0x54, 0xf0, 0x23, 0x7c, 0xfc, - 0x06, 0x47, 0x44, 0x0f, 0xbb, 0xe1, 0xdf, 0x34, 0x48, 0xf3, 0x59, 0xf1, 0x09, 0x95, 0x73, 0x1a, - 0x23, 0x22, 0x48, 0x28, 0x97, 0x94, 0xc5, 0x7a, 0x9a, 0x65, 0xac, 0xf8, 0x84, 0xca, 0x39, 0x17, - 0xe7, 0x23, 0x52, 0x8f, 0xc2, 0x46, 0x00, 0x4c, 0x68, 0x8c, 0x8f, 0xe8, 0x17, 0x92, 0x88, 0x96, - 0xd5, 0xad, 0xf5, 0x1a, 0xfe, 0x40, 0x1d, 0xf5, 0x79, 0x5e, 0x3d, 0x5f, 0x75, 0xba, 0x17, 0xff, - 0x88, 0x30, 0x0f, 0xe8, 0xa3, 0x95, 0x2c, 0xee, 0x0f, 0x0b, 0xec, 0xff, 0xf1, 0x60, 0xec, 0x17, - 0xa0, 0xce, 0xa7, 0x58, 0x10, 0xdd, 0xc1, 0x86, 0x3f, 0xd8, 0x34, 0xff, 0x50, 0x15, 0xcf, 0x57, - 0x9d, 0x5b, 0x5b, 0xfc, 0x4a, 0x2b, 0x38, 0x0e, 0x88, 0x0e, 0xa1, 0x4c, 0x60, 0x0f, 0x01, 0x08, - 0x58, 0x3c, 0xa6, 0xea, 0x2e, 0x9b, 0xc9, 0xdf, 0x29, 0x0d, 0x04, 0x2a, 0x1c, 0x96, 0xf1, 0xa7, - 0x9b, 0x78, 0x31, 0x86, 0xbc, 0x24, 0x50, 0xc9, 0xe6, 0xf7, 0x96, 0x6b, 0xa7, 0x72, 0xb2, 0x76, - 0x2a, 0xa7, 0x6b, 0xa7, 0xf2, 0x35, 0x75, 0xac, 0x65, 0xea, 0x58, 0x27, 0xa9, 0x63, 0x9d, 0xa6, - 0x8e, 0x75, 0x96, 0x3a, 0xd6, 0xb7, 0x5f, 0x4e, 0x65, 0x58, 0x5d, 0xf4, 0x7f, 0x07, 0x00, 0x00, - 0xff, 0xff, 0x0a, 0xd0, 0xf2, 0xe0, 0x22, 0x05, 0x00, 0x00, -} - func (m *Project) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -232,52 +77,41 @@ func (m *Project) Marshal() (dAtA []byte, err error) { } func (m *Project) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 
0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil } func (m *ProjectList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -285,46 +119,37 @@ func (m *ProjectList) Marshal() (dAtA []byte, err error) { } func (m *ProjectList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProjectList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } func (m *ProjectRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -332,42 +157,33 @@ func (m *ProjectRequest) Marshal() (dAtA []byte, err error) { } func (m *ProjectRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProjectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x1a - i -= len(m.DisplayName) - copy(dAtA[i:], m.DisplayName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) + i += copy(dAtA[i:], m.DisplayName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + return i, nil } func (m *ProjectSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) 
- n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -375,31 +191,32 @@ func (m *ProjectSpec) Marshal() (dAtA []byte, err error) { } func (m *ProjectSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProjectSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.Finalizers) > 0 { - for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Finalizers[iNdEx]) - copy(dAtA[i:], m.Finalizers[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) - i-- + for _, s := range m.Finalizers { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *ProjectStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -407,52 +224,45 @@ func (m *ProjectStatus) Marshal() (dAtA []byte, err error) { } func (m *ProjectStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProjectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Phase) - copy(dAtA[i:], m.Phase) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) 
+ i += copy(dAtA[i:], m.Phase) + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *Project) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -465,9 +275,6 @@ func (m *Project) Size() (n int) { } func (m *ProjectList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -482,9 +289,6 @@ func (m *ProjectList) Size() (n int) { } func (m *ProjectRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -497,9 +301,6 @@ func (m *ProjectRequest) Size() (n int) { } func (m *ProjectSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Finalizers) > 0 { @@ -512,24 +313,22 @@ func (m *ProjectSpec) Size() (n int) { } func (m *ProjectStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Phase) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x 
uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -539,7 +338,7 @@ func (this *Project) String() string { return "nil" } s := strings.Join([]string{`&Project{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ProjectSpec", "ProjectSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ProjectStatus", "ProjectStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -550,14 +349,9 @@ func (this *ProjectList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]Project{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Project", "Project", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ProjectList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Project", "Project", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -567,7 +361,7 @@ func (this *ProjectRequest) String() string { return "nil" } s := strings.Join([]string{`&ProjectRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), 
`&`, ``, 1) + `,`, `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, `}`, @@ -588,14 +382,8 @@ func (this *ProjectStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]NamespaceCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += fmt.Sprintf("%v", f) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&ProjectStatus{`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -623,7 +411,7 @@ func (m *Project) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -651,7 +439,7 @@ func (m *Project) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -660,9 +448,6 @@ func (m *Project) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -684,7 +469,7 @@ func (m *Project) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -693,9 +478,6 @@ func (m *Project) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -717,7 +499,7 @@ func (m *Project) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -726,9 +508,6 @@ func (m *Project) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - 
return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -745,9 +524,6 @@ func (m *Project) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -775,7 +551,7 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -803,7 +579,7 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -812,9 +588,6 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -836,7 +609,7 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -845,9 +618,6 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -865,9 +635,6 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -895,7 +662,7 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -923,7 +690,7 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) 
<< shift if b < 0x80 { break } @@ -932,9 +699,6 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -956,7 +720,7 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -966,9 +730,6 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -988,7 +749,7 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -998,9 +759,6 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1015,9 +773,6 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1045,7 +800,7 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1073,7 +828,7 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1083,9 +838,6 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1100,9 +852,6 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1130,7 +879,7 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1158,7 +907,7 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1168,48 +917,11 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.Phase = k8s_io_api_core_v1.NamespacePhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, v11.NamespaceCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1219,9 +931,6 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) 
error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1237,7 +946,6 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1269,8 +977,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1287,34 +997,96 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + 
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("github.com/openshift/api/project/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 558 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x93, 0x3f, 0x6f, 0xd3, 0x4e, + 0x18, 0xc7, 0xe3, 0xa4, 0xe9, 0xaf, 0xb9, 0xfc, 0x5a, 0x21, 0xb3, 0x44, 0x19, 0x9c, 0x60, 0x96, + 0x0c, 0x70, 0x26, 0x29, 0x20, 0x66, 0x0b, 0x21, 0x90, 0xf8, 0x53, 0xcc, 0x44, 0xc5, 0xc0, 0xc5, + 0x79, 0xe2, 0x1c, 0x89, 0xed, 0xc3, 0x77, 0x8e, 0x54, 0x26, 0x5e, 0x02, 0x3b, 0xef, 0x85, 0x39, + 0x63, 0xc7, 0x4e, 0x51, 0x63, 0xde, 0x45, 0x27, 0x74, 0xe7, 0x6b, 0x6c, 0x48, 0x2a, 0xda, 0x85, + 0x2d, 0xcf, 0xe3, 0xef, 0xe7, 0x73, 0xe7, 0xe7, 0x71, 0xd0, 0x61, 0x40, 0xc5, 0x24, 0x1d, 0x62, + 0x3f, 0x0e, 0x9d, 0x98, 0x41, 0xc4, 0x27, 0x74, 0x2c, 0x1c, 0xc2, 0xa8, 0xc3, 0x92, 0xf8, 0x13, + 0xf8, 0xc2, 0x99, 0xf7, 0x9d, 0x00, 0x22, 0x48, 0x88, 0x80, 0x11, 0x66, 0x49, 0x2c, 0x62, 0xf3, + 0x6e, 0x01, 0xe1, 0x35, 0x84, 0x09, 0xa3, 0x58, 0x43, 0x78, 0xde, 0x6f, 0xdf, 0x2f, 0x99, 0x83, + 0x38, 0x88, 0x1d, 0xc5, 0x0e, 0xd3, 0xb1, 0xaa, 0x54, 0xa1, 0x7e, 0xe5, 0xce, 0xb6, 0x3d, 0x7d, + 0xc2, 0x31, 0x8d, 0xd5, 0xd1, 0x7e, 0x9c, 0xc0, 0x96, 0x73, 0xdb, 0x0f, 0x8b, 0x4c, 0x48, 0xfc, + 0x09, 0x8d, 0x20, 0x39, 0x71, 0xd8, 0x34, 0x90, 0x0d, 0xee, 0x84, 0x20, 0xc8, 0x36, 0xca, 0xb9, + 0x8a, 0x4a, 0xd2, 0x48, 0xd0, 0x10, 0x36, 0x80, 0xc7, 0x7f, 0x03, 0xb8, 0x3f, 0x81, 0x90, 0x6c, + 0x70, 0x87, 0x57, 0x71, 0xa9, 0xa0, 0x33, 0x87, 0x46, 0x82, 0x8b, 0xe4, 0x4f, 0xc8, 0xfe, 0x5e, + 0x45, 0xff, 0x1d, 0xe5, 0x53, 0x33, 0x3f, 0xa2, 0x3d, 0xf9, 0x12, 0x23, 0x22, 0x48, 0xcb, 0xe8, + 0x1a, 0xbd, 0xe6, 0xe0, 0x01, 0xce, 0x9d, 0xb8, 0xec, 0xc4, 0x6c, 0x1a, 0xc8, 0x06, 0xc7, 0x32, + 0x8d, 0xe7, 0x7d, 0xfc, 0x66, 0x28, 0xf9, 0x57, 0x20, 0x88, 
0x6b, 0x2e, 0x96, 0x9d, 0x4a, 0xb6, + 0xec, 0xa0, 0xa2, 0xe7, 0xad, 0xad, 0xa6, 0x87, 0x76, 0x38, 0x03, 0xbf, 0x55, 0xd5, 0xf6, 0x6b, + 0x2c, 0x12, 0xeb, 0xdb, 0xbd, 0x63, 0xe0, 0xbb, 0xff, 0x6b, 0xfb, 0x8e, 0xac, 0x3c, 0xe5, 0x32, + 0x8f, 0xd1, 0x2e, 0x17, 0x44, 0xa4, 0xbc, 0x55, 0x53, 0xd6, 0xc1, 0x8d, 0xac, 0x8a, 0x74, 0x0f, + 0xb4, 0x77, 0x37, 0xaf, 0x3d, 0x6d, 0xb4, 0x7f, 0x18, 0xa8, 0xa9, 0x93, 0x2f, 0x29, 0x17, 0xe6, + 0x87, 0x8d, 0x09, 0xe1, 0xeb, 0x4d, 0x48, 0xd2, 0x6a, 0x3e, 0xb7, 0xf4, 0x49, 0x7b, 0x97, 0x9d, + 0xd2, 0x74, 0xde, 0xa2, 0x3a, 0x15, 0x10, 0xf2, 0x56, 0xb5, 0x5b, 0xeb, 0x35, 0x07, 0xf7, 0x6e, + 0xf2, 0x22, 0xee, 0xbe, 0x16, 0xd7, 0x5f, 0x48, 0x85, 0x97, 0x9b, 0xec, 0x73, 0x03, 0x1d, 0xe8, + 0x84, 0x07, 0x9f, 0x53, 0xe0, 0xff, 0x62, 0xcb, 0x8f, 0x50, 0x73, 0x44, 0x39, 0x9b, 0x91, 0x93, + 0xd7, 0x24, 0x04, 0xb5, 0xec, 0x86, 0x7b, 0x5b, 0x23, 0xcd, 0xa7, 0xc5, 0x23, 0xaf, 0x9c, 0x53, + 0x18, 0x70, 0x3f, 0xa1, 0x4c, 0xd0, 0x38, 0x52, 0xdb, 0x2c, 0x63, 0xc5, 0x23, 0xaf, 0x9c, 0xb3, + 0xc9, 0x7a, 0x45, 0xf2, 0xa3, 0x30, 0x3d, 0x84, 0xc6, 0x34, 0x22, 0x33, 0xfa, 0x05, 0x12, 0xde, + 0x32, 0xba, 0xb5, 0x5e, 0xc3, 0x1d, 0xc8, 0xab, 0x3e, 0x5b, 0x77, 0x2f, 0x96, 0x9d, 0xee, 0xe6, + 0xdf, 0x1d, 0xaf, 0x03, 0xea, 0x6a, 0x25, 0x8b, 0xfd, 0x1e, 0xed, 0xff, 0xf6, 0xbd, 0x98, 0xcf, + 0x51, 0x9d, 0x4d, 0x08, 0x07, 0x35, 0xc0, 0x86, 0x3b, 0xb8, 0x9c, 0xfd, 0x91, 0x6c, 0x5e, 0x2c, + 0x3b, 0x77, 0xb6, 0xe8, 0xa5, 0x95, 0x33, 0xe2, 0x83, 0x0a, 0x79, 0xb9, 0xc0, 0xed, 0x2d, 0x56, + 0x56, 0xe5, 0x74, 0x65, 0x55, 0xce, 0x56, 0x56, 0xe5, 0x6b, 0x66, 0x19, 0x8b, 0xcc, 0x32, 0x4e, + 0x33, 0xcb, 0x38, 0xcb, 0x2c, 0xe3, 0x3c, 0xb3, 0x8c, 0x6f, 0x3f, 0xad, 0xca, 0x71, 0x75, 0xde, + 0xff, 0x15, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x82, 0x78, 0x13, 0x2b, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/openshift/api/project/v1/generated.proto b/vendor/github.com/openshift/api/project/v1/generated.proto index 2baaf6a03..f42956be3 100644 --- 
a/vendor/github.com/openshift/api/project/v1/generated.proto +++ b/vendor/github.com/openshift/api/project/v1/generated.proto @@ -7,7 +7,9 @@ package github.com.openshift.api.project.v1; import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; @@ -18,25 +20,26 @@ option go_package = "v1"; // membership, editors can create and manage the resources, and viewers can see but not access running // containers. In a normal cluster project administrators are not able to alter their quotas - that is // restricted to cluster administrators. -// +// // Listing or watching projects will return only projects the user has the reader role on. -// +// // An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed // as editable to end users while namespaces are not. Direct creation of a project is typically restricted // to administrators, while end users should use the requestproject resource. message Project { + // Standard object's metadata. optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. optional ProjectSpec spec = 2; // Status describes the current status of a Namespace - // +optional optional ProjectStatus status = 3; } // ProjectList is a list of Project objects. message ProjectList { + // Standard object's metadata. optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of projects @@ -45,6 +48,7 @@ message ProjectList { // ProjecRequest is the set of options necessary to fully qualify a project request message ProjectRequest { + // Standard object's metadata. 
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DisplayName is the display name to apply to a project @@ -63,13 +67,6 @@ message ProjectSpec { // ProjectStatus is information about the current status of a Project message ProjectStatus { // Phase is the current lifecycle phase of the project - // +optional optional string phase = 1; - - // Represents the latest available observations of the project current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated k8s.io.api.core.v1.NamespaceCondition conditions = 2; } diff --git a/vendor/github.com/openshift/api/project/v1/legacy.go b/vendor/github.com/openshift/api/project/v1/legacy.go deleted file mode 100644 index 186f905f3..000000000 --- a/vendor/github.com/openshift/api/project/v1/legacy.go +++ /dev/null @@ -1,23 +0,0 @@ -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} - legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) - DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme -) - -func addLegacyKnownTypes(scheme *runtime.Scheme) error { - types := []runtime.Object{ - &Project{}, - &ProjectList{}, - &ProjectRequest{}, - } - scheme.AddKnownTypes(legacyGroupVersion, types...) 
- return nil -} diff --git a/vendor/github.com/openshift/api/project/v1/register.go b/vendor/github.com/openshift/api/project/v1/register.go index e471716ce..07f866897 100644 --- a/vendor/github.com/openshift/api/project/v1/register.go +++ b/vendor/github.com/openshift/api/project/v1/register.go @@ -1,40 +1,49 @@ package v1 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -var ( - GroupName = "project.openshift.io" - GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) - // Install is a function which adds this version to a scheme - Install = schemeBuilder.AddToScheme - - // SchemeGroupVersion generated code relies on this name - // Deprecated - SchemeGroupVersion = GroupVersion - // AddToScheme exists solely to keep the old generators creating valid code - // DEPRECATED - AddToScheme = schemeBuilder.AddToScheme +const ( + GroupName = "project.openshift.io" + LegacyGroupName = "" +) + +// SchemeGroupVersion is group version used to register these objects +var ( + SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "v1"} + + LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme + + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme ) -// Resource generated code relies on this being here, but it logically belongs to the group -// DEPRECATED func Resource(resource string) schema.GroupResource { - return schema.GroupResource{Group: GroupName, Resource: resource} + return SchemeGroupVersion.WithResource(resource).GroupResource() } // Adds the list of known types to api.Scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(GroupVersion, + scheme.AddKnownTypes(SchemeGroupVersion, &Project{}, &ProjectList{}, &ProjectRequest{}, ) - metav1.AddToGroupVersion(scheme, GroupVersion) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Project{}, + &ProjectList{}, + &ProjectRequest{}, + } + scheme.AddKnownTypes(LegacySchemeGroupVersion, types...) return nil } diff --git a/vendor/github.com/openshift/api/project/v1/types.go b/vendor/github.com/openshift/api/project/v1/types.go index dea150f12..c4ed0555f 100644 --- a/vendor/github.com/openshift/api/project/v1/types.go +++ b/vendor/github.com/openshift/api/project/v1/types.go @@ -10,8 +10,8 @@ import ( // ProjectList is a list of Project objects. type ProjectList struct { metav1.TypeMeta `json:",inline"` + // Standard object's metadata. metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Items is the list of projects Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -19,13 +19,6 @@ type ProjectList struct { const ( // These are internal finalizer values to Origin FinalizerOrigin corev1.FinalizerName = "openshift.io/origin" - // ProjectNodeSelector is an annotation that holds the node selector; - // the node selector annotation determines which nodes will have pods from this project scheduled to them - ProjectNodeSelector = "openshift.io/node-selector" - - // ProjectRequesterAnnotation is the username that requested a given project. Its not guaranteed to be present, - // but it is set by the default project template. 
- ProjectRequesterAnnotation = "openshift.io/requester" ) // ProjectSpec describes the attributes on a Project @@ -37,14 +30,7 @@ type ProjectSpec struct { // ProjectStatus is information about the current status of a Project type ProjectStatus struct { // Phase is the current lifecycle phase of the project - // +optional Phase corev1.NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=k8s.io/api/core/v1.NamespacePhase"` - - // Represents the latest available observations of the project current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []corev1.NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } // +genclient @@ -64,14 +50,14 @@ type ProjectStatus struct { // as editable to end users while namespaces are not. Direct creation of a project is typically restricted // to administrators, while end users should use the requestproject resource. type Project struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of the Namespace. Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status describes the current status of a Namespace - // +optional Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -83,9 +69,9 @@ type Project struct { // ProjecRequest is the set of options necessary to fully qualify a project request type ProjectRequest struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // DisplayName is the display name to apply to a project DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` // Description is the description to apply to a project diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/project/v1/types_swagger_doc_generated.go similarity index 51% rename from vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go rename to vendor/github.com/openshift/api/project/v1/types_swagger_doc_generated.go index 080f2677a..16c9a3e9f 100644 --- a/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/project/v1/types_swagger_doc_generated.go @@ -8,13 +8,14 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-swagger-docs.sh +// Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE var map_Project = map[string]string{ - "": "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. 
Projects are exposed as editable to end users while namespaces are not. Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.", - "spec": "Spec defines the behavior of the Namespace.", - "status": "Status describes the current status of a Namespace", + "": "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed as editable to end users while namespaces are not. 
Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.", + "metadata": "Standard object's metadata.", + "spec": "Spec defines the behavior of the Namespace.", + "status": "Status describes the current status of a Namespace", } func (Project) SwaggerDoc() map[string]string { @@ -22,8 +23,9 @@ func (Project) SwaggerDoc() map[string]string { } var map_ProjectList = map[string]string{ - "": "ProjectList is a list of Project objects.", - "items": "Items is the list of projects", + "": "ProjectList is a list of Project objects.", + "metadata": "Standard object's metadata.", + "items": "Items is the list of projects", } func (ProjectList) SwaggerDoc() map[string]string { @@ -32,6 +34,7 @@ func (ProjectList) SwaggerDoc() map[string]string { var map_ProjectRequest = map[string]string{ "": "ProjecRequest is the set of options necessary to fully qualify a project request", + "metadata": "Standard object's metadata.", "displayName": "DisplayName is the display name to apply to a project", "description": "Description is the description to apply to a project", } @@ -50,9 +53,8 @@ func (ProjectSpec) SwaggerDoc() map[string]string { } var map_ProjectStatus = map[string]string{ - "": "ProjectStatus is information about the current status of a Project", - "phase": "Phase is the current lifecycle phase of the project", - "conditions": "Represents the latest available observations of the project current state.", + "": "ProjectStatus is information about the current status of a Project", + "phase": "Phase is the current lifecycle phase of the project", } func (ProjectStatus) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go index 763383030..9c2ef00ba 100644 --- a/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go +++ 
b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go @@ -1,11 +1,11 @@ // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// This file was autogenerated by deepcopy-gen. Do not edit it manually! package v1 import ( - corev1 "k8s.io/api/core/v1" + core_v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -15,7 +15,7 @@ func (in *Project) DeepCopyInto(out *Project) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + out.Status = in.Status return } @@ -33,15 +33,16 @@ func (in *Project) DeepCopy() *Project { func (in *Project) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c + } else { + return nil } - return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProjectList) DeepCopyInto(out *ProjectList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Project, len(*in)) @@ -66,8 +67,9 @@ func (in *ProjectList) DeepCopy() *ProjectList { func (in *ProjectList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c + } else { + return nil } - return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -92,8 +94,9 @@ func (in *ProjectRequest) DeepCopy() *ProjectRequest { func (in *ProjectRequest) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c + } else { + return nil } - return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -101,7 +104,7 @@ func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { *out = *in if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers - *out = make([]corev1.FinalizerName, len(*in)) + *out = make([]core_v1.FinalizerName, len(*in)) copy(*out, *in) } return @@ -120,13 +123,6 @@ func (in *ProjectSpec) DeepCopy() *ProjectSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]corev1.NamespaceCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } return } diff --git a/vendor/modules.txt b/vendor/modules.txt index a511e769b..cf05e33e0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -236,7 +236,7 @@ github.com/gobwas/glob/util/strings # github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6 => github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6 github.com/gocraft/dbr github.com/gocraft/dbr/dialect -# github.com/gogo/protobuf v1.3.1 => github.com/gogo/protobuf v1.3.0 +# github.com/gogo/protobuf v1.3.0 => github.com/gogo/protobuf v1.3.0 github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/jsonpb github.com/gogo/protobuf/proto @@ -487,7 +487,7 @@ github.com/opencontainers/go-digest # github.com/opencontainers/image-spec v1.0.1 => github.com/opencontainers/image-spec v1.0.1 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/openshift/api v0.0.0-20200331152225-585af27e34fd => github.com/openshift/api v0.0.0-20200331152225-585af27e34fd +# github.com/openshift/api v0.0.0-20180801171038-322a19404e37 => github.com/openshift/api v0.0.0-20180801171038-322a19404e37 github.com/openshift/api/apps/v1 github.com/openshift/api/project/v1 # github.com/pborman/uuid v1.2.0 => github.com/pborman/uuid v1.2.0 @@ -625,7 +625,7 @@ 
golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/knownhosts golang.org/x/crypto/ssh/terminal -# golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 => golang.org/x/net v0.0.0-20190620200207-3b0461eec859 +# golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 => golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/net/context golang.org/x/net/context/ctxhttp golang.org/x/net/html @@ -674,7 +674,7 @@ golang.org/x/text/unicode/norm golang.org/x/text/width # golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 => golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 golang.org/x/time/rate -# golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868 => golang.org/x/tools v0.0.0-20190710153321-831012c29e42 +# golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 => golang.org/x/tools v0.0.0-20190710153321-831012c29e42 golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/gcimporter @@ -860,7 +860,7 @@ istio.io/client-go/pkg/listers/security/v1beta1 # istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a => istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a istio.io/gogo-genproto/googleapis/google/api istio.io/gogo-genproto/googleapis/google/rpc -# k8s.io/api v0.18.0 => k8s.io/api v0.0.0-20191114100352-16d7abae0d2a +# k8s.io/api v0.0.0-20191114100352-16d7abae0d2a => k8s.io/api v0.0.0-20191114100352-16d7abae0d2a k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 k8s.io/api/admissionregistration/v1 @@ -909,7 +909,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.18.0 => k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb +# k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb => 
k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -1280,7 +1280,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.18.0 => k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 +# k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 => k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 k8s.io/code-generator/cmd/client-gen k8s.io/code-generator/cmd/client-gen/args k8s.io/code-generator/cmd/client-gen/generators @@ -1336,9 +1336,6 @@ k8s.io/utils/path k8s.io/utils/pointer k8s.io/utils/trace # openpitrix.io/openpitrix v0.4.1-0.20190920134345-4d2be6e4965c => openpitrix.io/openpitrix v0.4.1-0.20190920134345-4d2be6e4965c -openpitrix.io/openpitrix/pkg/client -openpitrix.io/openpitrix/pkg/client/access -openpitrix.io/openpitrix/pkg/client/account openpitrix.io/openpitrix/pkg/config openpitrix.io/openpitrix/pkg/constants openpitrix.io/openpitrix/pkg/db diff --git a/vendor/openpitrix.io/openpitrix/pkg/client/access/client.go b/vendor/openpitrix.io/openpitrix/pkg/client/access/client.go deleted file mode 100644 index aa69fed00..000000000 --- a/vendor/openpitrix.io/openpitrix/pkg/client/access/client.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2018 The OpenPitrix Authors. All rights reserved. -// Use of this source code is governed by a Apache license -// that can be found in the LICENSE file. 
- -package access - -import ( - "context" - - accountclient "openpitrix.io/openpitrix/pkg/client/account" - "openpitrix.io/openpitrix/pkg/constants" - "openpitrix.io/openpitrix/pkg/logger" - "openpitrix.io/openpitrix/pkg/manager" - "openpitrix.io/openpitrix/pkg/pb" -) - -type Client struct { - pb.AccessManagerClient -} - -func NewClient() (*Client, error) { - conn, err := manager.NewClient(constants.AccountServiceHost, constants.AccountServicePort) - if err != nil { - return nil, err - } - return &Client{ - AccessManagerClient: pb.NewAccessManagerClient(conn), - }, nil -} - -func (c *Client) CheckActionBundleUser(ctx context.Context, actionBundleIds []string, userId string) bool { - users, err := c.GetActionBundleUsers(ctx, actionBundleIds) - if err != nil { - return false - } - for _, user := range users { - if user.GetUserId().GetValue() == userId { - return true - } - } - return false -} - -func (c *Client) GetActionBundleRoles(ctx context.Context, actionBundleIds []string) ([]*pb.Role, error) { - response, err := c.DescribeRoles(ctx, &pb.DescribeRolesRequest{ - ActionBundleId: actionBundleIds, - Status: []string{constants.StatusActive}, - }) - if err != nil { - logger.Error(ctx, "Describe roles failed: %+v", err) - return nil, err - } - - return response.RoleSet, nil -} - -func (c *Client) GetActionBundleUsers(ctx context.Context, actionBundleIds []string) ([]*pb.User, error) { - roles, err := c.GetActionBundleRoles(ctx, actionBundleIds) - if err != nil { - return nil, err - } - var roleIds []string - for _, role := range roles { - roleIds = append(roleIds, role.RoleId) - } - - accountClient, err := accountclient.NewClient() - if err != nil { - logger.Error(ctx, "Get account manager client failed: %+v", err) - return nil, err - } - return accountClient.GetRoleUsers(ctx, roleIds) -} diff --git a/vendor/openpitrix.io/openpitrix/pkg/client/account/client.go b/vendor/openpitrix.io/openpitrix/pkg/client/account/client.go deleted file mode 100644 index 
b1bc16d33..000000000 --- a/vendor/openpitrix.io/openpitrix/pkg/client/account/client.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2018 The OpenPitrix Authors. All rights reserved. -// Use of this source code is governed by a Apache license -// that can be found in the LICENSE file. - -package account - -import ( - "context" - "fmt" - "math" - "strings" - - "openpitrix.io/openpitrix/pkg/constants" - "openpitrix.io/openpitrix/pkg/logger" - "openpitrix.io/openpitrix/pkg/manager" - "openpitrix.io/openpitrix/pkg/pb" - "openpitrix.io/openpitrix/pkg/util/pbutil" - "openpitrix.io/openpitrix/pkg/util/stringutil" -) - -type Client struct { - pb.AccountManagerClient -} - -func NewClient() (*Client, error) { - conn, err := manager.NewClient(constants.AccountServiceHost, constants.AccountServicePort) - if err != nil { - return nil, err - } - return &Client{ - AccountManagerClient: pb.NewAccountManagerClient(conn), - }, nil -} - -func (c *Client) GetUsers(ctx context.Context, userIds []string) ([]*pb.User, error) { - var internalUsers []*pb.User - var noInternalUserIds []string - for _, userId := range userIds { - if stringutil.StringIn(userId, constants.InternalUsers) { - internalUsers = append(internalUsers, &pb.User{ - UserId: pbutil.ToProtoString(userId), - }) - } else { - noInternalUserIds = append(noInternalUserIds, userId) - } - } - - if len(noInternalUserIds) == 0 { - return internalUsers, nil - } - - response, err := c.DescribeUsers(ctx, &pb.DescribeUsersRequest{ - UserId: noInternalUserIds, - }) - if err != nil { - logger.Error(ctx, "Describe users %s failed: %+v", noInternalUserIds, err) - return nil, err - } - if len(response.UserSet) != len(noInternalUserIds) { - logger.Error(ctx, "Describe users %s with return count [%d]", userIds, len(response.UserSet)+len(internalUsers)) - return nil, fmt.Errorf("describe users %s with return count [%d]", userIds, len(response.UserSet)+len(internalUsers)) - } - response.UserSet = append(response.UserSet, internalUsers...) 
- return response.UserSet, nil -} - -func (c *Client) GetUser(ctx context.Context, userId string) (*pb.User, error) { - users, err := c.GetUsers(ctx, []string{userId}) - if err != nil { - return nil, err - } - if len(users) == 0 { - return nil, fmt.Errorf("not found user [%s]", userId) - } - return users[0], nil -} - -func (c *Client) GetUserGroupPath(ctx context.Context, userId string) (string, error) { - var userGroupPath string - - response, err := c.DescribeUsersDetail(ctx, &pb.DescribeUsersRequest{ - UserId: []string{userId}, - }) - if err != nil || len(response.UserDetailSet) == 0 { - logger.Error(ctx, "Describe user [%s] failed: %+v", userId, err) - return "", err - } - - groups := response.UserDetailSet[0].GroupSet - - //If one user under different groups, get the highest group path. - minLevel := math.MaxInt32 - for _, group := range groups { - level := len(strings.Split(group.GroupPath.GetValue(), ".")) - if level < minLevel { - minLevel = level - userGroupPath = group.GetGroupPath().GetValue() - } - } - - return userGroupPath, nil - -} - -func (c *Client) GetRoleUsers(ctx context.Context, roleIds []string) ([]*pb.User, error) { - response, err := c.DescribeUsers(ctx, &pb.DescribeUsersRequest{ - RoleId: roleIds, - Status: []string{constants.StatusActive}, - }) - if err != nil { - logger.Error(ctx, "Describe users failed: %+v", err) - return nil, err - } - - return response.UserSet, nil -} - -func (c *Client) GetIsvFromUser(ctx context.Context, userId string) (*pb.User, error) { - groupPath, err := c.GetUserGroupPath(ctx, userId) - if err != nil { - return nil, err - } - - rootGroupId := strings.Split(groupPath, ".")[0] - - describeUsersResponse, err := c.DescribeUsers(ctx, &pb.DescribeUsersRequest{ - RootGroupId: []string{rootGroupId}, - Status: []string{constants.StatusActive}, - RoleId: []string{constants.RoleIsv}, - }) - if err != nil { - logger.Error(ctx, "Failed to describe users: %+v", err) - return nil, err - } - - if 
len(describeUsersResponse.UserSet) == 0 { - logger.Error(ctx, "Isv not exist with root group id [%s]", rootGroupId) - return nil, fmt.Errorf("isv not exist") - } - - return describeUsersResponse.UserSet[0], nil -} diff --git a/vendor/openpitrix.io/openpitrix/pkg/client/client.go b/vendor/openpitrix.io/openpitrix/pkg/client/client.go deleted file mode 100644 index 8c00d87c1..000000000 --- a/vendor/openpitrix.io/openpitrix/pkg/client/client.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2018 The OpenPitrix Authors. All rights reserved. -// Use of this source code is governed by a Apache license -// that can be found in the LICENSE file. - -package client - -import ( - "context" - - accessclient "openpitrix.io/openpitrix/pkg/client/access" - "openpitrix.io/openpitrix/pkg/pb" - "openpitrix.io/openpitrix/pkg/sender" - "openpitrix.io/openpitrix/pkg/util/ctxutil" -) - -func SetSystemUserToContext(ctx context.Context) context.Context { - return ctxutil.ContextWithSender(ctx, sender.GetSystemSender()) -} - -func SetUserToContext(ctx context.Context, userId, apiMethod string) (context.Context, error) { - accessClient, err := accessclient.NewClient() - if err != nil { - return nil, err - } - response, err := accessClient.CanDo(ctx, &pb.CanDoRequest{ - UserId: userId, - ApiMethod: apiMethod, - }) - if err != nil { - return nil, err - } - - userSender := sender.New(response.UserId, sender.OwnerPath(response.OwnerPath), sender.OwnerPath(response.AccessPath)) - return ctxutil.ContextWithSender(ctx, userSender), nil -}