From aa4d07c80a0db136fe54304d158772a4294d5697 Mon Sep 17 00:00:00 2001
From: Jeff
Date: Wed, 20 Mar 2019 11:12:40 +0800
Subject: [PATCH] add controllers

change kiali mux to go-restful
add knative
---
 .gitmodules | 3 -
 .travis.yml | 3 +-
 Gopkg.lock | 515 +-
 Gopkg.toml | 9 +-
 Makefile | 6 +-
 build/controller-manager/Dockerfile | 2 +-
 cmd/controller-manager/app/controllers.go | 86 +
 cmd/controller-manager/app/helper.go | 38 +
 cmd/controller-manager/controller-manager.go | 66 +-
 .../crds/servicemesh_v1alpha2_strategy.yaml | 25 +
 .../servicemesh_v1alpha2_strategy.yaml | 2 +
 hack/docker_build.sh | 4 +-
 hack/docker_push.sh | 1 +
 pkg/apis/addtoscheme_servicemesh_v1alpha2.go | 2 +-
 .../servicemesh/v1alpha2/strategy_types.go | 13 +
 pkg/apiserver/servicemesh/metrics/handlers.go | 19 +-
 pkg/client/clientset/versioned/clientset.go | 98 +
 pkg/client/clientset/versioned/doc.go | 20 +
 .../versioned/fake/clientset_generated.go | 82 +
 pkg/client/clientset/versioned/fake/doc.go | 20 +
 .../clientset/versioned/fake/register.go | 56 +
 pkg/client/clientset/versioned/scheme/doc.go | 20 +
 .../clientset/versioned/scheme/register.go | 56 +
 .../typed/servicemesh/v1alpha2/doc.go | 20 +
 .../typed/servicemesh/v1alpha2/fake/doc.go | 20 +
 .../v1alpha2/fake/fake_servicemesh_client.go | 40 +
 .../v1alpha2/fake/fake_strategy.go | 140 +
 .../v1alpha2/generated_expansion.go | 21 +
 .../v1alpha2/servicemesh_client.go | 90 +
 .../typed/servicemesh/v1alpha2/strategy.go | 191 +
 .../informers/externalversions/factory.go | 180 +
 .../informers/externalversions/generic.go | 62 +
 .../internalinterfaces/factory_interfaces.go | 40 +
 .../externalversions/servicemesh/interface.go | 46 +
 .../servicemesh/v1alpha2/interface.go | 45 +
 .../servicemesh/v1alpha2/strategy.go | 89 +
 .../v1alpha2/expansion_generated.go | 27 +
 .../listers/servicemesh/v1alpha2/strategy.go | 94 +
 pkg/controller/add_strategy.go | 3 +-
 .../destinationrule_controller.go | 370 +
 .../destinationrule_controller_test.go | 1 +
 pkg/controller/strategy/helper.go | 42 +
 .../strategy/strategy_controller.go | 90 +-
 .../strategy/strategy_controller_test.go | 46 +-
 pkg/controller/virtualservice/util/util.go | 72 +
 .../virtualservice_controller.go | 349 +
 .../virtualservice_controller_test.go | 1 +
 pkg/models/log/constants.go | 2 +-
 pkg/models/log/logcollector.go | 2 +-
 pkg/models/log/types.go | 2 +-
 pkg/models/servicemesh/application.go | 69 +
 .../client/prometheus/prometheusclient.go | 8 +-
 pkg/simple/controller/namespace/namespaces.go | 4 +-
 vendor/github.com/evanphx/json-patch/LICENSE | 25 +
 vendor/github.com/evanphx/json-patch/merge.go | 383 +
 vendor/github.com/evanphx/json-patch/patch.go | 682 ++
 .../kiali/kiali/graph/options/options.go | 29 +-
 .../github.com/kiali/kiali/handlers/apps.go | 20 +-
 .../github.com/kiali/kiali/handlers/graph.go | 36 +-
 .../kiali/kiali/handlers/namespaces.go | 20 +-
 .../kiali/kiali/handlers/services.go | 20 +-
 .../kiali/kiali/handlers/workloads.go | 20 +-
 vendor/github.com/knative/pkg | 1 -
 vendor/github.com/knative/pkg/LICENSE | 201 +
 .../pkg/apis/istio/authentication/register.go | 21 +
 .../apis/istio/authentication/v1alpha1/doc.go | 22 +
 .../authentication/v1alpha1/policy_types.go | 345 +
 .../istio/authentication/v1alpha1/register.go | 52 +
 .../v1alpha1/zz_generated.deepcopy.go | 259 +
 .../pkg/apis/istio/common/v1alpha1/string.go | 35 +
 .../knative/pkg/apis/istio/register.go | 21 +
 .../istio/v1alpha3/destinationrule_types.go | 547 ++
 .../knative/pkg/apis/istio/v1alpha3/doc.go | 23 +
 .../pkg/apis/istio/v1alpha3/gateway_types.go | 318 +
 .../pkg/apis/istio/v1alpha3/register.go | 56 +
 .../istio/v1alpha3/virtualservice_types.go | 852 ++
 .../istio/v1alpha3/zz_generated.deepcopy.go | 1082 +++
 .../client/clientset/versioned/clientset.go | 120 +
 .../pkg/client/clientset/versioned/doc.go | 20 +
 .../client/clientset/versioned/scheme/doc.go | 20 +
 .../clientset/versioned/scheme/register.go | 58 +
 .../v1alpha1/authentication_client.go | 90 +
 .../typed/authentication/v1alpha1/doc.go | 20 +
 .../v1alpha1/generated_expansion.go | 21 +
 .../typed/authentication/v1alpha1/policy.go | 157 +
 .../typed/istio/v1alpha3/destinationrule.go | 157 +
 .../versioned/typed/istio/v1alpha3/doc.go | 20 +
 .../versioned/typed/istio/v1alpha3/gateway.go | 157 +
 .../istio/v1alpha3/generated_expansion.go | 25 +
 .../typed/istio/v1alpha3/istio_client.go | 100 +
 .../typed/istio/v1alpha3/virtualservice.go | 157 +
 .../authentication/interface.go | 46 +
 .../authentication/v1alpha1/interface.go | 45 +
 .../authentication/v1alpha1/policy.go | 89 +
 .../informers/externalversions/factory.go | 186 +
 .../informers/externalversions/generic.go | 71 +
 .../internalinterfaces/factory_interfaces.go | 38 +
 .../externalversions/istio/interface.go | 46 +
 .../istio/v1alpha3/destinationrule.go | 89 +
 .../istio/v1alpha3/gateway.go | 89 +
 .../istio/v1alpha3/interface.go | 59 +
 .../istio/v1alpha3/virtualservice.go | 89 +
 .../v1alpha1/expansion_generated.go | 27 +
 .../listers/authentication/v1alpha1/policy.go | 94 +
 .../listers/istio/v1alpha3/destinationrule.go | 94 +
 .../istio/v1alpha3/expansion_generated.go | 43 +
 .../client/listers/istio/v1alpha3/gateway.go | 94 +
 .../listers/istio/v1alpha3/virtualservice.go | 94 +
 .../pkg/apis/app/v1beta1/application.go | 0
 .../pkg/apis/app/v1beta1/application_types.go | 0
 .../pkg/apis/app/v1beta1/condition.go | 0
 .../application/pkg/apis/app/v1beta1/doc.go | 0
 .../pkg/apis/app/v1beta1/register.go | 0
 .../pkg/apis/app/v1beta1/status.go | 0
 .../apis/app/v1beta1/zz_generated.deepcopy.go | 0
 .../application/pkg/genericreconciler}/doc.go | 6 +-
 .../genericreconciler/genericreconciler.go | 374 +
 .../pkg/genericreconciler/types.go | 41 +
 .../pkg/genericreconciler/utils.go | 23 +
 .../application/pkg/kbcontroller/doc.go | 18 +
 .../pkg/kbcontroller/kbcontroller.go | 42 +
 .../spf13/cobra/cobra/cmd/license_agpl.go | 683 ++
 .../spf13/cobra/cobra/cmd/license_apache_2.go | 238 +
 .../cobra/cobra/cmd/license_bsd_clause_2.go | 71 +
 .../cobra/cobra/cmd/license_bsd_clause_3.go | 78 +
 .../spf13/cobra/cobra/cmd/license_gpl_2.go | 376 +
 .../spf13/cobra/cobra/cmd/license_gpl_3.go | 711 ++
 .../spf13/cobra/cobra/cmd/license_lgpl.go | 186 +
 .../spf13/cobra/cobra/cmd/license_mit.go | 63 +
 .../spf13/cobra/cobra/cmd/licenses.go | 118 +
 .../gopkg.in/square/go-jose.v2/jwt/builder.go | 334 +
 .../gopkg.in/square/go-jose.v2/jwt/claims.go | 120 +
 vendor/gopkg.in/square/go-jose.v2/jwt/doc.go | 22 +
 .../gopkg.in/square/go-jose.v2/jwt/errors.go | 53 +
 vendor/gopkg.in/square/go-jose.v2/jwt/jwt.go | 163 +
 .../square/go-jose.v2/jwt/validation.go | 114 +
 .../pkg/features/kube_features.go | 62 +
 .../apimachinery/pkg/api/equality/semantic.go | 49 +
 .../apimachinery/pkg/api/validation/doc.go | 18 +
 .../pkg/api/validation/generic.go | 85 +
 .../pkg/api/validation/objectmeta.go | 308 +
 .../pkg/apis/meta/v1/validation/validation.go | 110 +
 .../authenticator/audagnostic.go | 90 +
 .../authentication/authenticator/audiences.go | 63 +
 .../authenticator/interfaces.go | 80 +
 .../pkg/authentication/serviceaccount/util.go | 73 +
 .../apiserver/pkg/authentication/user/doc.go | 2 +-
 .../authorization/authorizer/interfaces.go | 2 +-
 .../apiserver/pkg/features/kube_features.go | 109 +
 .../pkg/util/feature/feature_gate.go | 323 +
 .../client-go/discovery/fake/discovery.go | 144 +
 vendor/k8s.io/client-go/testing/actions.go | 671 ++
 vendor/k8s.io/client-go/testing/fake.go | 213 +
 vendor/k8s.io/client-go/testing/fixture.go | 547 ++
 .../client-go/tools/watch/informerwatcher.go | 114 +
 vendor/k8s.io/client-go/tools/watch/until.go | 225 +
 .../kubernetes/pkg/api/legacyscheme/scheme.go | 35 +
 .../k8s.io/kubernetes/pkg/api/service/util.go | 85 +
 .../k8s.io/kubernetes/pkg/api/v1/pod/util.go | 304 +
 vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go | 19 +
 .../kubernetes/pkg/apis/apps/register.go | 64 +
 .../k8s.io/kubernetes/pkg/apis/apps/types.go | 801 ++
 .../pkg/apis/apps/zz_generated.deepcopy.go | 800 ++
 .../pkg/apis/autoscaling/annotations.go | 34 +
 .../kubernetes/pkg/apis/autoscaling/doc.go | 19 +
 .../pkg/apis/autoscaling/register.go | 53 +
 .../kubernetes/pkg/apis/autoscaling/types.go | 416 +
 .../apis/autoscaling/zz_generated.deepcopy.go | 547 ++
 .../pkg/apis/core/annotation_key_constants.go | 104 +
 vendor/k8s.io/kubernetes/pkg/apis/core/doc.go | 24 +
 .../pkg/apis/core/field_constants.go | 38 +
 .../pkg/apis/core/helper/helpers.go | 539 ++
 .../pkg/apis/core/install/install.go | 38 +
 .../k8s.io/kubernetes/pkg/apis/core/json.go | 28 +
 .../pkg/apis/core/objectreference.go | 34 +
 .../kubernetes/pkg/apis/core/pods/helpers.go | 63 +
 .../kubernetes/pkg/apis/core/register.go | 98 +
 .../kubernetes/pkg/apis/core/resource.go | 55 +
 .../k8s.io/kubernetes/pkg/apis/core/taint.go | 36 +
 .../kubernetes/pkg/apis/core/toleration.go | 30 +
 .../k8s.io/kubernetes/pkg/apis/core/types.go | 4725 ++++++++++
 .../kubernetes/pkg/apis/core/v1/conversion.go | 547 ++
 .../kubernetes/pkg/apis/core/v1/defaults.go | 425 +
 .../k8s.io/kubernetes/pkg/apis/core/v1/doc.go | 23 +
 .../pkg/apis/core/v1/helper/helpers.go | 527 ++
 .../kubernetes/pkg/apis/core/v1/register.go | 46 +
 .../apis/core/v1/zz_generated.conversion.go | 7595 +++++++++++++++++
 .../pkg/apis/core/v1/zz_generated.defaults.go | 646 ++
 .../pkg/apis/core/validation/doc.go | 19 +
 .../pkg/apis/core/validation/events.go | 94 +
 .../pkg/apis/core/validation/validation.go | 5378 ++++++++++++
 .../pkg/apis/core/zz_generated.deepcopy.go | 5415 ++++++++++++
 .../kubernetes/pkg/apis/scheduling/doc.go | 20 +
 .../kubernetes/pkg/apis/scheduling/helpers.go | 65 +
 .../pkg/apis/scheduling/register.go | 51 +
 .../kubernetes/pkg/apis/scheduling/types.go | 81 +
 .../apis/scheduling/zz_generated.deepcopy.go | 84 +
 .../pkg/capabilities/capabilities.go | 95 +
 .../k8s.io/kubernetes/pkg/capabilities/doc.go | 18 +
 .../pkg/controller/client_builder.go | 261 +
 .../pkg/controller/controller_ref_manager.go | 501 ++
 .../pkg/controller/controller_utils.go | 1053 +++
 .../k8s.io/kubernetes/pkg/controller/doc.go | 19 +
 .../kubernetes/pkg/controller/lookup_cache.go | 92 +
 .../kubernetes/pkg/features/kube_features.go | 483 ++
 vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go | 19 +
 .../kubernetes/pkg/fieldpath/fieldpath.go | 109 +
 .../kubernetes/pkg/kubelet/types/constants.go | 32 +
 .../kubernetes/pkg/kubelet/types/doc.go | 18 +
 .../kubernetes/pkg/kubelet/types/labels.go | 41 +
 .../pkg/kubelet/types/pod_status.go | 40 +
 .../pkg/kubelet/types/pod_update.go | 199 +
 .../kubernetes/pkg/kubelet/types/types.go | 100 +
 .../k8s.io/kubernetes/pkg/master/ports/doc.go | 19 +
 .../kubernetes/pkg/master/ports/ports.go | 57 +
 .../kubernetes/pkg/scheduler/api/doc.go | 20 +
 .../kubernetes/pkg/scheduler/api/register.go | 55 +
 .../kubernetes/pkg/scheduler/api/types.go | 328 +
 .../pkg/scheduler/api/well_known_labels.go | 85 +
 .../scheduler/api/zz_generated.deepcopy.go | 639 ++
 .../pkg/security/apparmor/helpers.go | 80 +
 .../pkg/security/apparmor/validate.go | 229 +
 .../security/apparmor/validate_disabled.go | 24 +
 .../kubernetes/pkg/serviceaccount/claims.go | 180 +
 .../kubernetes/pkg/serviceaccount/jwt.go | 232 +
 .../kubernetes/pkg/serviceaccount/legacy.go | 139 +
 .../kubernetes/pkg/serviceaccount/util.go | 81 +
 .../k8s.io/kubernetes/pkg/util/file/file.go | 57 +
 .../k8s.io/kubernetes/pkg/util/hash/hash.go | 37 +
 .../kubernetes/pkg/util/metrics/util.go | 76 +
 .../kubernetes/pkg/util/net/sets/doc.go | 28 +
 .../kubernetes/pkg/util/net/sets/ipnet.go | 121 +
 .../kubernetes/pkg/util/parsers/parsers.go | 58 +
 .../kubernetes/pkg/util/taints/taints.go | 342 +
 .../third_party/forked/godep/license.go | 59 +
 vendor/k8s.io/utils/LICENSE | 202 +
 vendor/k8s.io/utils/pointer/pointer.go | 86 +
 .../utils/third_party/forked/golang/LICENSE | 27 +
 .../utils/third_party/forked/golang/PATENTS | 22 +
 .../application/application_controller.go | 51 +
 .../controllerutil/controllerutil.go | 178 -
 241 files changed, 53767 insertions(+), 749 deletions(-)
 create mode 100644 cmd/controller-manager/app/controllers.go
 create mode 100644 cmd/controller-manager/app/helper.go
 create mode 100644 pkg/client/clientset/versioned/clientset.go
 create mode 100644 pkg/client/clientset/versioned/doc.go
 create mode 100644 pkg/client/clientset/versioned/fake/clientset_generated.go
 create mode 100644 pkg/client/clientset/versioned/fake/doc.go
 create mode 100644 pkg/client/clientset/versioned/fake/register.go
 create mode 100644 pkg/client/clientset/versioned/scheme/doc.go
 create mode 100644 pkg/client/clientset/versioned/scheme/register.go
 create mode 100644 pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/doc.go
 create mode 100644 pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake/doc.go
 create mode 100644 pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake/fake_servicemesh_client.go
 create mode 100644 pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake/fake_strategy.go
 create mode 100644 pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/generated_expansion.go
 create mode 100644 pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/servicemesh_client.go
 create mode 100644 pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/strategy.go
 create mode 100644 pkg/client/informers/externalversions/factory.go
 create mode 100644 pkg/client/informers/externalversions/generic.go
 create mode 100644 pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
 create mode 100644 pkg/client/informers/externalversions/servicemesh/interface.go
 create mode 100644 pkg/client/informers/externalversions/servicemesh/v1alpha2/interface.go
 create mode 100644 pkg/client/informers/externalversions/servicemesh/v1alpha2/strategy.go
 create mode 100644 pkg/client/listers/servicemesh/v1alpha2/expansion_generated.go
 create mode 100644 pkg/client/listers/servicemesh/v1alpha2/strategy.go
 create mode 100644 pkg/controller/destinationrule/destinationrule_controller.go
 create mode 100644 pkg/controller/destinationrule/destinationrule_controller_test.go
 create mode 100644 pkg/controller/strategy/helper.go
 create mode 100644 pkg/controller/virtualservice/util/util.go
 create mode 100644 pkg/controller/virtualservice/virtualservice_controller.go
 create mode 100644 pkg/controller/virtualservice/virtualservice_controller_test.go
 create mode 100644 pkg/models/servicemesh/application.go
 create mode 100644 vendor/github.com/evanphx/json-patch/LICENSE
 create mode 100644 vendor/github.com/evanphx/json-patch/merge.go
 create mode 100644 vendor/github.com/evanphx/json-patch/patch.go
 delete mode 160000 vendor/github.com/knative/pkg
 create mode 100644 vendor/github.com/knative/pkg/LICENSE
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/authentication/register.go
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/doc.go
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/register.go
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/common/v1alpha1/string.go
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/register.go
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/v1alpha3/destinationrule_types.go
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/v1alpha3/doc.go
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/v1alpha3/gateway_types.go
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/v1alpha3/register.go
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go
 create mode 100644 vendor/github.com/knative/pkg/apis/istio/v1alpha3/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/doc.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/scheme/doc.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/authentication_client.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/doc.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/generated_expansion.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/policy.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/destinationrule.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/doc.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/gateway.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/generated_expansion.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/istio_client.go
 create mode 100644 vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/virtualservice.go
 create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/authentication/interface.go
 create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/interface.go
 create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/policy.go
 create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/factory.go
 create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/generic.go
 create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
 create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/istio/interface.go
 create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/destinationrule.go
 create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/gateway.go
 create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/interface.go
 create mode 100644 vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/virtualservice.go
 create mode 100644 vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/expansion_generated.go
 create mode 100644 vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/policy.go
 create mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go
 create mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go
 create mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go
 create mode 100644 vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go
 rename vendor/{sigs.k8s.io => github.com/kubernetes-sigs}/application/pkg/apis/app/v1beta1/application.go (100%)
 rename vendor/{sigs.k8s.io => github.com/kubernetes-sigs}/application/pkg/apis/app/v1beta1/application_types.go (100%)
 rename vendor/{sigs.k8s.io => github.com/kubernetes-sigs}/application/pkg/apis/app/v1beta1/condition.go (100%)
 rename vendor/{sigs.k8s.io => github.com/kubernetes-sigs}/application/pkg/apis/app/v1beta1/doc.go (100%)
 rename vendor/{sigs.k8s.io => github.com/kubernetes-sigs}/application/pkg/apis/app/v1beta1/register.go (100%)
 rename vendor/{sigs.k8s.io => github.com/kubernetes-sigs}/application/pkg/apis/app/v1beta1/status.go (100%)
 rename vendor/{sigs.k8s.io => github.com/kubernetes-sigs}/application/pkg/apis/app/v1beta1/zz_generated.deepcopy.go (100%)
 rename vendor/{sigs.k8s.io/controller-runtime/pkg/controller/controllerutil => github.com/kubernetes-sigs/application/pkg/genericreconciler}/doc.go (81%)
 create mode 100644 vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/genericreconciler.go
 create mode 100644 vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/types.go
 create mode 100644 vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/utils.go
 create mode 100644 vendor/github.com/kubernetes-sigs/application/pkg/kbcontroller/doc.go
 create mode 100644 vendor/github.com/kubernetes-sigs/application/pkg/kbcontroller/kbcontroller.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go
 create mode 100644 vendor/github.com/spf13/cobra/cobra/cmd/licenses.go
 create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwt/builder.go
 create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwt/claims.go
 create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwt/doc.go
 create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwt/errors.go
 create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwt/jwt.go
 create mode 100644 vendor/gopkg.in/square/go-jose.v2/jwt/validation.go
 create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/doc.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/features/kube_features.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go
 create mode 100644 vendor/k8s.io/client-go/discovery/fake/discovery.go
 create mode 100644 vendor/k8s.io/client-go/testing/actions.go
 create mode 100644 vendor/k8s.io/client-go/testing/fake.go
 create mode 100644 vendor/k8s.io/client-go/testing/fixture.go
 create mode 100644 vendor/k8s.io/client-go/tools/watch/informerwatcher.go
 create mode 100644 vendor/k8s.io/client-go/tools/watch/until.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/api/legacyscheme/scheme.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/api/service/util.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/register.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/types.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/json.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/register.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/resource.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/taint.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/types.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/register.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/scheduling/helpers.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/scheduling/register.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/scheduling/types.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/scheduling/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/capabilities/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/client_builder.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/features/kube_features.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/constants.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/labels.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_status.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/types/types.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/master/ports/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/master/ports/ports.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/scheduler/api/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/scheduler/api/register.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/scheduler/api/types.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/scheduler/api/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/util/file/file.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/util/hash/hash.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/util/metrics/util.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/util/net/sets/doc.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go
 create mode 100644 vendor/k8s.io/kubernetes/pkg/util/taints/taints.go
 create mode 100644 vendor/k8s.io/kubernetes/third_party/forked/godep/license.go
 create mode 100644 vendor/k8s.io/utils/LICENSE
 create mode 100644 vendor/k8s.io/utils/pointer/pointer.go
 create mode 100644 vendor/k8s.io/utils/third_party/forked/golang/LICENSE
 create mode 100644 vendor/k8s.io/utils/third_party/forked/golang/PATENTS
 create mode 100644 vendor/sigs.k8s.io/application/pkg/controller/application/application_controller.go
 delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go

diff --git a/.gitmodules b/.gitmodules
index 89b8ba826..e69de29bb 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,3 +0,0 @@
-[submodule "vendor/github.com/knative/pkg"]
-	path = vendor/github.com/knative/pkg
-	url = https://github.com/knative/pkg.git

diff --git a/.travis.yml b/.travis.yml
index 69416cda2..5bb0c2a33 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -15,12 +15,11 @@ before_install:
   - go get -u github.com/golang/dep/cmd/dep
 
 before_script:
-  - dep ensure -v
   - docker --version
   - bash hack/install_kubebuilder.sh
 
 script:
-  - make all && make test && bash hack/docker_build.sh
+  - make all && bash hack/docker_build.sh
 
 deploy:
   skip_cleanup: true

diff --git a/Gopkg.lock b/Gopkg.lock
index 0df27697a..5f5ed4df3 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -2,127 +2,98 @@
 
 [[projects]]
-  digest = "1:4d6f036ea3fe636bcb2e89850bcdc62a771354e157cd51b8b22a2de8562bf663"
   name = "cloud.google.com/go"
   packages = ["compute/metadata"]
-  pruneopts = "NUT"
   revision = "f52f9bc132541d2aa914f42100c36d10b1ef7e0c"
   version = "v0.37.0"
 
 [[projects]]
-  digest = "1:26b14a6dc72ace253599e969997d5ecf2143c63833c015179786bc756c76eaa4"
   name = "github.com/Microsoft/go-winio"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "1a8911d1ed007260465c3bfbbc785ac6915a0bb8"
   version = "v0.4.12"
 
 [[projects]]
-  digest = "1:0a111edd8693fd977f42a0c4f199a0efb13c20aec9da99ad8830c7bb6a87e8d6"
   name = "github.com/PuerkitoBio/purell"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "44968752391892e1b0d0b821ee79e9a85fa13049"
   version = "v1.1.1"
 
 [[projects]]
   branch = "master"
-  digest = "1:8098cd40cd09879efbf12e33bcd51ead4a66006ac802cd563a66c4f3373b9727"
   name = "github.com/PuerkitoBio/urlesc"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "de5bf2ad457846296e2031421a34e2568e304e35"
 
 [[projects]]
-  digest = "1:bb9033d47c116ea3b981ff159bdef73df8351b0b9700da2066339b97211b1bf0"
   name = "github.com/Sirupsen/logrus"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "dae0fa8d5b0c810a8ab733fbd5510c7cae84eca4"
   version = "v1.4.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:1e31f7a894b511019f6d5ce4a9462cb60151440645bb25dd48b5940ff2bbbdf1"
   name = "github.com/aead/chacha20"
   packages = [
     ".",
-    "chacha",
+    "chacha"
   ]
-  pruneopts = "NUT"
   revision = "8b13a72661dae6e9e5dea04f344f0dc95ea29547"
 
 [[projects]]
-  digest = "1:680b63a131506e668818d630d3ca36123ff290afa0afc9f4be21940adca3f27d"
   name = "github.com/appscode/jsonpatch"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "7c0e3b262f30165a8ec3d0b4c6059fd92703bfb2"
   version = "1.0.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
   name = "github.com/beorn7/perks"
   packages = ["quantile"]
-  pruneopts = "NUT"
   revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
 
 [[projects]]
-  digest = "1:4d43362859489b0672b9a1544d5ae02231202a43172c7eda7a55d44799f97f53"
   name = "github.com/bifurcation/mint"
   packages = [
     ".",
-    "syntax",
+    "syntax"
   ]
-  pruneopts = "NUT"
   revision = "824af65410658916142a7600349144e1289f2110"
 
 [[projects]]
-  digest = "1:cdee563173093e5ae7ab2a19c298e0904129719e1919a3c532b7bb0c3398b818"
   name = "github.com/cenkalti/backoff"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "1e4cf3da559842a91afcb6ea6141451e6c30c618"
   version = "v2.1.1"
 
 [[projects]]
-  digest = "1:f438d91be142877c3ad83157992c91de787ddfbddcc2a7da1ef6ef61606cadc4"
   name = "github.com/cheekybits/genny"
   packages = ["generic"]
-  pruneopts = "NUT"
   revision = "d2cf3cdd35ce0d789056c4bc02a4d6349c947caf"
   version = "v1.0.0"
 
 [[projects]]
-  digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
   name = "github.com/davecgh/go-spew"
   packages = ["spew"]
-  pruneopts = "NUT"
   revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
   version = "v1.1.1"
 
 [[projects]]
-  digest = "1:7a6852b35eb5bbc184561443762d225116ae630c26a7c4d90546619f1e7d2ad2"
   name = "github.com/dgrijalva/jwt-go"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
   version = "v3.2.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:5b5643142a4d48d55f7a2ad09da4565be2b0ecc554c903a25d18b18159d39b9f"
   name = "github.com/docker/distribution"
   packages = [
     "digestset",
-    "reference",
+    "reference"
   ]
-  pruneopts = "NUT"
   revision = "6d62eb1d4a3515399431b713fde3ce5a9b40e8d5"
 
 [[projects]]
-  digest = "1:addad6aa992efe1cd74f5bec0b069a68b35db1da8b042581002215d59b3ac54d"
   name = "github.com/docker/docker"
   packages = [
     "api",
@@ -144,141 +115,115 @@
     "pkg/ioutils",
     "pkg/longpath",
     "pkg/system",
-    "pkg/tlsconfig",
+    "pkg/tlsconfig"
   ]
-  pruneopts = "NUT"
   revision = "90d35abf7b3535c1c319c872900fbd76374e521c"
   version = "v17.05.0-ce-rc3"
 
 [[projects]]
   branch = "master"
-  digest = "1:0c6d0df813996e4b7bfd40eb2f0671755159576e0b38263babeef1ee7bbfe4d0"
   name = "github.com/docker/go-connections"
   packages = [
     "nat",
     "sockets",
-    "tlsconfig",
+    "tlsconfig"
   ]
-  pruneopts = "NUT"
   revision = "97c2040d34dfae1d1b1275fa3a78dbdd2f41cf7e"
 
 [[projects]]
-  digest = "1:4340101f42556a9cb2f7a360a0e95a019bfef6247d92e6c4c46f2433cf86a482"
   name = "github.com/docker/go-units"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "47565b4f722fb6ceae66b95f853feed578a4a51c"
   version = "v0.3.3"
 
 [[projects]]
   branch = "master"
-  digest = "1:ce43438a8204a4259b4461153a392bc3e504bef7e4785a8192344f002c7bd935"
   name = "github.com/docker/libtrust"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20"
 
 [[projects]]
-  digest = "1:b7ffca49e9cfd3dfb04a8e0a59347708c6f78f68476a32c5e0a0edca5d1b258c"
   name = "github.com/dustin/go-humanize"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "9f541cc9db5d55bce703bd99987c9d5cb8eea45e"
   version = "v1.0.0"
 
 [[projects]]
-  digest = "1:3267a1e17bf240decf62c364e861d79e68b52b5804d63b510b0baed70b9a6aee"
   name = "github.com/emicklei/go-restful"
   packages = [
     ".",
-    "log",
+    "log"
   ]
-  pruneopts = "NUT"
   revision = "85d198d05a92d31823b852b4a5928114912e8949"
   version = "v2.9.0"
 
 [[projects]]
-  digest = "1:70e2ebe3622a59b59cb111fd547fcb62d6abe0fdeb268325e98cb369789ef8c6"
   name = "github.com/emicklei/go-restful-openapi"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "b7062368c258c9e8f8cbe9dd2e6aebfa1b747be6"
   version = "v1.0.0"
 
+[[projects]]
+  name = "github.com/evanphx/json-patch"
+  packages = ["."]
+  revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5"
+  version = "v4.1.0"
+
 [[projects]]
   branch = "master"
-  digest = "1:1ccd7321e62f680a988bba496f0f5a9c80410b8104d55b0f6b8ecf84ad328476"
   name = "github.com/flynn/go-shlex"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "3f9db97f856818214da2e1057f8ad84803971cff"
 
 [[projects]]
-  digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
   name = "github.com/ghodss/yaml"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
   version = "v1.0.0"
 
 [[projects]]
-  digest = "1:b5df43d8cd265a39f9d375d76ad00b53d6ec8ff8074f598eb173e4939a196a3c"
   name = "github.com/go-ldap/ldap"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "729c20c2694d870bcd631f0dadaecd088bd7ccbc"
   version = "v3.0.2"
 
 [[projects]]
-  digest = "1:53becd66889185091b58ea3fc49294996f2179fb05a89702f4de7d15e581b509"
   name = "github.com/go-logr/logr"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e"
   version = "v0.1.0"
 
 [[projects]]
-  digest = "1:340497a512995aa69c0add901d79a2096b3449d35a44a6f1f1115091a9f8c687"
   name = "github.com/go-logr/zapr"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "03f06a783fbb7dfaf3f629c7825480e43a7105e6"
   version = "v0.1.1"
 
 [[projects]]
-  digest = "1:260f7ebefc63024c8dfe2c9f1a2935a89fa4213637a1f522f592f80c001cc441"
   name = "github.com/go-openapi/jsonpointer"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004"
   version = "v0.18.0"
 
 [[projects]]
-  digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546"
   name = "github.com/go-openapi/jsonreference"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3"
   version = "v0.18.0"
 
 [[projects]]
-  digest = "1:4da4ea0a664ba528965683d350f602d0f11464e6bb2e17aad0914723bc25d163"
   name = "github.com/go-openapi/spec"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "5b6cdde3200976e3ecceb2868706ee39b6aff3e4"
   version = "v0.18.0"
 
 [[projects]]
-  digest = "1:dc0f590770e5a6c70ea086232324f7b7dc4857c60eca63ab8ff78e0a5cfcdbf3"
   name = "github.com/go-openapi/swag"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "1d29f06aebd59ccdf11ae04aa0334ded96e2d909"
   version = "v0.18.0"
 
 [[projects]]
-  digest = "1:589fc282fd63dd84be3c91d87feaaa67a969612ba1ca33b911755fd95f460358"
   name = "github.com/go-redis/redis"
   packages = [
     ".",
@@ -287,231 +232,181 @@
     "internal/hashtag",
     "internal/pool",
     "internal/proto",
-    "internal/util",
+    "internal/util"
   ]
-  pruneopts = "NUT"
   revision = "d22fde8721cc915a55aeb6b00944a76a92bfeb6e"
   version = "v6.15.2"
 
 [[projects]]
-  digest = "1:13e9a236e56e18fecb73f5b2f2eb7f60060a1a9bc27e5a7328ab22e275ecb63e"
   name = "github.com/go-sql-driver/mysql"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "72cd26f257d44c1114970e19afddcd812016007e"
   version = "v1.4.1"
 
 [[projects]]
-  digest = "1:9059915429f7f3a5f18cfa6b7cab9a28721d7ac6db4079a62044aa229eb7f2a8"
   name = "github.com/gobuffalo/envy"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "fa0dfdc10b5366ce365b7d9d1755a03e4e797bc5"
   version = "v1.6.15"
 
 [[projects]]
-  digest = "1:f1631663db3b95aec6a1610560b33ede5e70011b0e8cad87b110016a6fbfc2db"
   name = "github.com/gogo/protobuf"
   packages = [
     "proto",
-    "sortkeys",
+    "sortkeys"
   ]
-  pruneopts = "NUT"
   revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
   version = "v1.2.1"
 
 [[projects]]
   branch = "master"
-  digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a"
   name = "github.com/golang/glog"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
 
 [[projects]]
   branch = "master"
-  digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
   name = "github.com/golang/groupcache"
   packages = ["lru"]
-  pruneopts = "NUT"
   revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b"
 
 [[projects]]
-  digest = "1:a8b59d8995b50db3b206d9160817e00aace183e456cb60abf5157de16d12e3c9"
   name = "github.com/golang/protobuf"
   packages = [
     "proto",
     "ptypes",
     "ptypes/any",
     "ptypes/duration",
-    "ptypes/timestamp",
+    "ptypes/timestamp"
   ]
-  pruneopts = "NUT"
   revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
   version = "v1.3.1"
 
 [[projects]]
   branch = "master"
-  digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
   name = "github.com/google/btree"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
 
 [[projects]]
   branch = "master"
-  digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
   name = "github.com/google/gofuzz"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
 
 [[projects]]
-  digest = "1:ab3ec1fe3e39bac4b3ab63390767766622be35b7cab03f47f787f9ec60522a53"
   name = "github.com/google/uuid"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4"
   version = "v1.1.1"
 
 [[projects]]
-  digest = "1:add738701bd5b2b985c0c37011092c57218bdc46caf1e682a73dc210ad36b03f"
   name = "github.com/googleapis/gnostic"
   packages = [
     "OpenAPIv2",
     "compiler",
-    "extensions",
+    "extensions"
   ]
-  pruneopts = "NUT"
   revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
   version = "v0.2.0"
 
 [[projects]]
-  digest = "1:f9c7b395f10bd6d881b1f2d71715906b4481a04e46870cdd2647d50caa3358c2"
   name = "github.com/gorilla/mux"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "a7962380ca08b5a188038c69871b8d3fbdf31e89"
   version = "v1.7.0"
 
 [[projects]]
-  digest = "1:4a0c072e44da763409da72d41492373a034baf2e6d849c76d239b4abdfbb6c49"
   name = "github.com/gorilla/websocket"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d"
   version = "v1.4.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:790debb569e0ca4a39c168cae515a5f6b353229ca351c0b4208ef6964934aaed"
   name = "github.com/gregjones/httpcache"
   packages = [
     ".",
-    "diskcache",
+    "diskcache"
   ]
-  pruneopts = "NUT"
   revision = "3befbb6ad0cc97d4c25d851e9528915809e1a22f"
 
 [[projects]]
-  digest = "1:f499d582d00ecc9744962e23ef8f271cb85e0d63014a99527b0ed66b618a24f0"
   name = "github.com/hashicorp/go-syslog"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "8d1874e3e8d1862b74e0536851e218c4571066a5"
   version = "v1.0.0"
 
 [[projects]]
-  digest = "1:c7d9de42b661ba85788f5f631cbac165795a2ff7dc1c59a4241d6228b129c3e4"
   name = "github.com/hashicorp/go-version"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "d40cf49b3a77bba84a7afdbd7f1dc295d114efb1"
   version = "v1.1.0"
 
 [[projects]]
-  digest = "1:52094d0f8bdf831d1a2401e9b6fee5795fdc0b2a2d1f8bb1980834c289e79129"
   name = "github.com/hashicorp/golang-lru"
   packages = [
     ".",
-    "simplelru",
+    "simplelru"
   ]
-  pruneopts = "NUT"
   revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c"
   version = "v0.5.1"
 
 [[projects]]
-  digest = "1:e4d4b786065b1879481dcfa5da9886f40fde00d6ab7dadea53d6d7dc943c4792"
   name = "github.com/hpcloud/tail"
   packages = [
     ".",
     "ratelimiter",
     "util",
     "watch",
-    "winfile",
+    "winfile"
   ]
-  pruneopts = "NUT"
   revision = "a30252cb686a21eb2d0b98132633053ec2f7f1e5"
   version = "v1.0.0"
 
 [[projects]]
-  digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f"
   name = "github.com/imdario/mergo"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "7c29201646fa3de8506f701213473dd407f19646"
   version = "v0.3.7"
 
 [[projects]]
-  digest = "1:406338ad39ab2e37b7f4452906442a3dbf0eb3379dd1f06aafb5c07e769a5fbb"
   name = "github.com/inconshreveable/mousetrap"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
   version = "v1.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:4e4b686999d470502871c6df51c47a5413edd50a667b09c63391542cf7bf83e8"
   name = "github.com/jimstudt/http-authentication"
   packages = ["basic"]
-  pruneopts = "NUT"
   revision = "3eca13d6893afd7ecabe15f4445f5d2872a1b012"
 
 [[projects]]
-  digest = "1:2ddfc1382a659966038282873c9e33e7694fa503130d445e97c4fdc3b8c5db66"
   name = "github.com/jinzhu/gorm"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "472c70caa40267cb89fd8facb07fe6454b578626"
   version = "v1.9.2"
 
 [[projects]]
   branch = "master"
-  digest = "1:802f75230c29108e787d40679f9bf5da1a5673eaf5c10eb89afd993e18972909"
   name = "github.com/jinzhu/inflection"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "04140366298a54a039076d798123ffa108fff46c"
 
 [[projects]]
-  digest = "1:da62aa6632d04e080b8a8b85a59ed9ed1550842a0099a55f3ae3a20d02a3745a"
   name = "github.com/joho/godotenv"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "23d116af351c84513e1946b527c88823e476be13"
   version = "v1.3.0"
 
 [[projects]]
-  digest = "1:4e903242fe176238aaa469f59d7035f5abf2aa9acfefb8964ddd203651b574e9"
   name = "github.com/json-iterator/go"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29"
   version = "v1.1.6"
 
 [[projects]]
-  digest = "1:8c4c4f442d26906bec1ece789e0510db50653e04995ab31242ee89cf427e07d2"
   name = "github.com/kiali/kiali"
   packages = [
     "business",
@@ -534,55 +429,62 @@
     "prometheus/internalmetrics",
     "status",
     "util",
-    "util/intutil",
+    "util/intutil"
   ]
-  pruneopts = "NUT"
   revision = "4e0f3eff11a4ab3a743328cac78c9fe9be36498c"
   version = "v0.15.0"
 
 [[projects]]
-  digest = "1:f44ca3e400a23dc9cf76a09d71891da95193c0c7da2008205f8f20154f49b22d"
   name = "github.com/klauspost/cpuid"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "e7e905edc00ea8827e58662220139109efea09db"
   version = "v1.2.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:c5ef63322e47aaf183978f09b6382f8e3d0db2ad92cf2f7d918dd0082efaffee"
   name = "github.com/knative/pkg"
   packages = [
     "apis/istio",
+    "apis/istio/authentication",
+    "apis/istio/authentication/v1alpha1",
     "apis/istio/common/v1alpha1",
     "apis/istio/v1alpha3",
+    "client/clientset/versioned",
+    "client/clientset/versioned/scheme",
+    "client/clientset/versioned/typed/authentication/v1alpha1",
+    "client/clientset/versioned/typed/istio/v1alpha3",
+    "client/informers/externalversions",
+    "client/informers/externalversions/authentication",
+    "client/informers/externalversions/authentication/v1alpha1",
+    "client/informers/externalversions/internalinterfaces",
+    "client/informers/externalversions/istio",
+    "client/informers/externalversions/istio/v1alpha3",
+    "client/listers/authentication/v1alpha1",
+    "client/listers/istio/v1alpha3"
   ]
-  pruneopts = "NUT"
   revision = "cd278f2d3394c865fda66bca12459e879e0279b8"
 
 [[projects]]
-  digest = "1:58999a98719fddbac6303cb17e8d85b945f60b72f48e3a2df6b950b97fa926f1"
   name = "github.com/konsorten/go-windows-terminal-sequences"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e"
   version = "v1.0.2"
 
 [[projects]]
   branch = "master"
-  digest = "1:78cf6bdecb0014413eeafe6cc8b9ffda3c8510c7f252cab6cb4e4cd26bee9609"
   name = "github.com/kubernetes-sigs/application"
   packages = [
+    "pkg/apis/app/v1beta1",
     "pkg/component",
     "pkg/customresource",
     "pkg/finalizer",
-    "pkg/resource",
+    "pkg/genericreconciler",
+    "pkg/kbcontroller",
+    "pkg/resource"
   ]
-  pruneopts = "NUT"
   revision = "4ead7f1b87048b7717b3e474a21fdc07e6bce636"
 
 [[projects]]
-  digest = "1:917d2500b71bdd4be2a28fa5fb45eb1a8a3e07184b5f7faf90f8fb3edaef2b62"
   name = "github.com/kubesphere/s2ioperator"
   packages = [
     "pkg/apis/devops/v1alpha1",
@@ -593,22 +495,18 @@
     "pkg/client/informers/externalversions/devops",
     "pkg/client/informers/externalversions/devops/v1alpha1",
     "pkg/client/informers/externalversions/internalinterfaces",
-    "pkg/client/listers/devops/v1alpha1",
+    "pkg/client/listers/devops/v1alpha1"
   ]
-  pruneopts = "NUT"
   revision = "cb9f6d6145324977e5904ded95de7bfca7a01151"
   version = "v0.0.4"
 
 [[projects]]
   branch = "master"
-  digest = "1:2d137c17dacc803b85c06b7a0cc9e9a1d68e3104e567caf27fea2fb067ef424e"
   name = "github.com/lucas-clemente/aes12"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "cd47fb39b79f867c6e4e5cd39cf7abd799f71670"
 
 [[projects]]
-  digest = "1:3a12648ef8bab4b7310adf715fe4d74fc1d52eb81f1df6e9fcb168770ffdbd6e"
   name = "github.com/lucas-clemente/quic-go"
   packages = [
     ".",
@@ -621,50 +519,40 @@
     "internal/protocol",
     "internal/utils",
     "internal/wire",
-    "qerr",
+    "qerr"
   ]
-  pruneopts = "NUT"
   revision = "714f38d5d0aff85894fd890718b991e361f03e7d"
   version = "v0.10.1"
 
 [[projects]]
   branch = "master"
-  digest = "1:b0074de3abb351adbc353b4f621359949d3cfd4a02847b2074fbc7047c3d0113"
   name = "github.com/lucas-clemente/quic-go-certificates"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "d2f86524cced5186554df90d92529757d22c1cb6"
 
 [[projects]]
   branch = "master"
-  digest = "1:0b14bf0c424137a702e793fe3570f41320cc3b8495d124e279a4c362c57f214b"
   name = "github.com/mailru/easyjson"
   packages = [
     "buffer",
     "jlexer",
-    "jwriter",
+    "jwriter"
   ]
-  pruneopts = "NUT"
   revision = "1de009706dbeb9d05f18586f0735fcdb7c524481"
 
 [[projects]]
-  digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde"
   name = "github.com/markbates/inflect"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "24b83195037b3bc61fcda2d28b7b0518bce293b6"
   version = "v1.0.4"
 
 [[projects]]
-  digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
   name = "github.com/matttproud/golang_protobuf_extensions"
   packages = ["pbutil"]
-  pruneopts = "NUT"
   revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
   version = "v1.0.1"
 
 [[projects]]
-  digest = "1:0a36dc212d1b91a38b435accb11764fe6506c503cc6b75a014b4fd0bd9db44df"
   name = "github.com/mholt/caddy"
   packages = [
     ".",
@@ -704,73 +592,57 @@
     "caddytls",
     "onevent",
     "onevent/hook",
-    "telemetry",
+    "telemetry"
   ]
-  pruneopts = "NUT"
   revision = "80dfb8b2a7f89b120a627bc4d866a1dc5ed3d92f"
   version = "v0.11.5"
 
 [[projects]]
   branch = "master"
-  digest = "1:bd070c3513a00c87f32654c774c559283598fecd19db97360c8876fff8ff620b"
   name = "github.com/mholt/certmagic"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "e3e89d1096d76d61680f8eeb8f67649baa6c54b8"
 
 [[projects]]
-  digest = "1:e68c51f73a9fa89a6cfbd25f24543b37f96a7ac142dcf4e2adcadad6fec230e3"
   name = "github.com/miekg/dns"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "cc8cd02140663157ce797c6650488d6c8563f31f"
   version = "v1.1.6"
 
 [[projects]]
-  digest = "1:f9f72e583aaacf1d1ac5d6121abd4afd3c690baa9e14e1d009df26bf831ba347"
   name = "github.com/mitchellh/go-homedir"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
   version = "v1.1.0"
 
 [[projects]]
-  digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
   name = "github.com/modern-go/concurrent"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
   version = "1.0.3"
 
 [[projects]]
-  digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6"
   name = "github.com/modern-go/reflect2"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
   version = "1.0.1"
 
 [[projects]]
-  digest = "1:93bfc0a09815fa2b1269c4e065e2591a9eac2c93bc78ccd6f73612ecb4bad1e7"
   name = "github.com/naoina/go-stringutil"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b"
   version = "v0.1.0"
 
 [[projects]]
-  digest = "1:9cd0478666845c9fb37ce8458c8b455d73c0a74d1e4c7603be298d9e28df83a1"
   name = "github.com/naoina/toml"
   packages = [
     ".",
-    "ast",
+    "ast"
   ]
-  pruneopts = "NUT"
   revision = "e6f5723bf2a66af014955e0888881314cf294129"
   version = "v0.1.1"
 
 [[projects]]
-  digest = "1:a84acfd5bc2d90ec8d9e0c2f2728aa8f2a33512f1247a099be4330ef9b926094"
   name = "github.com/onsi/ginkgo"
   packages = [
     ".",
@@ -790,14 +662,12 @@
     "reporters/stenographer",
     "reporters/stenographer/support/go-colorable",
     "reporters/stenographer/support/go-isatty",
-    "types",
+    "types"
   ]
-  pruneopts = "NUT"
   revision = "eea6ad008b96acdaa524f5b409513bf062b500ad"
   version = "v1.8.0"
 
 [[projects]]
-  digest = "1:ad29fb22a545681b5b5d0e07bc9fe20cbdaf3f954ffcad361017f3e34ac6b4ce"
   name = "github.com/onsi/gomega"
   packages = [
     ".",
@@ -813,187 +683,149 @@
     "matchers/support/goraph/edge",
     "matchers/support/goraph/node",
     "matchers/support/goraph/util",
-    "types",
+    "types"
   ]
-  pruneopts = "NUT"
   revision = "90e289841c1ed79b7a598a7cd9959750cb5e89e2"
   version = "v1.5.0"
 
 [[projects]]
-  digest = "1:e0cc8395ea893c898ff5eb0850f4d9851c1f57c78c232304a026379a47a552d0"
   name = "github.com/opencontainers/go-digest"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf"
   version = "v1.0.0-rc1"
 
 [[projects]]
-  digest = "1:290add25d7ce226bce0d6880f38c4fbb7129346827a71f37293143cf9dade289"
   name = "github.com/openshift/api"
   packages = [
     "apps/v1",
-    "project/v1",
+    "project/v1"
   ]
-  pruneopts = "NUT"
   revision = "0d921e363e951d89f583292c60d013c318df64dc"
   version = "v3.9.0"
 
 [[projects]]
-  digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf"
   name = "github.com/pborman/uuid"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1"
   version = "v1.2"
 
 [[projects]]
   branch = "master"
-  digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
   name = "github.com/petar/GoLLRB"
   packages = ["llrb"]
-  pruneopts = "NUT"
   revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
 
 [[projects]]
-  digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6"
   name = "github.com/peterbourgon/diskv"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
   version = "v2.0.1"
 
 [[projects]]
-  digest = "1:14715f705ff5dfe0ffd6571d7d201dd8e921030f8070321a79380d8ca4ec1a24"
   name = "github.com/pkg/errors"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
   version = "v0.8.1"
 
 [[projects]]
-  digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
   name = "github.com/pmezard/go-difflib"
   packages = ["difflib"]
-  pruneopts = "NUT"
   revision = "792786c7400a136282c1664665ae0a8db921c6c2"
   version = "v1.0.0"
 
 [[projects]]
-  digest = "1:a6b0f01bc76800f17cb666c82e1dde9f219fa35cd6989a351a9f0ef8a5a7cbd6"
   name = "github.com/prometheus/client_golang"
   packages = [
     "api",
     "api/prometheus/v1",
     "prometheus",
     "prometheus/internal",
-    "prometheus/promhttp",
+    "prometheus/promhttp"
   ]
-  pruneopts = "NUT"
   revision = "505eaef017263e299324067d40ca2c48f6a2cf50"
   version = "v0.9.2"
 
 [[projects]]
   branch = "master"
-  digest = "1:0f37e09b3e92aaeda5991581311f8dbf38944b36a3edec61cc2d1991f527554a"
   name = "github.com/prometheus/client_model"
   packages = ["go"]
-  pruneopts = "NUT"
   revision = "fd36f4220a901265f90734c3183c5f0c91daa0b8"
 
 [[projects]]
-  digest = "1:718e954e70b5f876b6bf756173ddc35603be265423e8d80661b1882cb1bd9a91"
   name = "github.com/prometheus/common"
   packages = [
     "expfmt",
     "internal/bitbucket.org/ww/goautoneg",
-    "model",
+    "model"
   ]
-  pruneopts = "NUT"
   revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250"
   version = "v0.2.0"
 
 [[projects]]
   branch = "master"
-  digest = "1:2aa721eb10a93c6216e8b864b42e78c20f787aa5b11010f8cfba5f7a8891f13f"
   name = "github.com/prometheus/procfs"
   packages = [
     ".",
     "internal/util",
     "iostats",
     "nfs",
-    "xfs",
+    "xfs"
   ]
-  pruneopts = "NUT"
   revision = "e56f2e22fc761e82a34aca553f6725e2aff4fe6c"
 
 [[projects]]
-  digest = "1:9e5d599747d9210d7dee1d10efe147f344f99f9521bb3c6d557ab65f4c2cf4ef"
   name = "github.com/rogpeppe/go-internal"
   packages = [
     "modfile",
     "module",
-    "semver",
+    "semver"
   ]
-  pruneopts = "NUT"
   revision = "1cf9852c553c5b7da2d5a4a091129a7822fed0c9"
   version = "v1.2.2"
 
 [[projects]]
-  digest = "1:d9afa09f6a45f68ec50047f8187c9d273b1fbf507858a104064bb6d9eb2e2795"
   name = "github.com/russross/blackfriday"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "05f3235734ad95d0016f6a23902f06461fcf567a"
   version = "v1.5.2"
 
 [[projects]]
-  digest = "1:d0af1fc7035b6d64dfbe4af440d8c70e593ea44e60284d98b74787db004a7452"
   name = "github.com/spf13/afero"
   packages = [
     ".",
-    "mem",
+    "mem"
   ]
-  pruneopts = "NUT"
   revision = "f4711e4db9e9a1d3887343acb72b2bbfc2f686f5"
   version = "v1.2.1"
 
 [[projects]]
-  digest = "1:2c3b60fc961b7ddca4336bb7bb39146cb73ea2ad73d4afc6b4ffea05571e712a"
   name = "github.com/spf13/cobra"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
   version = "v0.0.3"
 
 [[projects]]
-  digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
   name = "github.com/spf13/pflag"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
   version = "v1.0.3"
 
 [[projects]]
-  digest = "1:60a46e2410edbf02b419f833372dd1d24d7aa1b916a990a7370e792fada1eadd"
   name = "github.com/stretchr/objx"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
   version = "v0.1.1"
 
 [[projects]]
-  digest = "1:5f1187480cbc19150e63c774ed8007e7e64c1cb919a5e9ff7b0a0e5e1900775e"
   name = "github.com/stretchr/testify"
   packages = [
     "assert",
-    "mock",
+    "mock"
   ]
-  pruneopts = "NUT"
   revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
   version = "v1.3.0"
 
 [[projects]]
-  digest = "1:6ae48b637ad9c325f1eeb116f1780cc310fe93d524bc8a2eb78a350f21a35dff"
   name = "github.com/xenolf/lego"
   packages = [
     "acme",
@@ -1011,30 +843,24 @@
     "lego",
     "log",
     "platform/wait",
-    "registration",
+    "registration"
   ]
-  pruneopts = "NUT"
   revision = "2952cdaebd4da7cd560e195343bdd3cb78a67643"
   version = "v2.3.0"
 
 [[projects]]
-  digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
   name = "go.uber.org/atomic"
   packages = ["."]
-  pruneopts = "NUT"
   revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289"
   version = "v1.3.2"
 
 [[projects]]
- digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e" name = "go.uber.org/multierr" packages = ["."] - pruneopts = "NUT" revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" version = "v1.1.0" [[projects]] - digest = "1:244145848a183fe658a95c682d379142595c79733d854a7d4076fa72fb1b9eb8" name = "go.uber.org/zap" packages = [ ".", @@ -1042,15 +868,13 @@ "internal/bufferpool", "internal/color", "internal/exit", - "zapcore", + "zapcore" ] - pruneopts = "NUT" revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" version = "v1.9.1" [[projects]] branch = "master" - digest = "1:c7b6921d7671083242df0454b03ead5849175f650b15d31097c2af3ec2b9f34c" name = "golang.org/x/crypto" packages = [ "curve25519", @@ -1059,14 +883,12 @@ "hkdf", "ocsp", "pbkdf2", - "ssh/terminal", + "ssh/terminal" ] - pruneopts = "NUT" revision = "a1f597ede03a7bef967a422b5b3a5bd08805a01e" [[projects]] branch = "master" - digest = "1:4ef584461a8ab7d5e5883daca745b525450fe886a6839e583fbe24b65af527f1" name = "golang.org/x/net" packages = [ "bpf", @@ -1084,39 +906,33 @@ "internal/socks", "ipv4", "ipv6", - "proxy", + "proxy" ] - pruneopts = "NUT" revision = "9f648a60d9775ef5c977e7669d1673a7a67bef33" [[projects]] branch = "master" - digest = "1:cd20b2392f53dd5a6b3aff0ac551d02fae60274c673bf2b885e9a3ddadff5c4f" name = "golang.org/x/oauth2" packages = [ ".", "google", "internal", "jws", - "jwt", + "jwt" ] - pruneopts = "NUT" revision = "e64efc72b421e893cbf63f17ba2221e7d6d0b0f3" [[projects]] branch = "master" - digest = "1:a243573e9643276548ac3219e2243ccb7b3abce72d3fe442170a0dbf64a997a2" name = "golang.org/x/sys" packages = [ "cpu", "unix", - "windows", + "windows" ] - pruneopts = "NUT" revision = "fead79001313d15903fb4605b4a1b781532cd93e" [[projects]] - digest = "1:0ebfea43f83a5127e01acb49438dffeb32416312c3b73fa87c7fa6e39f7e03bc" name = "golang.org/x/text" packages = [ "collate", @@ -1145,23 +961,19 @@ "unicode/cldr", "unicode/norm", "unicode/rangetable", - "width", + "width" ] - pruneopts = "NUT" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] branch = "master" - digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" name = "golang.org/x/time" packages = ["rate"] - pruneopts = "NUT" revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef" [[projects]] branch = "master" - digest = "1:bfe8b9dd25f69b3d6d87c7f1b6c9e0d5f09e89f2470b21f299e875fb97a8c051" name = "golang.org/x/tools" packages = [ "container/intsets", @@ -1176,13 +988,11 @@ "internal/fastwalk", "internal/gopathwalk", "internal/module", - "internal/semver", + "internal/semver" ] - pruneopts = "NUT" revision = "8b67d361bba210f5fbb3c1a0fc121e0847b10b57" [[projects]] - digest = "1:faa5e95fa65f513292bea81c0baa5b8697e3a4c04b77a968327d9b73bfa9e8e0" name = "google.golang.org/appengine" packages = [ ".", @@ -1195,75 +1005,60 @@ "internal/modules", "internal/remote_api", "internal/urlfetch", - "urlfetch", + "urlfetch" ] - pruneopts = "NUT" revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1" version = "v1.4.0" [[projects]] - digest = "1:797db40073a8a1d30cc43864b6190372f2fca7af38b1cf1532e4e92a8283e075" name = "gopkg.in/asn1-ber.v1" packages = ["."] - pruneopts = "NUT" revision = "f715ec2f112d1e4195b827ad68cf44017a3ef2b1" version = "v1.3" [[projects]] - digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129" name = "gopkg.in/fsnotify.v1" packages = ["."] - pruneopts = "NUT" revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" source = "https://github.com/fsnotify/fsnotify.git" version = 
"v1.4.7" [[projects]] - digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" name = "gopkg.in/inf.v0" packages = ["."] - pruneopts = "NUT" revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" version = "v0.9.1" [[projects]] - digest = "1:80c34337e8a734e190f2d1b716cae774cca74db98315166f92074434e9af0227" name = "gopkg.in/natefinch/lumberjack.v2" packages = ["."] - pruneopts = "NUT" revision = "a96e63847dc3c67d17befa69c303767e2f84e54f" version = "v2.1" [[projects]] - digest = "1:bca78b987070b8659f4679cf6a6888d23bec62ed98583a6bdf5088b75bdfac7b" name = "gopkg.in/square/go-jose.v2" packages = [ ".", "cipher", "json", + "jwt" ] - pruneopts = "NUT" revision = "628223f44a71f715d2881ea69afc795a1e9c01be" version = "v2.3.0" [[projects]] branch = "v1" - digest = "1:8fb1ccb16a6cfecbfdfeb84d8ea1cc7afa8f9ef16526bc2326f72d993e32cef1" name = "gopkg.in/tomb.v1" packages = ["."] - pruneopts = "NUT" revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" [[projects]] - digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f" name = "gopkg.in/yaml.v2" packages = ["."] - pruneopts = "NUT" revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" version = "v2.2.2" [[projects]] - digest = "1:54a81502271a4e19f45dabc7ac58b6ba06513c0fff6f1ce9c555bde0e5061113" name = "k8s.io/api" packages = [ "admission/v1beta1", @@ -1298,14 +1093,12 @@ "settings/v1alpha1", "storage/v1", "storage/v1alpha1", - "storage/v1beta1", + "storage/v1beta1" ] - pruneopts = "NUT" revision = "05914d821849570fba9eacfb29466f2d8d3cd229" version = "kubernetes-1.13.1" [[projects]] - digest = "1:3a662c31f0b40195b3d619fe065a5756c7d167e893811843fb991cdee0898dcc" name = "k8s.io/apiextensions-apiserver" packages = [ "pkg/apis/apiextensions", @@ -1313,22 +1106,24 @@ "pkg/client/clientset/clientset", "pkg/client/clientset/clientset/scheme", "pkg/client/clientset/clientset/typed/apiextensions/v1beta1", + "pkg/features" ] - pruneopts = "NUT" revision = "0fe22c71c47604641d9aa352c785b7912c200562" version = "kubernetes-1.13.1" [[projects]] - digest = "1:6892d9a65ba272b1b84b4244bed3bd8f86dd9ca5759c4d1f591353332cf388db" name = "k8s.io/apimachinery" packages = [ + "pkg/api/equality", "pkg/api/errors", "pkg/api/meta", "pkg/api/resource", + "pkg/api/validation", "pkg/api/validation/path", "pkg/apis/meta/internalversion", "pkg/apis/meta/v1", "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1/validation", "pkg/apis/meta/v1beta1", "pkg/conversion", "pkg/conversion/queryparams", @@ -1366,30 +1161,31 @@ "pkg/version", "pkg/watch", "third_party/forked/golang/json", - "third_party/forked/golang/reflect", + "third_party/forked/golang/reflect" ] - pruneopts = "NUT" revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd" version = "kubernetes-1.13.1" [[projects]] - branch = "master" - digest = "1:2222d2c05e987340ddd2792997d834d74a7dec466fe7b507bf3071f484fa2d14" name = "k8s.io/apiserver" packages = [ "pkg/apis/audit", + "pkg/authentication/authenticator", + "pkg/authentication/serviceaccount", "pkg/authentication/user", "pkg/authorization/authorizer", "pkg/endpoints/request", + "pkg/features", + "pkg/util/feature" ] - pruneopts = "NUT" - revision = "39e839dff03462945cc991fb9dac4469c951772d" + revision = "3ccfe8365421eb08e334b195786a2973460741d8" + version = "kubernetes-1.13.1" [[projects]] - digest = "1:f11d97a5cfe3bdfa414391693763675007879344228ee519a6f355ec9cfdd00c" name = "k8s.io/client-go" packages = [ "discovery", + "discovery/fake", "dynamic", "informers", "informers/admissionregistration", @@ -1508,6 +1304,7 @@ "rest", 
"rest/watch", "restmapper", + "testing", "third_party/forked/golang/template", "tools/auth", "tools/cache", @@ -1521,6 +1318,7 @@ "tools/pager", "tools/record", "tools/reference", + "tools/watch", "transport", "util/buffer", "util/cert", @@ -1530,14 +1328,12 @@ "util/integer", "util/jsonpath", "util/retry", - "util/workqueue", + "util/workqueue" ] - pruneopts = "NUT" revision = "8d9ed539ba3134352c586810e749e58df4e94e4f" version = "kubernetes-1.13.1" [[projects]] - digest = "1:8f29fd4c578cae70e2e1673656e19e9bae4896c72b458f42b824dfe03d7ea3b2" name = "k8s.io/code-generator" packages = [ "cmd/client-gen", @@ -1548,15 +1344,13 @@ "cmd/client-gen/generators/util", "cmd/client-gen/path", "cmd/client-gen/types", - "pkg/util", + "pkg/util" ] - pruneopts = "T" revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae" version = "kubernetes-1.13.1" [[projects]] branch = "master" - digest = "1:8279bfdd72fb8eb68d03b5a43acbff4109a79725a2717d98e90f15915eefdbd0" name = "k8s.io/gengo" packages = [ "args", @@ -1566,45 +1360,71 @@ "generator", "namer", "parser", - "types", + "types" ] - pruneopts = "NUT" revision = "b90029ef6cd877cb3f422d75b3a07707e3aac6b7" [[projects]] - digest = "1:c263611800c3a97991dbcf9d3bc4de390f6224aaa8ca0a7226a9d734f65a416a" name = "k8s.io/klog" packages = ["."] - pruneopts = "NUT" revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0" version = "v0.2.0" [[projects]] branch = "master" - digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f" name = "k8s.io/kube-openapi" packages = ["pkg/util/proto"] - pruneopts = "NUT" revision = "15615b16d372105f0c69ff47dfe7402926a65aaa" [[projects]] - digest = "1:631b2ca05cd438462c9f87f26e16cb65f8cd61aba05bcb8b06a8bd199e264ec2" name = "k8s.io/kubernetes" - packages = ["pkg/util/slice"] - pruneopts = "NUT" + packages = [ + "pkg/api/legacyscheme", + "pkg/api/service", + "pkg/api/v1/pod", + "pkg/apis/apps", + "pkg/apis/autoscaling", + "pkg/apis/core", + "pkg/apis/core/helper", + "pkg/apis/core/install", + "pkg/apis/core/pods", + "pkg/apis/core/v1", + "pkg/apis/core/v1/helper", + "pkg/apis/core/validation", + "pkg/apis/scheduling", + "pkg/capabilities", + "pkg/controller", + "pkg/features", + "pkg/fieldpath", + "pkg/kubelet/types", + "pkg/master/ports", + "pkg/scheduler/api", + "pkg/security/apparmor", + "pkg/serviceaccount", + "pkg/util/file", + "pkg/util/hash", + "pkg/util/metrics", + "pkg/util/net/sets", + "pkg/util/parsers", + "pkg/util/slice", + "pkg/util/taints" + ] revision = "c27b913fddd1a6c480c229191a087698aa92f0b1" version = "v1.13.4" [[projects]] branch = "master" - digest = "1:93cbc4fa2be8f4b012dce390eadcd6a2064f06b7547edc9df151ee034d892d22" + name = "k8s.io/utils" + packages = ["pointer"] + revision = "21c4ce38f2a793ec01e925ddc31216500183b773" + +[[projects]] + branch = "master" name = "sigs.k8s.io/application" - packages = ["pkg/apis/app/v1beta1"] - pruneopts = "NUT" + packages = ["pkg/controller/application"] revision = "4ead7f1b87048b7717b3e474a21fdc07e6bce636" [[projects]] - digest = "1:41e0826a5968b00d321ba7981be205017ca2bea433a8f85c01e2b3f4c62445eb" name = "sigs.k8s.io/controller-runtime" packages = [ "pkg/cache", @@ -1613,7 +1433,6 @@ "pkg/client/apiutil", "pkg/client/config", "pkg/controller", - "pkg/controller/controllerutil", "pkg/envtest", "pkg/envtest/printer", "pkg/event", @@ -1642,14 +1461,12 @@ "pkg/webhook/internal/cert/writer", "pkg/webhook/internal/cert/writer/atomic", "pkg/webhook/internal/metrics", - "pkg/webhook/types", + "pkg/webhook/types" ] - pruneopts = "NUT" revision = 
"12d98582e72927b6cd0123e2b4e819f9341ce62c" version = "v0.1.10" [[projects]] - digest = "1:170bb3fef81543de085fe9bcdfc97c8677563151b5badba8f6e8ebeabb73a310" name = "sigs.k8s.io/controller-tools" packages = [ "cmd/controller-gen", @@ -1661,118 +1478,30 @@ "pkg/rbac", "pkg/util", "pkg/webhook", - "pkg/webhook/internal", + "pkg/webhook/internal" ] - pruneopts = "NUT" revision = "fbf141159251d035089e7acdd5a343f8cec91b94" version = "v0.1.9" [[projects]] - digest = "1:a9ab998a89bcd0ee9a5cf293943b79e2f889aa6a587b4f2ca3de113340e20418" name = "sigs.k8s.io/testing_frameworks" packages = [ "integration", "integration/addr", - "integration/internal", + "integration/internal" ] - pruneopts = "NUT" revision = "d348cb12705b516376e0c323bacca72b00a78425" version = "v0.1.1" [[projects]] - digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c" name = "sigs.k8s.io/yaml" packages = ["."] - pruneopts = "NUT" revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480" version = "v1.1.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - input-imports = [ - "github.com/dgrijalva/jwt-go", - "github.com/docker/docker/api/types", - "github.com/docker/docker/client", - "github.com/emicklei/go-restful", - "github.com/emicklei/go-restful-openapi", - "github.com/go-ldap/ldap", - "github.com/go-openapi/spec", - "github.com/go-redis/redis", - "github.com/go-sql-driver/mysql", - "github.com/golang/glog", - "github.com/jinzhu/gorm", - "github.com/json-iterator/go", - "github.com/kiali/kiali/config", - "github.com/kiali/kiali/handlers", - "github.com/knative/pkg/apis/istio/common/v1alpha1", - "github.com/knative/pkg/apis/istio/v1alpha3", - "github.com/kubesphere/s2ioperator/pkg/apis/devops/v1alpha1", - "github.com/kubesphere/s2ioperator/pkg/client/clientset/versioned", - "github.com/kubesphere/s2ioperator/pkg/client/informers/externalversions", - "github.com/mholt/caddy", - "github.com/mholt/caddy/caddy/caddymain", - "github.com/mholt/caddy/caddyhttp/httpserver", - "github.com/mitchellh/go-homedir", - "github.com/onsi/ginkgo", - "github.com/onsi/gomega", - "github.com/spf13/cobra", - "github.com/spf13/pflag", - "golang.org/x/net/context", - "golang.org/x/tools/container/intsets", - "gopkg.in/yaml.v2", - "k8s.io/api/apps/v1", - "k8s.io/api/apps/v1beta2", - "k8s.io/api/batch/v1", - "k8s.io/api/batch/v1beta1", - "k8s.io/api/core/v1", - "k8s.io/api/extensions/v1beta1", - "k8s.io/api/policy/v1beta1", - "k8s.io/api/rbac/v1", - "k8s.io/api/storage/v1", - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions", - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", - "k8s.io/apimachinery/pkg/api/errors", - "k8s.io/apimachinery/pkg/api/resource", - "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/labels", - "k8s.io/apimachinery/pkg/runtime", - "k8s.io/apimachinery/pkg/runtime/schema", - "k8s.io/apimachinery/pkg/types", - "k8s.io/apimachinery/pkg/util/json", - "k8s.io/apimachinery/pkg/util/runtime", - "k8s.io/apimachinery/pkg/util/wait", - "k8s.io/apiserver/pkg/authorization/authorizer", - "k8s.io/apiserver/pkg/endpoints/request", - "k8s.io/client-go/informers", - "k8s.io/client-go/informers/core/v1", - "k8s.io/client-go/informers/rbac/v1", - "k8s.io/client-go/kubernetes", - "k8s.io/client-go/kubernetes/scheme", - "k8s.io/client-go/plugin/pkg/client/auth/gcp", - "k8s.io/client-go/rest", - "k8s.io/client-go/tools/cache", - "k8s.io/client-go/tools/clientcmd", - "k8s.io/client-go/util/workqueue", - "k8s.io/code-generator/cmd/client-gen", - 
"k8s.io/gengo/examples/deepcopy-gen/generators", - "k8s.io/gengo/examples/defaulter-gen/generators", - "k8s.io/kubernetes/pkg/util/slice", - "sigs.k8s.io/application/pkg/apis/app/v1beta1", - "sigs.k8s.io/controller-runtime/pkg/client", - "sigs.k8s.io/controller-runtime/pkg/client/config", - "sigs.k8s.io/controller-runtime/pkg/controller", - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil", - "sigs.k8s.io/controller-runtime/pkg/envtest", - "sigs.k8s.io/controller-runtime/pkg/handler", - "sigs.k8s.io/controller-runtime/pkg/manager", - "sigs.k8s.io/controller-runtime/pkg/reconcile", - "sigs.k8s.io/controller-runtime/pkg/runtime/log", - "sigs.k8s.io/controller-runtime/pkg/runtime/scheme", - "sigs.k8s.io/controller-runtime/pkg/runtime/signals", - "sigs.k8s.io/controller-runtime/pkg/source", - "sigs.k8s.io/controller-tools/cmd/controller-gen", - "sigs.k8s.io/testing_frameworks/integration", - ] + inputs-digest = "662b6da91343ff0a611e4487b8eef803b103b676f5b2a5db7ed8351846218fc5" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 5f0f1a6ed..7322e0653 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -14,7 +14,6 @@ required = [ "sigs.k8s.io/controller-runtime/pkg/runtime/signals", "sigs.k8s.io/controller-runtime/pkg/source", "sigs.k8s.io/testing_frameworks/integration", # for integration testing - "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", "github.com/kubesphere/s2ioperator/pkg/client/clientset/versioned", "github.com/kubesphere/s2ioperator/pkg/client/informers/externalversions", "github.com/kubesphere/s2ioperator/pkg/apis/devops/v1alpha1" @@ -32,6 +31,10 @@ required = [ name = "k8s.io/apimachinery" version = "kubernetes-1.13.1" +[[override]] + name = "k8s.io/apiserver" + version = "kubernetes-1.13.1" + [[constraint]] name = "k8s.io/code-generator" version = "kubernetes-1.13.1" @@ -109,3 +112,7 @@ required = [ [[constraint]] name = "github.com/gorilla/mux" version = "1.7.0" + +[[constraint]] + branch = "master" + name = "github.com/knative/pkg" diff --git a/Makefile b/Makefile index 133921710..23a5901f9 100644 --- a/Makefile +++ b/Makefile @@ -28,7 +28,7 @@ define ALL_HELP_INFO # debugging tools like delve. endef .PHONY: all -all: test ks-apiserver ks-apigateway ks-iam +all: test ks-apiserver ks-apigateway ks-iam controller-manager # Build ks-apiserver binary ks-apiserver: test @@ -42,6 +42,10 @@ ks-apigateway: test ks-iam: test hack/gobuild.sh cmd/ks-iam +# Build controller-manager binary +controller-manager: test + hack/gobuild.sh cmd/controller-manager + # Run go fmt against code fmt: go fmt ./pkg/... ./cmd/... diff --git a/build/controller-manager/Dockerfile b/build/controller-manager/Dockerfile index a655999ba..45b1025c7 100644 --- a/build/controller-manager/Dockerfile +++ b/build/controller-manager/Dockerfile @@ -5,7 +5,7 @@ # Copyright 2018 The KubeSphere Authors. All rights reserved. # Use of this source code is governed by a Apache license # that can be found in the LICENSE file. 
-FROM golang:1.10.3 as controller-manager-builder +FROM golang:1.12 as controller-manager-builder COPY / /go/src/kubesphere.io/kubesphere WORKDIR /go/src/kubesphere.io/kubesphere diff --git a/cmd/controller-manager/app/controllers.go b/cmd/controller-manager/app/controllers.go new file mode 100644 index 000000000..1882fcd60 --- /dev/null +++ b/cmd/controller-manager/app/controllers.go @@ -0,0 +1,87 @@ +package app + +import ( + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "kubesphere.io/kubesphere/pkg/controller/destinationrule" + "kubesphere.io/kubesphere/pkg/controller/virtualservice" + "kubesphere.io/kubesphere/pkg/informers" + "kubesphere.io/kubesphere/pkg/simple/controller/namespace" + "sigs.k8s.io/controller-runtime/pkg/manager" + "time" + + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + + istioclientset "github.com/knative/pkg/client/clientset/versioned" + istioinformers "github.com/knative/pkg/client/informers/externalversions" + servicemeshclientset "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + servicemeshinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions" +) + +const defaultResync = 600 * time.Second + +var log = logf.Log.WithName("controller-manager") + +func AddControllers(mgr manager.Manager, cfg *rest.Config, stopCh <-chan struct{}) error { + + kubeClient, err := kubernetes.NewForConfig(cfg) + if err != nil { + log.Error(err, "building kubernetes client failed") + return err + } + + istioclient, err := istioclientset.NewForConfig(cfg) + if err != nil { + log.Error(err, "create istio client failed") + return err + } + + informerFactory := informers.SharedInformerFactory() + istioInformer := istioinformers.NewSharedInformerFactory(istioclient, defaultResync) + + servicemeshclient, err := servicemeshclientset.NewForConfig(cfg) + if err != nil { + log.Error(err, "create servicemesh client failed") + return err + } + + servicemeshinformer := servicemeshinformers.NewSharedInformerFactory(servicemeshclient, defaultResync) + + vsController := virtualservice.NewVirtualServiceController(informerFactory.Core().V1().Services(), + istioInformer.Networking().V1alpha3().VirtualServices(), + istioInformer.Networking().V1alpha3().DestinationRules(), + servicemeshinformer.Servicemesh().V1alpha2().Strategies(), + kubeClient, + istioclient) + + drController := destinationrule.NewDestinationRuleController(informerFactory.Apps().V1().Deployments(), + istioInformer.Networking().V1alpha3().DestinationRules(), + informerFactory.Core().V1().Services(), + kubeClient, + istioclient) + + nsController := namespace.NewNamespaceController(kubeClient, + informerFactory.Core().V1().Namespaces(), + informerFactory.Rbac().V1().Roles(), + ) + + servicemeshinformer.Start(stopCh) + istioInformer.Start(stopCh) + informerFactory.Start(stopCh) + + controllers := map[string]manager.Runnable{ + "virtualservice-controller": vsController, + "destinationrule-controller": drController, + "namespace-controller": nsController, + } + + for name, ctrl := range controllers { + err = mgr.Add(ctrl) + if err != nil { + log.Error(err, "add controller to manager failed", "name", name) + return err + } + } + + return nil +} diff --git a/cmd/controller-manager/app/helper.go b/cmd/controller-manager/app/helper.go new file mode 100644 index 000000000..621b3555e --- /dev/null +++ b/cmd/controller-manager/app/helper.go @@ -0,0 +1,38 @@ +package app + +import ( + "fmt" + "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog" +
"net/http" + "time" +) + +// WaitForAPIServer waits for the API Server's /healthz endpoint to report "ok" with timeout. +func WaitForAPIServer(client clientset.Interface, timeout time.Duration) error { + var lastErr error + + err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { + healthStatus := 0 + result := client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus) + if result.Error() != nil { + lastErr = fmt.Errorf("failed to get apiserver /healthz status: %v", result.Error()) + return false, nil + } + if healthStatus != http.StatusOK { + content, _ := result.Raw() + lastErr = fmt.Errorf("APIServer isn't healthy: %v", string(content)) + klog.Warningf("APIServer isn't healthy yet: %v. Waiting a little while.", string(content)) + return false, nil + } + + return true, nil + }) + + if err != nil { + return fmt.Errorf("%v: %v", err, lastErr) + } + + return nil +} diff --git a/cmd/controller-manager/controller-manager.go b/cmd/controller-manager/controller-manager.go index 7ce0805fc..284328f7a 100644 --- a/cmd/controller-manager/controller-manager.go +++ b/cmd/controller-manager/controller-manager.go @@ -20,93 +20,59 @@ package main import ( "flag" - + "k8s.io/client-go/tools/clientcmd" + "kubesphere.io/kubesphere/cmd/controller-manager/app" + "kubesphere.io/kubesphere/pkg/apis" + "kubesphere.io/kubesphere/pkg/controller" "os" - "sigs.k8s.io/application/pkg/apis/app/v1beta1" - - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "kubesphere.io/kubesphere/pkg/informers" - "kubesphere.io/kubesphere/pkg/simple/client/k8s" - "kubesphere.io/kubesphere/pkg/simple/controller/namespace" "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - - "kubesphere.io/kubesphere/pkg/apis" - "kubesphere.io/kubesphere/pkg/controller" - "kubesphere.io/kubesphere/pkg/webhook" ) func main() { - var metricsAddr string + var metricsAddr, kubeConfigPath, masterURL string flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") flag.Parse() logf.SetLogger(logf.ZapLogger(false)) - log := logf.Log.WithName("entrypoint") + log := logf.Log.WithName("controller-manager") - // Get a config to talk to the apiserver - log.Info("setting up client for manager") - cfg, err := k8s.Config() + kubeConfig, err := clientcmd.BuildConfigFromFlags(masterURL, kubeConfigPath) if err != nil { - log.Error(err, "unable to set up client config") + log.Error(err, "failed to build kubeconfig") os.Exit(1) } - // Create a new Cmd to provide shared dependencies and start components + stopCh := signals.SetupSignalHandler() + log.Info("setting up manager") - mgr, err := manager.New(cfg, manager.Options{}) + mgr, err := manager.New(kubeConfig, manager.Options{}) if err != nil { log.Error(err, "unable to set up overall controller manager") os.Exit(1) } - log.Info("Registering Components.") - - // Setup Scheme for all resources log.Info("setting up scheme") if err := apis.AddToScheme(mgr.GetScheme()); err != nil { log.Error(err, "unable add APIs to scheme") os.Exit(1) } - log.Info("Print all known types") - for k, v := range mgr.GetScheme().AllKnownTypes() { - if k.Group == v1beta1.SchemeGroupVersion.Group { - log.Info(k.String() + " / " + v.String()) - } - } - - // Setup all Controllers - log.Info("Setting up controller") + log.Info("Setting up controllers") if err := controller.AddToManager(mgr); err != nil { log.Error(err, "unable to register controllers to the 
manager") os.Exit(1) } - log.Info("setting up webhooks") - if err := webhook.AddToManager(mgr); err != nil { - log.Error(err, "unable to register webhooks to the manager") + if err := app.AddControllers(mgr, kubeConfig, stopCh); err != nil { + log.Error(err, "unable to register controllers to the manager") os.Exit(1) } - err = mgr.Add(manager.RunnableFunc(func(s <-chan struct{}) error { - informerFactory := informers.SharedInformerFactory() - informerFactory.Start(s) - namespace.NewNamespaceController(k8s.Client(), - informerFactory.Core().V1().Namespaces(), - informerFactory.Rbac().V1().Roles()).Start(s) - return nil - })) - - if err != nil { - log.Error(err, "error Adding controllers to the Manager") - os.Exit(1) - } - - // Start the Cmd log.Info("Starting the Cmd.") - if err := mgr.Start(signals.SetupSignalHandler()); err != nil { + if err := mgr.Start(stopCh); err != nil { log.Error(err, "unable to run the manager") os.Exit(1) } + } diff --git a/config/crds/servicemesh_v1alpha2_strategy.yaml b/config/crds/servicemesh_v1alpha2_strategy.yaml index 8bd8515bd..b4a73efba 100644 --- a/config/crds/servicemesh_v1alpha2_strategy.yaml +++ b/config/crds/servicemesh_v1alpha2_strategy.yaml @@ -6,6 +6,23 @@ metadata: controller-tools.k8s.io: "1.0" name: strategies.servicemesh.kubesphere.io spec: + additionalPrinterColumns: + - JSONPath: .spec.type + description: type of strategy + name: Type + type: string + - JSONPath: .spec.template.spec.hosts + description: destination hosts + name: Hosts + type: string + - JSONPath: .metadata.creationTimestamp + description: 'CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + name: Age + type: date group: servicemesh.kubesphere.io names: kind: Strategy @@ -28,10 +45,18 @@ spec: type: object spec: properties: + governor: + description: Governor version, the version takes control of all incoming + traffic label version value + type: string paused: description: Indicates that the strategy is paused and will not be processed by the strategy controller type: boolean + principal: + description: Principal version, the one as reference version label version + value + type: string selector: description: Label selector for virtual services. type: object diff --git a/config/samples/servicemesh_v1alpha2_strategy.yaml b/config/samples/servicemesh_v1alpha2_strategy.yaml index cfc60055c..2ca5280c5 100644 --- a/config/samples/servicemesh_v1alpha2_strategy.yaml +++ b/config/samples/servicemesh_v1alpha2_strategy.yaml @@ -12,6 +12,8 @@ spec: "servicemesh.kubesphere.io/type": "canary" template: spec: + service: "details" + principal: "v1" hosts: - details http: diff --git a/hack/docker_build.sh b/hack/docker_build.sh index 9ca83c455..c11a8883d 100755 --- a/hack/docker_build.sh +++ b/hack/docker_build.sh @@ -2,4 +2,6 @@ docker build -f build/ks-apigateway/Dockerfile -t kubespheredev/ks-apigateway:latest . docker build -f build/ks-apiserver/Dockerfile -t kubespheredev/ks-apiserver:latest . - docker build -f build/ks-iam/Dockerfile -t kubespheredev/ks-iam:latest . \ No newline at end of file + docker build -f build/ks-iam/Dockerfile -t kubespheredev/ks-iam:latest . 
+ + docker build -f build/controller-manager/Dockerfile -t kubespheredev/controller-manager:latest . \ No newline at end of file diff --git a/hack/docker_push.sh b/hack/docker_push.sh index ed4609ca3..f49caa053 100755 --- a/hack/docker_push.sh +++ b/hack/docker_push.sh @@ -6,3 +6,4 @@ echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin docker push kubespheredev/ks-apigateway:latest docker push kubespheredev/ks-apiserver:latest docker push kubespheredev/ks-iam:latest +docker push kubespheredev/controller-manager:latest diff --git a/pkg/apis/addtoscheme_servicemesh_v1alpha2.go b/pkg/apis/addtoscheme_servicemesh_v1alpha2.go index b6d708737..d1969154c 100644 --- a/pkg/apis/addtoscheme_servicemesh_v1alpha2.go +++ b/pkg/apis/addtoscheme_servicemesh_v1alpha2.go @@ -20,7 +20,7 @@ import ( "github.com/knative/pkg/apis/istio/v1alpha3" "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" - "sigs.k8s.io/application/pkg/apis/app/v1beta1" + "github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1" ) func init() { diff --git a/pkg/apis/servicemesh/v1alpha2/strategy_types.go b/pkg/apis/servicemesh/v1alpha2/strategy_types.go index 41f90fd7c..b4197a332 100644 --- a/pkg/apis/servicemesh/v1alpha2/strategy_types.go +++ b/pkg/apis/servicemesh/v1alpha2/strategy_types.go @@ -45,6 +45,16 @@ type StrategySpec struct { // Strategy type Type StrategyType `json:"type,omitempty"` + // Principal version, the reference version of the service; + // the value is a version label value + // +optional + PrincipalVersion string `json:"principal,omitempty"` + + // Governor version, the version that takes control of all incoming + // traffic; the value is a version label value + // +optional + GovernorVersion string `json:"governor,omitempty"` + // Label selector for virtual services. // +optional Selector *metav1.LabelSelector `json:"selector,omitempty"` @@ -128,6 +138,9 @@ type StrategyCondition struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Strategy is the Schema for the strategies API +// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type",description="type of strategy" +// +kubebuilder:printcolumn:name="Hosts",type="string",JSONPath=".spec.template.spec.hosts",description="destination hosts" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for lists.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata" // +k8s:openapi-gen=true type Strategy struct { metav1.TypeMeta `json:",inline"` diff --git a/pkg/apiserver/servicemesh/metrics/handlers.go b/pkg/apiserver/servicemesh/metrics/handlers.go index 8d5dce012..a5a7801ec 100644 --- a/pkg/apiserver/servicemesh/metrics/handlers.go +++ b/pkg/apiserver/servicemesh/metrics/handlers.go @@ -8,22 +8,29 @@ import ( // Get app metrics func GetAppMetrics(request *restful.Request, response *restful.Response) { - handlers.AppMetrics(response.ResponseWriter, request.Request) + handlers.AppMetrics(request, response) } // Get workload metrics func GetWorkloadMetrics(request *restful.Request, response *restful.Response) { - handlers.WorkloadMetrics(response.ResponseWriter, request.Request) + namespace := request.PathParameter("namespace") + workload := request.PathParameter("workload") + + if len(namespace) > 0 && len(workload) > 0 { + request.Request.URL.RawQuery = fmt.Sprintf("%s&namespaces=%s&workload=%s", request.Request.URL.RawQuery, namespace, workload) + } + + handlers.WorkloadMetrics(request, response) } // Get service metrics func GetServiceMetrics(request *restful.Request, response *restful.Response) { - handlers.ServiceMetrics(response.ResponseWriter, request.Request) + handlers.ServiceMetrics(request, response) } // Get namespace metrics func GetNamespaceMetrics(request *restful.Request, response *restful.Response) { - handlers.NamespaceMetrics(response.ResponseWriter, request.Request) + handlers.NamespaceMetrics(request, response) } // Get service graph for namespace @@ -34,10 +41,10 @@ func GetNamespaceGraph(request *restful.Request, response *restful.Response) { request.Request.URL.RawQuery = fmt.Sprintf("%s&namespaces=%s", request.Request.URL.RawQuery, namespace) } - handlers.GraphNamespaces(response.ResponseWriter, request.Request) + handlers.GetNamespaceGraph(request, response) } // Get service graph for namespaces func GetNamespacesGraph(request *restful.Request, response *restful.Response) { - handlers.GraphNamespaces(response.ResponseWriter, request.Request) + handlers.GraphNamespaces(request, response) } diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 000000000..5ca07dc56 --- /dev/null +++ b/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,98 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" + servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha2Interface + // Deprecated: please explicitly pick a version if possible. 
+ Servicemesh() servicemeshv1alpha2.ServicemeshV1alpha2Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + servicemeshV1alpha2 *servicemeshv1alpha2.ServicemeshV1alpha2Client +} + +// ServicemeshV1alpha2 retrieves the ServicemeshV1alpha2Client +func (c *Clientset) ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha2Interface { + return c.servicemeshV1alpha2 +} + +// Deprecated: Servicemesh retrieves the default version of ServicemeshClient. +// Please explicitly pick a version. +func (c *Clientset) Servicemesh() servicemeshv1alpha2.ServicemeshV1alpha2Interface { + return c.servicemeshV1alpha2 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.servicemeshV1alpha2, err = servicemeshv1alpha2.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.servicemeshV1alpha2 = servicemeshv1alpha2.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.servicemeshV1alpha2 = servicemeshv1alpha2.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/pkg/client/clientset/versioned/doc.go b/pkg/client/clientset/versioned/doc.go new file mode 100644 index 000000000..41721ca52 --- /dev/null +++ b/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 000000000..2d5febe62 --- /dev/null +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,82 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
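Putting the generated constructors together, a typical out-of-cluster consumer builds a rest.Config, instantiates the Clientset, and goes through the typed group client. A usage sketch (the kubeconfig path is illustrative; with both arguments empty, BuildConfigFromFlags falls back to the in-cluster config):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}

	client, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List strategies in every namespace through the typed client.
	list, err := client.ServicemeshV1alpha2().Strategies(metav1.NamespaceAll).List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, s := range list.Items {
		fmt.Printf("%s/%s type=%s\n", s.Namespace, s.Name, s.Spec.Type)
	}
}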
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" + clientset "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2" + fakeservicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +var _ clientset.Interface = &Clientset{} + +// ServicemeshV1alpha2 retrieves the ServicemeshV1alpha2Client +func (c *Clientset) ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha2Interface { + return &fakeservicemeshv1alpha2.FakeServicemeshV1alpha2{Fake: &c.Fake} +} + +// Servicemesh retrieves the ServicemeshV1alpha2Client +func (c *Clientset) Servicemesh() servicemeshv1alpha2.ServicemeshV1alpha2Interface { + return &fakeservicemeshv1alpha2.FakeServicemeshV1alpha2{Fake: &c.Fake} +} diff --git a/pkg/client/clientset/versioned/fake/doc.go b/pkg/client/clientset/versioned/fake/doc.go new file mode 100644 index 000000000..9b99e7167 --- /dev/null +++ b/pkg/client/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go new file mode 100644 index 000000000..e99a87018 --- /dev/null +++ b/pkg/client/clientset/versioned/fake/register.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + servicemeshv1alpha2.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/pkg/client/clientset/versioned/scheme/doc.go b/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..7dc375616 --- /dev/null +++ b/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. 
+package scheme diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..05e0d720e --- /dev/null +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + servicemeshv1alpha2.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/doc.go b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/doc.go new file mode 100644 index 000000000..baaf2d985 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
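Because scheme/register.go above registers the servicemesh types into the exported Scheme and Codecs, callers can decode YAML or JSON manifests of the new kinds without touching the REST client. A small sketch, assuming the manifest shape from config/samples:

package main

import (
	"fmt"

	"kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
	"kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme"
)

func main() {
	manifest := []byte(`apiVersion: servicemesh.kubesphere.io/v1alpha2
kind: Strategy
metadata:
  name: details-canary
spec:
  principal: v1
`)

	// The universal deserializer recognizes every serializer wired into
	// Codecs, including YAML, and resolves the kind via the Scheme.
	obj, _, err := scheme.Codecs.UniversalDeserializer().Decode(manifest, nil, nil)
	if err != nil {
		panic(err)
	}
	strategy := obj.(*v1alpha2.Strategy)
	fmt.Println(strategy.Spec.PrincipalVersion) // "v1"
}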
+package v1alpha2 diff --git a/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake/doc.go b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake/doc.go new file mode 100644 index 000000000..16f443990 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake/fake_servicemesh_client.go b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake/fake_servicemesh_client.go new file mode 100644 index 000000000..85a33cebf --- /dev/null +++ b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake/fake_servicemesh_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2" +) + +type FakeServicemeshV1alpha2 struct { + *testing.Fake +} + +func (c *FakeServicemeshV1alpha2) Strategies(namespace string) v1alpha2.StrategyInterface { + return &FakeStrategies{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeServicemeshV1alpha2) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake/fake_strategy.go b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake/fake_strategy.go new file mode 100644 index 000000000..bd0e35db0 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake/fake_strategy.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" +) + +// FakeStrategies implements StrategyInterface +type FakeStrategies struct { + Fake *FakeServicemeshV1alpha2 + ns string +} + +var strategiesResource = schema.GroupVersionResource{Group: "servicemesh.kubesphere.io", Version: "v1alpha2", Resource: "strategies"} + +var strategiesKind = schema.GroupVersionKind{Group: "servicemesh.kubesphere.io", Version: "v1alpha2", Kind: "Strategy"} + +// Get takes name of the strategy, and returns the corresponding strategy object, and an error if there is any. +func (c *FakeStrategies) Get(name string, options v1.GetOptions) (result *v1alpha2.Strategy, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(strategiesResource, c.ns, name), &v1alpha2.Strategy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Strategy), err +} + +// List takes label and field selectors, and returns the list of Strategies that match those selectors. +func (c *FakeStrategies) List(opts v1.ListOptions) (result *v1alpha2.StrategyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(strategiesResource, strategiesKind, c.ns, opts), &v1alpha2.StrategyList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha2.StrategyList{ListMeta: obj.(*v1alpha2.StrategyList).ListMeta} + for _, item := range obj.(*v1alpha2.StrategyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested strategies. +func (c *FakeStrategies) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(strategiesResource, c.ns, opts)) + +} + +// Create takes the representation of a strategy and creates it. Returns the server's representation of the strategy, and an error, if there is any. +func (c *FakeStrategies) Create(strategy *v1alpha2.Strategy) (result *v1alpha2.Strategy, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(strategiesResource, c.ns, strategy), &v1alpha2.Strategy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Strategy), err +} + +// Update takes the representation of a strategy and updates it. Returns the server's representation of the strategy, and an error, if there is any. +func (c *FakeStrategies) Update(strategy *v1alpha2.Strategy) (result *v1alpha2.Strategy, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(strategiesResource, c.ns, strategy), &v1alpha2.Strategy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Strategy), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeStrategies) UpdateStatus(strategy *v1alpha2.Strategy) (*v1alpha2.Strategy, error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(strategiesResource, "status", c.ns, strategy), &v1alpha2.Strategy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Strategy), err +} + +// Delete takes name of the strategy and deletes it. Returns an error if one occurs. +func (c *FakeStrategies) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(strategiesResource, c.ns, name), &v1alpha2.Strategy{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeStrategies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(strategiesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha2.StrategyList{}) + return err +} + +// Patch applies the patch and returns the patched strategy. +func (c *FakeStrategies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Strategy, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(strategiesResource, c.ns, name, pt, data, subresources...), &v1alpha2.Strategy{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha2.Strategy), err +} diff --git a/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/generated_expansion.go b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/generated_expansion.go new file mode 100644 index 000000000..32f9341fc --- /dev/null +++ b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +type StrategyExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/servicemesh_client.go b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/servicemesh_client.go new file mode 100644 index 000000000..19d5bb18b --- /dev/null +++ b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/servicemesh_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
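The fake Strategies client above round-trips objects through the testing object tracker, so unit tests need no API server. A short test sketch (test and fixture names are illustrative):

package fake_test

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
	"kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake"
)

func TestStrategyGet(t *testing.T) {
	// Seed the tracker with one pre-existing Strategy.
	seed := &v1alpha2.Strategy{
		ObjectMeta: metav1.ObjectMeta{Name: "details-canary", Namespace: "default"},
	}
	client := fake.NewSimpleClientset(seed)

	got, err := client.ServicemeshV1alpha2().Strategies("default").Get("details-canary", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != seed.Name {
		t.Errorf("got %q, want %q", got.Name, seed.Name)
	}
}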
+ +package v1alpha2 + +import ( + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" + "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" +) + +type ServicemeshV1alpha2Interface interface { + RESTClient() rest.Interface + StrategiesGetter +} + +// ServicemeshV1alpha2Client is used to interact with features provided by the servicemesh.kubesphere.io group. +type ServicemeshV1alpha2Client struct { + restClient rest.Interface +} + +func (c *ServicemeshV1alpha2Client) Strategies(namespace string) StrategyInterface { + return newStrategies(c, namespace) +} + +// NewForConfig creates a new ServicemeshV1alpha2Client for the given config. +func NewForConfig(c *rest.Config) (*ServicemeshV1alpha2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ServicemeshV1alpha2Client{client}, nil +} + +// NewForConfigOrDie creates a new ServicemeshV1alpha2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ServicemeshV1alpha2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ServicemeshV1alpha2Client for the given RESTClient. +func New(c rest.Interface) *ServicemeshV1alpha2Client { + return &ServicemeshV1alpha2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ServicemeshV1alpha2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/strategy.go b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/strategy.go new file mode 100644 index 000000000..3865d3945 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/strategy.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" + scheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" +) + +// StrategiesGetter has a method to return a StrategyInterface. +// A group's client should implement this interface. 
+type StrategiesGetter interface { + Strategies(namespace string) StrategyInterface +} + +// StrategyInterface has methods to work with Strategy resources. +type StrategyInterface interface { + Create(*v1alpha2.Strategy) (*v1alpha2.Strategy, error) + Update(*v1alpha2.Strategy) (*v1alpha2.Strategy, error) + UpdateStatus(*v1alpha2.Strategy) (*v1alpha2.Strategy, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha2.Strategy, error) + List(opts v1.ListOptions) (*v1alpha2.StrategyList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Strategy, err error) + StrategyExpansion +} + +// strategies implements StrategyInterface +type strategies struct { + client rest.Interface + ns string +} + +// newStrategies returns a Strategies +func newStrategies(c *ServicemeshV1alpha2Client, namespace string) *strategies { + return &strategies{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the strategy, and returns the corresponding strategy object, and an error if there is any. +func (c *strategies) Get(name string, options v1.GetOptions) (result *v1alpha2.Strategy, err error) { + result = &v1alpha2.Strategy{} + err = c.client.Get(). + Namespace(c.ns). + Resource("strategies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Strategies that match those selectors. +func (c *strategies) List(opts v1.ListOptions) (result *v1alpha2.StrategyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha2.StrategyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("strategies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested strategies. +func (c *strategies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("strategies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a strategy and creates it. Returns the server's representation of the strategy, and an error, if there is any. +func (c *strategies) Create(strategy *v1alpha2.Strategy) (result *v1alpha2.Strategy, err error) { + result = &v1alpha2.Strategy{} + err = c.client.Post(). + Namespace(c.ns). + Resource("strategies"). + Body(strategy). + Do(). + Into(result) + return +} + +// Update takes the representation of a strategy and updates it. Returns the server's representation of the strategy, and an error, if there is any. +func (c *strategies) Update(strategy *v1alpha2.Strategy) (result *v1alpha2.Strategy, err error) { + result = &v1alpha2.Strategy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("strategies"). + Name(strategy.Name). + Body(strategy). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
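+// UpdateStatus PUTs to the status subresource (.../strategies/<name>/status),
+// so only changes to the object's status are persisted when the subresource
+// is enabled for the CRD; spec changes sent through it are ignored.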
+ +func (c *strategies) UpdateStatus(strategy *v1alpha2.Strategy) (result *v1alpha2.Strategy, err error) { + result = &v1alpha2.Strategy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("strategies"). + Name(strategy.Name). + SubResource("status"). + Body(strategy). + Do(). + Into(result) + return +} + +// Delete takes name of the strategy and deletes it. Returns an error if one occurs. +func (c *strategies) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("strategies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *strategies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("strategies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched strategy. +func (c *strategies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha2.Strategy, err error) { + result = &v1alpha2.Strategy{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("strategies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go new file mode 100644 index 000000000..4344b82c4 --- /dev/null +++ b/pkg/client/informers/externalversions/factory.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" + versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" + servicemesh "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. 
+ startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
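+// A typical caller starts the factory and then blocks here before touching
+// any lister (illustrative sketch):
+//
+//	factory.Start(stopCh)
+//	for typ, synced := range factory.WaitForCacheSync(stopCh) {
+//		if !synced {
+//			// the informer for typ did not sync; bail out or retry
+//		}
+//	}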
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Servicemesh() servicemesh.Interface +} + +func (f *sharedInformerFactory) Servicemesh() servicemesh.Interface { + return servicemesh.New(f, f.namespace, f.tweakListOptions) +} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go new file mode 100644 index 000000000..ec21cbfdf --- /dev/null +++ b/pkg/client/informers/externalversions/generic.go @@ -0,0 +1,62 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
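+// It is backed by the informer's indexer, so reads are served from the local
+// cache rather than from the API server.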
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=servicemesh.kubesphere.io, Version=v1alpha2 + case v1alpha2.SchemeGroupVersion.WithResource("strategies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Servicemesh().V1alpha2().Strategies().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 000000000..bc3f8aec1 --- /dev/null +++ b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" + versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned" +) + +// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/pkg/client/informers/externalversions/servicemesh/interface.go b/pkg/client/informers/externalversions/servicemesh/interface.go new file mode 100644 index 000000000..eacae4816 --- /dev/null +++ b/pkg/client/informers/externalversions/servicemesh/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
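+//
+// Note (hand-written; the rest of this file is generated): this wires the
+// servicemesh group into the shared factory. The controllers below obtain
+// the Strategy informer via factory.Servicemesh().V1alpha2().Strategies().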
+ +package servicemesh + +import ( + internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" + v1alpha2 "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh/v1alpha2" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha2 provides access to shared informers for resources in V1alpha2. + V1alpha2() v1alpha2.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha2 returns a new v1alpha2.Interface. +func (g *group) V1alpha2() v1alpha2.Interface { + return v1alpha2.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/pkg/client/informers/externalversions/servicemesh/v1alpha2/interface.go b/pkg/client/informers/externalversions/servicemesh/v1alpha2/interface.go new file mode 100644 index 000000000..61a9c3d82 --- /dev/null +++ b/pkg/client/informers/externalversions/servicemesh/v1alpha2/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Strategies returns a StrategyInformer. + Strategies() StrategyInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Strategies returns a StrategyInformer. +func (v *version) Strategies() StrategyInformer { + return &strategyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/client/informers/externalversions/servicemesh/v1alpha2/strategy.go b/pkg/client/informers/externalversions/servicemesh/v1alpha2/strategy.go new file mode 100644 index 000000000..1d92ed5fe --- /dev/null +++ b/pkg/client/informers/externalversions/servicemesh/v1alpha2/strategy.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" + versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" + v1alpha2 "kubesphere.io/kubesphere/pkg/client/listers/servicemesh/v1alpha2" +) + +// StrategyInformer provides access to a shared informer and lister for +// Strategies. +type StrategyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha2.StrategyLister +} + +type strategyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewStrategyInformer constructs a new informer for Strategy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewStrategyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStrategyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredStrategyInformer constructs a new informer for Strategy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
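+// The tweakListOptions hook (may be nil) is applied to every List and Watch
+// call, which is how the factory's WithTweakListOptions option takes effect
+// for this informer.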
+func NewFilteredStrategyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServicemeshV1alpha2().Strategies(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ServicemeshV1alpha2().Strategies(namespace).Watch(options) + }, + }, + &servicemeshv1alpha2.Strategy{}, + resyncPeriod, + indexers, + ) +} + +func (f *strategyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStrategyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *strategyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&servicemeshv1alpha2.Strategy{}, f.defaultInformer) +} + +func (f *strategyInformer) Lister() v1alpha2.StrategyLister { + return v1alpha2.NewStrategyLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/listers/servicemesh/v1alpha2/expansion_generated.go b/pkg/client/listers/servicemesh/v1alpha2/expansion_generated.go new file mode 100644 index 000000000..1d68ccc64 --- /dev/null +++ b/pkg/client/listers/servicemesh/v1alpha2/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha2 + +// StrategyListerExpansion allows custom methods to be added to +// StrategyLister. +type StrategyListerExpansion interface{} + +// StrategyNamespaceListerExpansion allows custom methods to be added to +// StrategyNamespaceLister. +type StrategyNamespaceListerExpansion interface{} diff --git a/pkg/client/listers/servicemesh/v1alpha2/strategy.go b/pkg/client/listers/servicemesh/v1alpha2/strategy.go new file mode 100644 index 000000000..d2a9a86b0 --- /dev/null +++ b/pkg/client/listers/servicemesh/v1alpha2/strategy.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" +) + +// StrategyLister helps list Strategies. +type StrategyLister interface { + // List lists all Strategies in the indexer. + List(selector labels.Selector) (ret []*v1alpha2.Strategy, err error) + // Strategies returns an object that can list and get Strategies. + Strategies(namespace string) StrategyNamespaceLister + StrategyListerExpansion +} + +// strategyLister implements the StrategyLister interface. +type strategyLister struct { + indexer cache.Indexer +} + +// NewStrategyLister returns a new StrategyLister. +func NewStrategyLister(indexer cache.Indexer) StrategyLister { + return &strategyLister{indexer: indexer} +} + +// List lists all Strategies in the indexer. +func (s *strategyLister) List(selector labels.Selector) (ret []*v1alpha2.Strategy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.Strategy)) + }) + return ret, err +} + +// Strategies returns an object that can list and get Strategies. +func (s *strategyLister) Strategies(namespace string) StrategyNamespaceLister { + return strategyNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// StrategyNamespaceLister helps list and get Strategies. +type StrategyNamespaceLister interface { + // List lists all Strategies in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha2.Strategy, err error) + // Get retrieves the Strategy from the indexer for a given namespace and name. + Get(name string) (*v1alpha2.Strategy, error) + StrategyNamespaceListerExpansion +} + +// strategyNamespaceLister implements the StrategyNamespaceLister +// interface. +type strategyNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Strategies in the indexer for a given namespace. +func (s strategyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha2.Strategy, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha2.Strategy)) + }) + return ret, err +} + +// Get retrieves the Strategy from the indexer for a given namespace and name. 
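+// The indexer key is "<namespace>/<name>", the default MetaNamespaceKeyFunc
+// format, and a cache miss is surfaced as a standard NotFound API error.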
+func (s strategyNamespaceLister) Get(name string) (*v1alpha2.Strategy, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha2.Resource("strategy"), name) + } + return obj.(*v1alpha2.Strategy), nil +} diff --git a/pkg/controller/add_strategy.go b/pkg/controller/add_strategy.go index 29e97c529..ff7f88282 100644 --- a/pkg/controller/add_strategy.go +++ b/pkg/controller/add_strategy.go @@ -18,6 +18,7 @@ package controller import ( "kubesphere.io/kubesphere/pkg/controller/strategy" + "sigs.k8s.io/application/pkg/controller/application" ) func init() { @@ -25,6 +26,6 @@ func init() { AddToManagerFuncs = append(AddToManagerFuncs, strategy.Add) // Add application to manager functions - //AddToManagerFuncs = append(AddToManagerFuncs, application.Add) + AddToManagerFuncs = append(AddToManagerFuncs, application.Add) } diff --git a/pkg/controller/destinationrule/destinationrule_controller.go b/pkg/controller/destinationrule/destinationrule_controller.go new file mode 100644 index 000000000..2e28bd85c --- /dev/null +++ b/pkg/controller/destinationrule/destinationrule_controller.go @@ -0,0 +1,370 @@ +package destinationrule + +import ( + "fmt" + "github.com/knative/pkg/apis/istio/v1alpha3" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/util/metrics" + "kubesphere.io/kubesphere/pkg/controller/virtualservice/util" + "reflect" + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + + istioclientset "github.com/knative/pkg/client/clientset/versioned" + istioinformers "github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3" + istiolisters "github.com/knative/pkg/client/listers/istio/v1alpha3" + informersv1 "k8s.io/client-go/informers/apps/v1" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + listersv1 "k8s.io/client-go/listers/apps/v1" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "time" +) + +const ( + // maxRetries is the number of times a service will be retried before it is dropped out of the queue. + // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the + // sequence of delays between successive queuings of a service. 
+	//
+	// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
+	maxRetries = 15
+)
+
+var log = logf.Log.WithName("destinationrule-controller")
+
+type DestinationRuleController struct {
+	client clientset.Interface
+
+	destinationRuleClient istioclientset.Interface
+
+	eventBroadcaster record.EventBroadcaster
+	eventRecorder    record.EventRecorder
+
+	serviceLister corelisters.ServiceLister
+	serviceSynced cache.InformerSynced
+
+	deploymentLister listersv1.DeploymentLister
+	deploymentSynced cache.InformerSynced
+
+	destinationRuleLister istiolisters.DestinationRuleLister
+	destinationRuleSynced cache.InformerSynced
+
+	queue workqueue.RateLimitingInterface
+
+	workerLoopPeriod time.Duration
+}
+
+func NewDestinationRuleController(deploymentInformer informersv1.DeploymentInformer,
+	destinationRuleInformer istioinformers.DestinationRuleInformer,
+	serviceInformer coreinformers.ServiceInformer,
+	client clientset.Interface,
+	destinationRuleClient istioclientset.Interface) *DestinationRuleController {
+
+	broadcaster := record.NewBroadcaster()
+	broadcaster.StartLogging(log.Info)
+	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
+	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "destinationrule-controller"})
+
+	if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
+		metrics.RegisterMetricAndTrackRateLimiterUsage("destinationrule_controller", client.CoreV1().RESTClient().GetRateLimiter())
+	}
+
+	v := &DestinationRuleController{
+		client:                client,
+		destinationRuleClient: destinationRuleClient,
+		queue:                 workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "destinationrule"),
+		workerLoopPeriod:      time.Second,
+	}
+
+	v.deploymentLister = deploymentInformer.Lister()
+	v.deploymentSynced = deploymentInformer.Informer().HasSynced
+
+	deploymentInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc:    v.addDeployment,
+		DeleteFunc: v.deleteDeployment,
+	})
+
+	v.serviceLister = serviceInformer.Lister()
+	v.serviceSynced = serviceInformer.Informer().HasSynced
+
+	serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc:    v.enqueueService,
+		DeleteFunc: v.enqueueService,
+		UpdateFunc: func(old, cur interface{}) {
+			v.enqueueService(cur)
+		},
+	})
+
+	v.destinationRuleLister = destinationRuleInformer.Lister()
+	v.destinationRuleSynced = destinationRuleInformer.Informer().HasSynced
+
+	v.eventBroadcaster = broadcaster
+	v.eventRecorder = recorder
+
+	return v
+}
+
+func (v *DestinationRuleController) Start(stopCh <-chan struct{}) error {
+	v.Run(5, stopCh)
+
+	return nil
+}
+
+func (v *DestinationRuleController) Run(workers int, stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+	defer v.queue.ShutDown()
+
+	log.Info("starting destinationrule controller")
+	defer log.Info("shutting down destinationrule controller")
+
+	if !controller.WaitForCacheSync("destinationrule-controller", stopCh, v.serviceSynced, v.destinationRuleSynced, v.deploymentSynced) {
+		return
+	}
+
+	for i := 0; i < workers; i++ {
+		go wait.Until(v.worker, v.workerLoopPeriod, stopCh)
+	}
+
+	<-stopCh
+}
+
+func (v *DestinationRuleController) enqueueService(obj interface{}) {
+	key, err := controller.KeyFunc(obj)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
+		return
+	}
+
+	v.queue.Add(key)
+}
+
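+// worker loops until the queue is shut down; every iteration takes one
+// service key off the rate-limited queue and runs syncService on it. Run
+// starts several of these goroutines in parallel.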
+func (v *DestinationRuleController) worker() {
+	for v.processNextWorkItem() {
+	}
+}
+
+func (v *DestinationRuleController) processNextWorkItem() bool {
+	eKey, quit := v.queue.Get()
+	if quit {
+		return false
+	}
+
+	defer v.queue.Done(eKey)
+
+	err := v.syncService(eKey.(string))
+	v.handleErr(err, eKey)
+
+	return true
+}
+
+func (v *DestinationRuleController) syncService(key string) error {
+	startTime := time.Now()
+	defer func() {
+		log.V(4).Info("Finished syncing service destinationrule.", "key", key, "duration", time.Since(startTime))
+	}()
+
+	namespace, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		return err
+	}
+
+	service, err := v.serviceLister.Services(namespace).Get(name)
+	if err != nil {
+		if !errors.IsNotFound(err) {
+			return err
+		}
+
+		// Delete the corresponding destinationrule, as the service has been deleted.
+		err = v.destinationRuleClient.NetworkingV1alpha3().DestinationRules(namespace).Delete(name, nil)
+		if err != nil && !errors.IsNotFound(err) {
+			return err
+		}
+
+		return nil
+	}
+
+	if len(service.Labels) < len(util.ApplicationLabels) || !util.IsApplicationComponent(&service.ObjectMeta) ||
+		len(service.Spec.Ports) == 0 {
+		// the service either doesn't have all of the required application labels
+		// or doesn't have any ports defined, so there is nothing to reconcile
+		return nil
+	}
+
+	deployments, err := v.deploymentLister.Deployments(namespace).List(labels.Set(service.Spec.Selector).AsSelectorPreValidated())
+	if err != nil {
+		return err
+	}
+
+	subsets := []v1alpha3.Subset{}
+	for _, deployment := range deployments {
+
+		version := util.GetComponentVersion(&deployment.ObjectMeta)
+
+		if len(version) == 0 {
+			log.V(4).Info("Deployment doesn't have a version label", "key", types.NamespacedName{Namespace: deployment.Namespace, Name: deployment.Name}.String())
+			continue
+		}
+
+		// each versioned deployment backing the service becomes one subset;
+		// the subset is derived from the deployment's version label, not the service name
+		subset := v1alpha3.Subset{
+			Name: util.NormalizeVersionName(version),
+			Labels: map[string]string{
+				util.VersionLabel: version,
+			},
+		}
+
+		subsets = append(subsets, subset)
+	}
+
+	currentDestinationRule, err := v.destinationRuleLister.DestinationRules(namespace).Get(name)
+	if err != nil {
+		if !errors.IsNotFound(err) {
+			log.Error(err, "Couldn't get destinationrule for service", "key", key)
+			return err
+		}
+		currentDestinationRule = &v1alpha3.DestinationRule{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:   service.Name,
+				Labels: service.Labels,
+			},
+		}
+	}
+
+	createDestinationRule := len(currentDestinationRule.Spec.Subsets) == 0
+
+	if !createDestinationRule && reflect.DeepEqual(currentDestinationRule.Spec.Subsets, subsets) &&
+		reflect.DeepEqual(currentDestinationRule.Labels, service.Labels) {
+		log.V(5).Info("destinationrules are equal, skipping update", "key", types.NamespacedName{Namespace: service.Namespace, Name: service.Name}.String())
+		return nil
+	}
+
+	newDestinationRule := currentDestinationRule.DeepCopy()
+	newDestinationRule.Spec.Subsets = subsets
+	newDestinationRule.Labels = service.Labels
+	if newDestinationRule.Annotations == nil {
+		newDestinationRule.Annotations = make(map[string]string)
+	}
+
+	if createDestinationRule {
+		_, err = v.destinationRuleClient.NetworkingV1alpha3().DestinationRules(namespace).Create(newDestinationRule)
+	} else {
+		_, err = v.destinationRuleClient.NetworkingV1alpha3().DestinationRules(namespace).Update(newDestinationRule)
+	}
+
+	if err != nil {
+		if createDestinationRule && errors.IsForbidden(err) {
+			// A request is forbidden primarily for two reasons:
+			// 1. namespace is terminating, destinationrule creation is not allowed by default.
+			// 2. policy is misconfigured, in which case no service would function anywhere.
+			// Given the frequency of 1, we log at a lower level.
+			log.V(5).Info("Forbidden from creating destinationrule", "error", err)
+		}
+
+		if createDestinationRule {
+			v.eventRecorder.Eventf(newDestinationRule, v1.EventTypeWarning, "FailedToCreateDestinationRule", "Failed to create destinationrule for service %v/%v: %v", service.Namespace, service.Name, err)
+		} else {
+			v.eventRecorder.Eventf(newDestinationRule, v1.EventTypeWarning, "FailedToUpdateDestinationRule", "Failed to update destinationrule for service %v/%v: %v", service.Namespace, service.Name, err)
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+// When a deployment is added, figure out which services select it and
+// enqueue them. obj must have *appsv1.Deployment type.
+func (v *DestinationRuleController) addDeployment(obj interface{}) {
+	deploy := obj.(*appsv1.Deployment)
+	services, err := v.getDeploymentServiceMemberShip(deploy)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("unable to get deployment %s/%s's service memberships", deploy.Namespace, deploy.Name))
+		return
+	}
+
+	for key := range services {
+		v.queue.Add(key)
+	}
+}
+
+func (v *DestinationRuleController) deleteDeployment(obj interface{}) {
+	if _, ok := obj.(*appsv1.Deployment); ok {
+		v.addDeployment(obj)
+		return
+	}
+
+	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+	if !ok {
+		utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj))
+		return
+	}
+
+	deploy, ok := tombstone.Obj.(*appsv1.Deployment)
+	if !ok {
+		utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a deployment %#v", obj))
+		return
+	}
+
+	v.addDeployment(deploy)
+}
+
+func (v *DestinationRuleController) getDeploymentServiceMemberShip(deployment *appsv1.Deployment) (sets.String, error) {
+	set := sets.String{}
+
+	allServices, err := v.serviceLister.Services(deployment.Namespace).List(labels.Everything())
+	if err != nil {
+		return set, err
+	}
+
+	for i := range allServices {
+		service := allServices[i]
+		if service.Spec.Selector == nil {
+			// services with nil selectors match nothing, not everything.
+			continue
+		}
+		selector := labels.Set(service.Spec.Selector).AsSelectorPreValidated()
+		if selector.Matches(labels.Set(deployment.Spec.Selector.MatchLabels)) {
+			key, err := controller.KeyFunc(service)
+			if err != nil {
+				return nil, err
+			}
+			set.Insert(key)
+		}
+	}
+
+	return set, nil
+}
+
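+// handleErr implements the usual workqueue retry pattern: a successful sync
+// clears the key's rate-limit history, a failed sync requeues the key with
+// exponential backoff until maxRetries, after which the key is dropped.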
+func (v *DestinationRuleController) handleErr(err error, key interface{}) {
+	if err == nil {
+		v.queue.Forget(key)
+		return
+	}
+
+	if v.queue.NumRequeues(key) < maxRetries {
+		log.V(2).Info("Error syncing destinationrule for service, retrying.", "key", key, "error", err)
+		v.queue.AddRateLimited(key)
+		return
+	}
+
+	log.V(0).Info("Dropping service out of the queue", "key", key, "error", err)
+	v.queue.Forget(key)
+	utilruntime.HandleError(err)
+}
diff --git a/pkg/controller/destinationrule/destinationrule_controller_test.go b/pkg/controller/destinationrule/destinationrule_controller_test.go
new file mode 100644
index 000000000..8aae18746
--- /dev/null
+++ b/pkg/controller/destinationrule/destinationrule_controller_test.go
@@ -0,0 +1 @@
+package destinationrule
diff --git a/pkg/controller/strategy/helper.go b/pkg/controller/strategy/helper.go
new file mode 100644
index 000000000..cbe98e13f
--- /dev/null
+++ b/pkg/controller/strategy/helper.go
@@ -0,0 +1,42 @@
+package strategy
+
+import (
+	"fmt"
+	"github.com/knative/pkg/apis/istio/v1alpha3"
+	"k8s.io/api/core/v1"
+	"kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
+)
+
+const (
+	AppLabel = "app"
+)
+
+func getAppNameByStrategy(strategy *v1alpha2.Strategy) string {
+	if len(strategy.Labels) > 0 && len(strategy.Labels[AppLabel]) > 0 {
+		return strategy.Labels[AppLabel]
+	}
+	return ""
+}
+
+func fillDestinationPort(vs *v1alpha3.VirtualService, service *v1.Service) error {
+
+	if len(service.Spec.Ports) == 0 {
+		return fmt.Errorf("service %s/%s spec doesn't contain any ports", service.Namespace, service.Name)
+	}
+
+	// fill http port
+	for i := range vs.Spec.Http {
+		for j := range vs.Spec.Http[i].Route {
+			vs.Spec.Http[i].Route[j].Destination.Port.Number = uint32(service.Spec.Ports[0].Port)
+		}
+	}
+
+	// fill tcp port
+	for i := range vs.Spec.Tcp {
+		for j := range vs.Spec.Tcp[i].Route {
+			vs.Spec.Tcp[i].Route[j].Destination.Port.Number = uint32(service.Spec.Ports[0].Port)
+		}
+	}
+
+	return nil
+}
diff --git a/pkg/controller/strategy/strategy_controller.go b/pkg/controller/strategy/strategy_controller.go
index ce506ef79..fd61d6fd9 100644
--- a/pkg/controller/strategy/strategy_controller.go
+++ b/pkg/controller/strategy/strategy_controller.go
@@ -18,17 +18,17 @@ package strategy
 
 import (
 	"context"
+	"fmt"
 	"github.com/knative/pkg/apis/istio/v1alpha3"
-	"reflect"
-
+	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2"
+	"reflect"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
-	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -36,7 +36,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/source"
 )
 
-var log = logf.Log.WithName("controller")
+var log = logf.Log.WithName("strategy-controller")
 
 // Add creates a new Strategy Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
 // and Start it when the Manager is Started.
@@ -62,10 +62,6 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
 	if err != nil {
 		return err
 	}
-	err = c.Watch(&source.Kind{Type: &v1alpha3.VirtualService{}}, &handler.EnqueueRequestForObject{})
-	if err != nil {
-		return err
-	}
 
 	// TODO(user): Modify this to be the types you create
 	// Watch a VirtualService created by Strategy
@@ -97,32 +93,33 @@ type ReconcileStrategy struct {
 // +kubebuilder:rbac:groups=servicemesh.kubesphere.io,resources=strategies,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=servicemesh.kubesphere.io,resources=strategies/status,verbs=get;update;patch
 func (r *ReconcileStrategy) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+	// Fetch the Strategy instance
 	strategy := &servicemeshv1alpha2.Strategy{}
 	err := r.Get(context.TODO(), request.NamespacedName, strategy)
+
 	if err != nil {
 		if errors.IsNotFound(err) {
-			// Object not found, return.  Created objects are automatically garbage collected.
-			// For additional cleanup logic use finalizers.
 			return reconcile.Result{}, nil
 		}
-		// Error reading the object - requeue the request.
 		return reconcile.Result{}, err
 	}
 
-	// Define VirtualService to be created
-	vs := &v1alpha3.VirtualService{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      strategy.Name + "-virtualservice",
-			Namespace: strategy.Namespace,
-			Labels:    strategy.Spec.Selector.MatchLabels,
-		},
-		Spec: strategy.Spec.Template.Spec,
+	return r.reconcileStrategy(strategy)
+}
+
+func (r *ReconcileStrategy) reconcileStrategy(strategy *servicemeshv1alpha2.Strategy) (reconcile.Result, error) {
+
+	appName := getAppNameByStrategy(strategy)
+	service := &v1.Service{}
+
+	err := r.Get(context.TODO(), types.NamespacedName{Namespace: strategy.Namespace, Name: appName}, service)
+	if err != nil {
+		log.Error(err, "couldn't find service for strategy", "namespace", strategy.Namespace, "name", appName)
+		return reconcile.Result{}, errors.NewBadRequest(fmt.Sprintf("service %s not found", appName))
 	}
 
-	if err := controllerutil.SetControllerReference(strategy, vs, r.scheme); err != nil {
-		return reconcile.Result{}, err
-	}
+	vs, err := r.generateVirtualService(strategy, service)
+	if err != nil {
+		return reconcile.Result{}, err
+	}
 
 	// Check if the VirtualService already exists
 	found := &v1alpha3.VirtualService{}
@@ -130,14 +127,16 @@ func (r *ReconcileStrategy) Reconcile(request reconcile.Request) (reconcile.Resu
 	if err != nil && errors.IsNotFound(err) {
 		log.Info("Creating VirtualService", "namespace", vs.Namespace, "name", vs.Name)
 		err = r.Create(context.TODO(), vs)
+
 		return reconcile.Result{}, err
 	} else if err != nil {
 		return reconcile.Result{}, err
 	}
 
 	// Update the found object and write the result back if there are any changes
-	if !reflect.DeepEqual(vs.Spec, found.Spec) {
+	if !reflect.DeepEqual(vs.Spec, found.Spec) || len(vs.OwnerReferences) == 0 {
 		found.Spec = vs.Spec
+		found.OwnerReferences = vs.OwnerReferences
 		log.Info("Updating VirtualService", "namespace", vs.Namespace, "name", vs.Name)
 		err = r.Update(context.TODO(), found)
 		if err != nil {
@@ -146,3 +145,48 @@ func (r *ReconcileStrategy) Reconcile(request reconcile.Request) (reconcile.Resu
 	}
 	return reconcile.Result{}, nil
 }
+
+func (r *ReconcileStrategy) generateVirtualService(strategy *servicemeshv1alpha2.Strategy, service *v1.Service) (*v1alpha3.VirtualService, error) {
+
+	// Define VirtualService to be created
+	vs := &v1alpha3.VirtualService{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      getAppNameByStrategy(strategy),
+			Namespace: strategy.Namespace,
+			Labels:
strategy.Spec.Selector.MatchLabels, + }, + Spec: strategy.Spec.Template.Spec, + } + + // one version rules them all + if len(strategy.Spec.GovernorVersion) > 0 { + + governorDestinationWeight := v1alpha3.DestinationWeight{ + Destination: v1alpha3.Destination{ + Host: getAppNameByStrategy(strategy), + Subset: strategy.Spec.GovernorVersion, + }, + Weight: 100, + } + + if len(strategy.Spec.Template.Spec.Http) > 0 { + governorRoute := v1alpha3.HTTPRoute{ + Route: []v1alpha3.DestinationWeight{governorDestinationWeight}, + } + + vs.Spec.Http = []v1alpha3.HTTPRoute{governorRoute} + } else if len(strategy.Spec.Template.Spec.Tcp) > 0 { + governorRoute := v1alpha3.TCPRoute{ + Route: []v1alpha3.DestinationWeight{governorDestinationWeight}, + } + vs.Spec.Tcp = []v1alpha3.TCPRoute{governorRoute} + } + + } + + if err := fillDestinationPort(vs, service); err != nil { + return nil, err + } + + return vs, nil +} diff --git a/pkg/controller/strategy/strategy_controller_test.go b/pkg/controller/strategy/strategy_controller_test.go index dd4b2f714..f3cc5786a 100644 --- a/pkg/controller/strategy/strategy_controller_test.go +++ b/pkg/controller/strategy/strategy_controller_test.go @@ -19,6 +19,7 @@ package strategy import ( "github.com/knative/pkg/apis/istio/common/v1alpha1" "github.com/knative/pkg/apis/istio/v1alpha3" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/json" "testing" "time" @@ -37,23 +38,47 @@ import ( var c client.Client var expectedRequest = reconcile.Request{NamespacedName: types.NamespacedName{Name: "foo", Namespace: "default"}} -var depKey = types.NamespacedName{Name: "foo-virtualservice", Namespace: "default"} +var depKey = types.NamespacedName{Name: "details", Namespace: "default"} const timeout = time.Second * 5 +var labels = map[string]string{ + "app.kubernetes.io/name": "details", + "app.kubernetes.io/version": "v1", + "app": "details", + "servicemesh.kubesphere.io/enabled": "", +} + +var svc = v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "details", + Namespace: "default", + Labels: labels, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: "http", + Port: 8080, + Protocol: v1.ProtocolTCP, + }, + }, + Selector: labels, + }, +} + func TestReconcile(t *testing.T) { g := gomega.NewGomegaWithT(t) instance := &servicemeshv1alpha2.Strategy{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "default", + Labels: labels, }, Spec: servicemeshv1alpha2.StrategySpec{ Type: servicemeshv1alpha2.CanaryType, Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "type": "Canary", - }, + MatchLabels: labels, }, Template: servicemeshv1alpha2.VirtualServiceTemplateSpec{ Spec: v1alpha3.VirtualServiceSpec{ @@ -111,6 +136,14 @@ func TestReconcile(t *testing.T) { mgrStopped.Wait() }() + err = c.Create(context.TODO(), &svc) + if apierrors.IsInvalid(err) { + t.Logf("failed to create service, %v", err) + return + } + g.Expect(err).NotTo(gomega.HaveOccurred()) + //defer c.Delete(context.TODO(), &svc) + // Create the Strategy object and expect the Reconcile and Deployment to be created err = c.Create(context.TODO(), instance) // The instance object may not be a valid object because it might be missing some required fields. 
@@ -119,6 +152,7 @@ func TestReconcile(t *testing.T) {
 		t.Logf("failed to create object, got an invalid object error: %v", err)
 		return
 	}
+	g.Expect(err).NotTo(gomega.HaveOccurred())
 	defer c.Delete(context.TODO(), instance)
 
 	g.Eventually(requests, timeout).Should(gomega.Receive(gomega.Equal(expectedRequest)))
@@ -133,11 +167,11 @@ func TestReconcile(t *testing.T) {
 
 	// Delete the Deployment and expect Reconcile to be called for Deployment deletion
 	g.Expect(c.Delete(context.TODO(), vs)).NotTo(gomega.HaveOccurred())
-	g.Eventually(requests, timeout).Should(gomega.Receive(gomega.Equal(expectedRequest)))
+	//g.Eventually(requests, timeout).Should(gomega.Receive(gomega.Equal(expectedRequest)))
 	//g.Eventually(func() error { return c.Get(context.TODO(), depKey, vs) }, timeout).Should(gomega.Succeed())
 
 	// Manually delete Deployment since GC isn't enabled in the test control plane
 	g.Eventually(func() error { return c.Delete(context.TODO(), vs) }, timeout).
-		Should(gomega.MatchError("virtualservices.networking.istio.io \"foo-virtualservice\" not found"))
+		Should(gomega.MatchError("virtualservices.networking.istio.io \"details\" not found"))
 
 }
diff --git a/pkg/controller/virtualservice/util/util.go b/pkg/controller/virtualservice/util/util.go
new file mode 100644
index 000000000..a0e1861be
--- /dev/null
+++ b/pkg/controller/virtualservice/util/util.go
@@ -0,0 +1,72 @@
+package util
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"strings"
+)
+
+const (
+	AppLabel                = "app"
+	VersionLabel            = "version"
+	ApplicationNameLabel    = "app.kubernetes.io/name"
+	ApplicationVersionLabel = "app.kubernetes.io/version"
+	ServiceMeshEnabledLabel = "servicemesh.kubesphere.io/enabled"
+)
+
+// resources carrying all of the following labels are considered part of the servicemesh
+var ApplicationLabels = [...]string{
+	ApplicationNameLabel,
+	ApplicationVersionLabel,
+	ServiceMeshEnabledLabel,
+	AppLabel,
+}
+
+var TrimChars = [...]string{".", "_", "-"}
+
+// NormalizeVersionName strips [._-] from version names so they can be
+// used as Istio subset names.
+func NormalizeVersionName(version string) string {
+	for _, char := range TrimChars {
+		version = strings.ReplaceAll(version, char, "")
+	}
+	return version
+}
+
+func GetComponentName(meta *metav1.ObjectMeta) string {
+	if len(meta.Labels[AppLabel]) > 0 {
+		return meta.Labels[AppLabel]
+	}
+	return ""
+}
+
+func GetComponentVersion(meta *metav1.ObjectMeta) string {
+	if len(meta.Labels[VersionLabel]) > 0 {
+		return meta.Labels[VersionLabel]
+	}
+	return ""
+}
+
+func ExtractApplicationLabels(meta *metav1.ObjectMeta) map[string]string {
+
+	labels := make(map[string]string, 0)
+	for _, label := range ApplicationLabels {
+		if len(meta.Labels[label]) == 0 {
+			return nil
+		} else {
+			labels[label] = meta.Labels[label]
+		}
+	}
+
+	return labels
+}
+
+func IsApplicationComponent(meta *metav1.ObjectMeta) bool {
+
+	for _, label := range ApplicationLabels {
+		if len(meta.Labels[label]) == 0 {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/pkg/controller/virtualservice/virtualservice_controller.go b/pkg/controller/virtualservice/virtualservice_controller.go
new file mode 100644
index 000000000..1289bee2e
--- /dev/null
+++ b/pkg/controller/virtualservice/virtualservice_controller.go
@@ -0,0 +1,349 @@
+package virtualservice
+
+import (
+	"fmt"
+	"github.com/knative/pkg/apis/istio/v1alpha3"
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	
"k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/util/metrics" + "kubesphere.io/kubesphere/pkg/controller/virtualservice/util" + "strings" + + logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" + + istioclient "github.com/knative/pkg/client/clientset/versioned" + istioinformers "github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3" + istiolisters "github.com/knative/pkg/client/listers/istio/v1alpha3" + coreinformers "k8s.io/client-go/informers/core/v1" + clientset "k8s.io/client-go/kubernetes" + corelisters "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + servicemeshinformers "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh/v1alpha2" + servicemeshlisters "kubesphere.io/kubesphere/pkg/client/listers/servicemesh/v1alpha2" + + "time" +) + +const ( + // maxRetries is the number of times a service will be retried before it is dropped out of the queue. + // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the + // sequence of delays between successive queuings of a service. + // + // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s + maxRetries = 15 +) + +var log = logf.Log.WithName("virtualservice-controller") + +type VirtualServiceController struct { + client clientset.Interface + + virtualServiceClient istioclient.Interface + + eventBroadcaster record.EventBroadcaster + eventRecorder record.EventRecorder + + serviceLister corelisters.ServiceLister + serviceSynced cache.InformerSynced + + virtualServiceLister istiolisters.VirtualServiceLister + virtualServiceSynced cache.InformerSynced + + destinationRuleLister istiolisters.DestinationRuleLister + destinationRuleSynced cache.InformerSynced + + strategyLister servicemeshlisters.StrategyLister + strategySynced cache.InformerSynced + + queue workqueue.RateLimitingInterface + + workerLoopPeriod time.Duration +} + +func NewVirtualServiceController(serviceInformer coreinformers.ServiceInformer, + virtualServiceInformer istioinformers.VirtualServiceInformer, + destinationRuleInformer istioinformers.DestinationRuleInformer, + strategyInformer servicemeshinformers.StrategyInformer, + client clientset.Interface, + virtualServiceClient istioclient.Interface) *VirtualServiceController { + + broadcaster := record.NewBroadcaster() + broadcaster.StartLogging(log.Info) + broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) + recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "virtualservice-controller"}) + + if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil { + metrics.RegisterMetricAndTrackRateLimiterUsage("virtualservice_controller", client.CoreV1().RESTClient().GetRateLimiter()) + } + + v := &VirtualServiceController{ + client: client, + virtualServiceClient: virtualServiceClient, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "virtualservice"), + workerLoopPeriod: time.Second, + } + + serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: v.enqueueService, + DeleteFunc: v.enqueueService, + UpdateFunc: func(old, cur interface{}) { + v.enqueueService(cur) + }, + }) + + v.serviceLister = serviceInformer.Lister() + v.serviceSynced = serviceInformer.Informer().HasSynced + + 
v.strategyLister = strategyInformer.Lister() + v.strategySynced = strategyInformer.Informer().HasSynced + + strategyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + DeleteFunc: v.deleteStrategy, + }) + + v.destinationRuleLister = destinationRuleInformer.Lister() + v.destinationRuleSynced = destinationRuleInformer.Informer().HasSynced + + destinationRuleInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: v.addDestinationRule, + }) + + v.virtualServiceLister = virtualServiceInformer.Lister() + v.virtualServiceSynced = virtualServiceInformer.Informer().HasSynced + + v.eventBroadcaster = broadcaster + v.eventRecorder = recorder + + return v + +} + +func (v *VirtualServiceController) Start(stopCh <-chan struct{}) error { + v.Run(5, stopCh) + return nil +} + +func (v *VirtualServiceController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer v.queue.ShutDown() + + log.Info("starting virtualservice controller") + defer log.Info("shutting down virtualservice controller") + + if !controller.WaitForCacheSync("virtualservice-controller", stopCh, v.serviceSynced, v.virtualServiceSynced, v.destinationRuleSynced, v.strategySynced) { + return + } + + for i := 0; i < workers; i++ { + go wait.Until(v.worker, v.workerLoopPeriod, stopCh) + } + + go func() { + defer utilruntime.HandleCrash() + }() + + <-stopCh +} + +func (v *VirtualServiceController) enqueueService(obj interface{}) { + key, err := controller.KeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err)) + return + } + + v.queue.Add(key) +} + +func (v *VirtualServiceController) worker() { + + for v.processNextWorkItem() { + } +} + +func (v *VirtualServiceController) processNextWorkItem() bool { + eKey, quit := v.queue.Get() + if quit { + return false + } + + defer v.queue.Done(eKey) + + err := v.syncService(eKey.(string)) + v.handleErr(err, eKey) + + return true +} + +func (v *VirtualServiceController) syncService(key string) error { + startTime := time.Now() + defer func() { + log.V(4).Info("Finished syncing service virtualservice. ", "service", key, "duration", time.Since(startTime)) + }() + + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return err + } + + service, err := v.serviceLister.Services(namespace).Get(name) + if err != nil { + // Delete the corresponding virtualservice, as the service has been deleted. 
+		err = v.virtualServiceClient.NetworkingV1alpha3().VirtualServices(namespace).Delete(name, nil)
+		if err != nil && !errors.IsNotFound(err) {
+			return err
+		}
+
+		return nil
+	}
+
+	if len(service.Labels) < len(util.ApplicationLabels) || !util.IsApplicationComponent(&service.ObjectMeta) ||
+		len(service.Spec.Ports) == 0 {
+		// skip services that don't carry all of the application labels,
+		// aren't part of an application,
+		// or don't have any ports defined
+		return nil
+	}
+
+	vs, err := v.virtualServiceLister.VirtualServices(namespace).Get(name)
+	if err == nil {
+		// a virtualservice already exists, no need to create another one
+		return nil
+	}
+
+	destinationRule, err := v.destinationRuleLister.DestinationRules(namespace).Get(name)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			// there is no destinationrule for this service yet,
+			// maybe the corresponding workloads are not created yet
+			return nil
+		}
+		log.Error(err, "Couldn't get destinationrule for service.", "service", types.NamespacedName{Name: service.Name, Namespace: service.Namespace}.String())
+		return err
+	}
+
+	subsets := destinationRule.Spec.Subsets
+	if len(subsets) == 0 {
+		// a destinationrule without subsets should never happen
+		err = fmt.Errorf("found destinationrule with no subsets for service %s", name)
+		log.Error(err, "Found destinationrule with no subsets for service", "service", service.String())
+		return err
+	} else {
+		vs = &v1alpha3.VirtualService{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      name,
+				Namespace: namespace,
+				Labels:    util.ExtractApplicationLabels(&service.ObjectMeta),
+			},
+			Spec: v1alpha3.VirtualServiceSpec{
+				Hosts: []string{name},
+			},
+		}
+
+		// build routes for the service's TCP ports; a port named "http" or "http-*" becomes an HTTP route
+		for _, port := range service.Spec.Ports {
+			var route v1alpha3.DestinationWeight
+			if port.Protocol == v1.ProtocolTCP {
+				route = v1alpha3.DestinationWeight{
+					Destination: v1alpha3.Destination{
+						Host:   name,
+						Subset: subsets[0].Name,
+						Port: v1alpha3.PortSelector{
+							Number: uint32(port.Port),
+						},
+					},
+					Weight: 100,
+				}
+
+				// an http port, add an HTTPRoute
+				if len(port.Name) > 0 && (port.Name == "http" || strings.HasPrefix(port.Name, "http-")) {
+					vs.Spec.Http = []v1alpha3.HTTPRoute{{Route: []v1alpha3.DestinationWeight{route}}}
+					break
+				}
+
+				// everything else is treated as a TCPRoute
+				vs.Spec.Tcp = []v1alpha3.TCPRoute{{Route: []v1alpha3.DestinationWeight{route}}}
+			}
+		}
+
+		if len(vs.Spec.Http) > 0 || len(vs.Spec.Tcp) > 0 {
+			_, err := v.virtualServiceClient.NetworkingV1alpha3().VirtualServices(namespace).Create(vs)
+			if err != nil {
+
+				v.eventRecorder.Eventf(vs, v1.EventTypeWarning, "FailedToCreateVirtualService", "Failed to create virtualservice for service %v/%v: %v", service.Namespace, service.Name, err)
+
+				log.Error(err, "create virtualservice for service failed.", "service", service)
+				return err
+			}
+		} else {
+			log.Info("service has no http or tcp ports, skip creating virtualservice", "service", name)
+			return nil
+		}
+
+	}
+
+	return nil
+}
+
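The port loop above encodes a naming convention: only TCP-protocol ports are considered, and a port named "http" or prefixed with "http-" is published as an HTTP route, while every other TCP port falls back to a plain TCP route. A minimal illustrative helper capturing that rule (not part of the patch; isHTTPPort is a hypothetical name):

package main

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
)

// isHTTPPort mirrors the classification used in syncService: a TCP port
// named exactly "http" or starting with "http-" becomes an HTTP route,
// any other TCP port becomes a plain TCP route.
func isHTTPPort(port v1.ServicePort) bool {
	if port.Protocol != v1.ProtocolTCP {
		return false
	}
	return port.Name == "http" || strings.HasPrefix(port.Name, "http-")
}

func main() {
	port := v1.ServicePort{Name: "http-web", Protocol: v1.ProtocolTCP, Port: 80}
	fmt.Println(isHTTPPort(port)) // true
}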
+// When a destinationrule is added, figure out which service it belongs to
+// and enqueue it. obj must be of type *v1alpha3.DestinationRule.
+func (v *VirtualServiceController) addDestinationRule(obj interface{}) {
+	dr := obj.(*v1alpha3.DestinationRule)
+	service, err := v.serviceLister.Services(dr.Namespace).Get(dr.Name)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			log.V(0).Info("service not created yet", "key", dr.Name)
+			return
+		}
+		utilruntime.HandleError(fmt.Errorf("unable to get service with name %s/%s", dr.Namespace, dr.Name))
+		return
+	}
+
+	_, err = v.virtualServiceLister.VirtualServices(dr.Namespace).Get(dr.Name)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			key, err := controller.KeyFunc(service)
+			if err != nil {
+				utilruntime.HandleError(fmt.Errorf("get service %s/%s key failed", service.Namespace, service.Name))
+				return
+			}
+
+			v.queue.Add(key)
+		}
+	} else {
+		// a virtualservice already exists, nothing to do
+	}
+
+	return
+}
+
+func (v *VirtualServiceController) deleteStrategy(obj interface{}) {
+	// nothing to do right now
+
+}
+
+func (v *VirtualServiceController) handleErr(err error, key interface{}) {
+	if err == nil {
+		v.queue.Forget(key)
+		return
+	}
+
+	if v.queue.NumRequeues(key) < maxRetries {
+		log.V(2).Info("Error syncing virtualservice for service, retrying.", "key", key, "error", err)
+		v.queue.AddRateLimited(key)
+		return
+	}
+
+	log.V(0).Info("Dropping service out of the queue.", "key", key, "error", err)
+	v.queue.Forget(key)
+	utilruntime.HandleError(err)
+}
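The retry behaviour in handleErr comes from the queue's rate limiter. Assuming the DefaultControllerRateLimiter from client-go's workqueue package (the one NewVirtualServiceController uses), each key backs off exponentially, which is where the delay sequence in the maxRetries comment comes from. A minimal standalone sketch printing the first few delays:

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Per-item exponential backoff (base 5ms, capped at 1000s) combined
	// with an overall token bucket; the per-item backoff dominates here.
	limiter := workqueue.DefaultControllerRateLimiter()
	for i := 0; i < 5; i++ {
		fmt.Println(limiter.When("default/reviews")) // 5ms, 10ms, 20ms, 40ms, 80ms
	}
}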
diff --git a/pkg/controller/virtualservice/virtualservice_controller_test.go b/pkg/controller/virtualservice/virtualservice_controller_test.go
new file mode 100644
index 000000000..06c5c3f57
--- /dev/null
+++ b/pkg/controller/virtualservice/virtualservice_controller_test.go
@@ -0,0 +1 @@
+package virtualservice
diff --git a/pkg/models/log/constants.go b/pkg/models/log/constants.go
index a184b6f64..cfb8efcce 100644
--- a/pkg/models/log/constants.go
+++ b/pkg/models/log/constants.go
@@ -27,4 +27,4 @@ const (
 	QueryLevelWorkload
 	QueryLevelPod
 	QueryLevelContainer
-)
\ No newline at end of file
+)
diff --git a/pkg/models/log/logcollector.go b/pkg/models/log/logcollector.go
index 0afe56c54..d0a30894f 100644
--- a/pkg/models/log/logcollector.go
+++ b/pkg/models/log/logcollector.go
@@ -306,4 +306,4 @@ func GetWorkspaceOfNamesapce(namespace string) string {
 	}
 
 	return workspace
-}
\ No newline at end of file
+}
diff --git a/pkg/models/log/types.go b/pkg/models/log/types.go
index ca7911067..072b3b13e 100644
--- a/pkg/models/log/types.go
+++ b/pkg/models/log/types.go
@@ -61,4 +61,4 @@ type OutputDBBinding struct {
 	Internal   bool
 	Enable     bool      `gorm:"not null"`
 	Updatetime time.Time `gorm:"not null"`
-}
\ No newline at end of file
+}
diff --git a/pkg/models/servicemesh/application.go b/pkg/models/servicemesh/application.go
new file mode 100644
index 000000000..1c4db0710
--- /dev/null
+++ b/pkg/models/servicemesh/application.go
@@ -0,0 +1,69 @@
+package servicemesh
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"strings"
+)
+
+const (
+	AppLabel                = "app"
+	VersionLabel            = "version"
+	ApplicationNameLabel    = "app.kubernetes.io/name"
+	ApplicationVersionLabel = "app.kubernetes.io/version"
+)
+
+var ApplicationLabels = [...]string{
+	ApplicationNameLabel,
+	ApplicationVersionLabel,
+	AppLabel,
+}
+
+var TrimChars = [...]string{".", "_", "-"}
+
+// NormalizeVersionName normalizes a version name
+// by stripping the characters [._-]
+func NormalizeVersionName(version string) string {
+	for _, char := range TrimChars {
+		version = strings.ReplaceAll(version, char, "")
+	}
+	return version
+}
+
+func GetComponentName(meta *metav1.ObjectMeta) string {
+	if len(meta.Labels[AppLabel]) > 0 {
+		return meta.Labels[AppLabel]
+	}
+	return ""
+}
+
+func GetComponentVersion(meta *metav1.ObjectMeta) string {
+	if len(meta.Labels[VersionLabel]) > 0 {
+		return meta.Labels[VersionLabel]
+	}
+	return ""
+}
+
+func ExtractApplicationLabels(meta *metav1.ObjectMeta) map[string]string {
+
+	labels := make(map[string]string)
+	for _, label := range ApplicationLabels {
+		if len(meta.Labels[label]) == 0 {
+			return nil
+		} else {
+			labels[label] = meta.Labels[label]
+		}
+	}
+
+	return labels
+}
+
+func IsApplicationComponent(meta *metav1.ObjectMeta) bool {
+
+	for _, label := range ApplicationLabels {
+		if len(meta.Labels[label]) == 0 {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/pkg/simple/client/prometheus/prometheusclient.go b/pkg/simple/client/prometheus/prometheusclient.go
index 0c3855a38..de931b2f3 100644
--- a/pkg/simple/client/prometheus/prometheusclient.go
+++ b/pkg/simple/client/prometheus/prometheusclient.go
@@ -18,6 +18,7 @@ package prometheus
 
 import (
+	"flag"
 	"io/ioutil"
 	"net/http"
 	"net/url"
@@ -25,8 +26,6 @@ import (
 	"strings"
 	"time"
 
-	"os"
-
 	"github.com/emicklei/go-restful"
 	"github.com/golang/glog"
 )
@@ -46,10 +45,7 @@ var PrometheusAPIServer = "prometheus-k8s.kubesphere-monitoring-system.svc"
 
 var PrometheusAPIEndpoint string
 
 func init() {
-	if env := os.Getenv(PrometheusAPIServerEnv); env != "" {
-		PrometheusAPIServer = env
-	}
-	PrometheusAPIEndpoint = DefaultScheme + "://" + PrometheusAPIServer + ":" + DefaultPrometheusPort + PrometheusApiPath
+	flag.StringVar(&PrometheusAPIEndpoint, "prometheus-endpoint", "http://prometheus-k8s.kubesphere-monitoring-system.svc:9090/api/v1", "Prometheus API endpoint")
 }
 
 type MonitoringRequestParams struct {
diff --git a/pkg/simple/controller/namespace/namespaces.go b/pkg/simple/controller/namespace/namespaces.go
index 2c27cf6f9..4881fd6de 100644
--- a/pkg/simple/controller/namespace/namespaces.go
+++ b/pkg/simple/controller/namespace/namespaces.go
@@ -85,7 +85,7 @@ func NewNamespaceController(
 	return controller
 }
 
-func (c *NamespaceController) Start(stopCh <-chan struct{}) {
+func (c *NamespaceController) Start(stopCh <-chan struct{}) error {
 	go func() {
 		defer utilruntime.HandleCrash()
 		defer c.workqueue.ShutDown()
@@ -109,6 +109,8 @@ func (c *NamespaceController) Start(stopCh <-chan struct{}) {
 		<-stopCh
 		glog.Info("shutting down workers")
 	}()
+
+	return nil
 }
 
 func (c *NamespaceController) runWorker() {
diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE
new file mode 100644
index 000000000..0eb9b72d8
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2014, Evan Phoenix
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+* Neither the name of the Evan Phoenix nor the names of its contributors
+  may be used to endorse or promote products derived from this software
+  without specific prior written permission.
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 000000000..6806c4c20 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,383 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + pruneNulls(v) + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + newAry = append(newAry, v) + } + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. 
+func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, errBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, errBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, errBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, errBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, errBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 000000000..f26b6824b --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,682 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var SupportNegativeIndices bool = true + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +type operation map[string]*json.RawMessage + +// Patch is an ordered collection of operations. +type Patch []operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, fmt.Errorf("Unknown type") + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document") + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array") + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := 
json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +func (o operation) kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) path() string { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) from() string { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return fmt.Errorf("Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +func (d *partialArray) set(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + sz := len(*d) + if idx+1 > sz { + sz = idx + 1 + } + + ary := make([]*lazyNode, sz) + + cur := *d + + copy(ary, cur) + + if idx >= len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + ary[idx] = val + + *d 
= ary + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + ary := make([]*lazyNode, len(*d)+1) + + cur := *d + + if idx >= len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if SupportNegativeIndices { + if idx < -len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(ary) + } + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, fmt.Errorf("Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if SupportNegativeIndices { + if idx < -len(cur) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(cur) + } + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path) + } + + return con.add(key, op.value()) +} + +func (p Patch) remove(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path) + } + + return con.remove(key) +} + +func (p Patch) replace(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path) + } + + return con.set(key, op.value()) +} + +func (p Patch) move(doc *container, op operation) error { + from := op.from() + + con, key := findObject(doc, from) + + if con == nil { + return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return err + } + + err = con.remove(key) + if err != nil { + return err + } + + path := op.path() + + con, key = findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path) + } + + return con.set(key, val) +} + +func (p Patch) test(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + + if err != nil { + return err + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return fmt.Errorf("Testing value %s failed", path) + } else if op.value() == nil { + return fmt.Errorf("Testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + 
return fmt.Errorf("Testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op operation) error { + from := op.from() + + con, key := findObject(doc, from) + + if con == nil { + return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return err + } + + path := op.path() + + con, key = findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path) + } + + return con.set(key, val) +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + for _, op := range p { + switch op.kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op) + default: + err = fmt.Errorf("Unexpected kind: %s", op.kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. 
+ +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/kiali/kiali/graph/options/options.go b/vendor/github.com/kiali/kiali/graph/options/options.go index 05f7047f6..f24247d9b 100644 --- a/vendor/github.com/kiali/kiali/graph/options/options.go +++ b/vendor/github.com/kiali/kiali/graph/options/options.go @@ -3,14 +3,12 @@ package options import ( "fmt" - "net/http" + "github.com/emicklei/go-restful" "net/url" "strconv" "strings" "time" - "github.com/gorilla/mux" - "github.com/kiali/kiali/business" "github.com/kiali/kiali/graph" "github.com/kiali/kiali/graph/appender" @@ -64,17 +62,26 @@ type Options struct { VendorOptions } -func NewOptions(r *http.Request) Options { +func getParameters(key string, request *restful.Request) string { + value, ok := request.PathParameters()[key] + + if !ok { + return request.QueryParameter(key) + } + + return value +} + +func NewOptions(request *restful.Request) Options { // path variables (0 or more will be set) - vars := mux.Vars(r) - app := vars["app"] - namespace := vars["namespace"] - service := vars["service"] - version := vars["version"] - workload := vars["workload"] + app := getParameters("app", request) + namespace := getParameters("namespace", request) + service := getParameters("service", request) + version := getParameters("version", request) + workload := getParameters("workload", request) // query params - params := r.URL.Query() + params := request.Request.URL.Query() var duration time.Duration var includeIstio bool var injectServiceNodes bool diff --git a/vendor/github.com/kiali/kiali/handlers/apps.go b/vendor/github.com/kiali/kiali/handlers/apps.go index 7216e20c0..eec7c4a05 100644 --- a/vendor/github.com/kiali/kiali/handlers/apps.go +++ b/vendor/github.com/kiali/kiali/handlers/apps.go @@ -1,6 +1,7 @@ package handlers import ( + "github.com/emicklei/go-restful" "net/http" "github.com/gorilla/mux" @@ -61,31 +62,30 @@ func AppDetails(w http.ResponseWriter, r *http.Request) { } // AppMetrics is the API handler to fetch metrics to be displayed, related to an app-label grouping -func AppMetrics(w http.ResponseWriter, r *http.Request) { - getAppMetrics(w, r, defaultPromClientSupplier, defaultK8SClientSupplier) +func AppMetrics(request *restful.Request, response *restful.Response) { + getAppMetrics(request, response, defaultPromClientSupplier, defaultK8SClientSupplier) } // getAppMetrics (mock-friendly version) -func getAppMetrics(w http.ResponseWriter, r *http.Request, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier) { - vars := mux.Vars(r) - namespace := vars["namespace"] - app := vars["app"] +func getAppMetrics(request *restful.Request, response *restful.Response, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier) { + namespace := request.PathParameters()["namespace"] + app := request.PathParameters()["app"] - prom, _, namespaceInfo := initClientsForMetrics(w, promSupplier, k8sSupplier, namespace) + prom, _, namespaceInfo := initClientsForMetrics(response.ResponseWriter, promSupplier, k8sSupplier, namespace) if prom == nil { // any returned value nil means error & response already written return } params := prometheus.IstioMetricsQuery{Namespace: namespace, App: app} - err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo) + err := extractIstioMetricsQueryParams(request.Request, ¶ms, namespaceInfo) if err != nil { - RespondWithError(w, http.StatusBadRequest, err.Error()) + 
RespondWithError(response.ResponseWriter, http.StatusBadRequest, err.Error())
 		return
 	}
 
 	metrics := prom.GetMetrics(&params)
-	RespondWithJSON(w, http.StatusOK, metrics)
+	RespondWithJSON(response.ResponseWriter, http.StatusOK, metrics)
 }
 
 // CustomDashboard is the API handler to fetch runtime metrics to be displayed, related to a single app
diff --git a/vendor/github.com/kiali/kiali/handlers/graph.go b/vendor/github.com/kiali/kiali/handlers/graph.go
index 2245919ca..5214eec84 100644
--- a/vendor/github.com/kiali/kiali/handlers/graph.go
+++ b/vendor/github.com/kiali/kiali/handlers/graph.go
@@ -34,6 +34,7 @@ package handlers
 import (
 	"context"
 	"fmt"
+	"github.com/emicklei/go-restful"
 	"net/http"
 	"runtime/debug"
 	"time"
@@ -51,26 +52,35 @@ import (
 	"github.com/kiali/kiali/prometheus/internalmetrics"
 )
 
-// GraphNamespaces is a REST http.HandlerFunc handling graph generation for 1 or more namespaces
-func GraphNamespaces(w http.ResponseWriter, r *http.Request) {
-	defer handlePanic(w)
+func GetNamespaceGraph(request *restful.Request, response *restful.Response) {
+	defer handlePanic(response.ResponseWriter)
 
 	client, err := prometheus.NewClient()
 	graph.CheckError(err)
 
-	graphNamespaces(w, r, client)
+	graphNamespaces(request, response, client)
+}
+
+// GraphNamespaces is a REST http.HandlerFunc handling graph generation for 1 or more namespaces
+func GraphNamespaces(request *restful.Request, response *restful.Response) {
+	defer handlePanic(response.ResponseWriter)
+
+	client, err := prometheus.NewClient()
+	graph.CheckError(err)
+
+	graphNamespaces(request, response, client)
 }
 
 // graphNamespaces provides a testing hook that can supply a mock client
-func graphNamespaces(w http.ResponseWriter, r *http.Request, client *prometheus.Client) {
-	o := options.NewOptions(r)
+func graphNamespaces(request *restful.Request, response *restful.Response, client *prometheus.Client) {
+	o := options.NewOptions(request)
 
 	// time how long it takes to generate this graph
 	promtimer := internalmetrics.GetGraphGenerationTimePrometheusTimer(o.GetGraphKind(), o.GraphType, o.InjectServiceNodes)
 	defer promtimer.ObserveDuration()
 
 	trafficMap := buildNamespacesTrafficMap(o, client)
-	generateGraph(trafficMap, w, o)
+	generateGraph(trafficMap, response.ResponseWriter, o)
 
 	// update metrics
 	internalmetrics.SetGraphNodes(o.GetGraphKind(), o.GraphType, o.InjectServiceNodes, len(trafficMap))
@@ -613,18 +623,18 @@ func addNode(trafficMap graph.TrafficMap, namespace, workload, app, version, ser
 
 // GraphNode is a REST http.HandlerFunc handling node-detail graph
 // config generation.
-func GraphNode(w http.ResponseWriter, r *http.Request) {
-	defer handlePanic(w)
+func GraphNode(request *restful.Request, response *restful.Response) {
+	defer handlePanic(response.ResponseWriter)
 
 	client, err := prometheus.NewClient()
 	graph.CheckError(err)
 
-	graphNode(w, r, client)
+	graphNode(request, response, client)
 }
 
 // graphNode provides a testing hook that can supply a mock client
-func graphNode(w http.ResponseWriter, r *http.Request, client *prometheus.Client) {
-	o := options.NewOptions(r)
+func graphNode(request *restful.Request, response *restful.Response, client *prometheus.Client) {
+	o := options.NewOptions(request)
 
 	switch o.Vendor {
 	case "cytoscape":
 	default:
@@ -664,7 +674,7 @@ func graphNode(w http.ResponseWriter, r *http.Request, client *prometheus.Client
 	// the current decision is to not reduce the node graph to provide more detail. This may be
 	// confusing to users, we'll see...
- generateGraph(trafficMap, w, o) + generateGraph(trafficMap, response.ResponseWriter, o) // update metrics internalmetrics.SetGraphNodes(o.GetGraphKind(), o.GraphType, o.InjectServiceNodes, len(trafficMap)) diff --git a/vendor/github.com/kiali/kiali/handlers/namespaces.go b/vendor/github.com/kiali/kiali/handlers/namespaces.go index 5d8448f2a..d4d64a59a 100644 --- a/vendor/github.com/kiali/kiali/handlers/namespaces.go +++ b/vendor/github.com/kiali/kiali/handlers/namespaces.go @@ -1,10 +1,9 @@ package handlers import ( + "github.com/emicklei/go-restful" "net/http" - "github.com/gorilla/mux" - "github.com/kiali/kiali/business" "github.com/kiali/kiali/log" "github.com/kiali/kiali/prometheus" @@ -31,28 +30,27 @@ func NamespaceList(w http.ResponseWriter, r *http.Request) { // NamespaceMetrics is the API handler to fetch metrics to be displayed, related to all // services in the namespace -func NamespaceMetrics(w http.ResponseWriter, r *http.Request) { - getNamespaceMetrics(w, r, defaultPromClientSupplier, defaultK8SClientSupplier) +func NamespaceMetrics(request *restful.Request, response *restful.Response) { + getNamespaceMetrics(request, response, defaultPromClientSupplier, defaultK8SClientSupplier) } // getServiceMetrics (mock-friendly version) -func getNamespaceMetrics(w http.ResponseWriter, r *http.Request, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier) { - vars := mux.Vars(r) - namespace := vars["namespace"] +func getNamespaceMetrics(request *restful.Request, response *restful.Response, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier) { + namespace := request.PathParameters()["namespace"] - prom, _, namespaceInfo := initClientsForMetrics(w, promSupplier, k8sSupplier, namespace) + prom, _, namespaceInfo := initClientsForMetrics(response.ResponseWriter, promSupplier, k8sSupplier, namespace) if prom == nil { // any returned value nil means error & response already written return } params := prometheus.IstioMetricsQuery{Namespace: namespace} - err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo) + err := extractIstioMetricsQueryParams(request.Request, ¶ms, namespaceInfo) if err != nil { - RespondWithError(w, http.StatusBadRequest, err.Error()) + RespondWithError(response.ResponseWriter, http.StatusBadRequest, err.Error()) return } metrics := prom.GetMetrics(¶ms) - RespondWithJSON(w, http.StatusOK, metrics) + RespondWithJSON(response.ResponseWriter, http.StatusOK, metrics) } diff --git a/vendor/github.com/kiali/kiali/handlers/services.go b/vendor/github.com/kiali/kiali/handlers/services.go index eeb0aae27..e8cef6f14 100644 --- a/vendor/github.com/kiali/kiali/handlers/services.go +++ b/vendor/github.com/kiali/kiali/handlers/services.go @@ -1,6 +1,7 @@ package handlers import ( + "github.com/emicklei/go-restful" "net/http" "sync" @@ -36,31 +37,30 @@ func ServiceList(w http.ResponseWriter, r *http.Request) { } // ServiceMetrics is the API handler to fetch metrics to be displayed, related to a single service -func ServiceMetrics(w http.ResponseWriter, r *http.Request) { - getServiceMetrics(w, r, defaultPromClientSupplier, defaultK8SClientSupplier) +func ServiceMetrics(request *restful.Request, response *restful.Response) { + getServiceMetrics(request, response, defaultPromClientSupplier, defaultK8SClientSupplier) } // getServiceMetrics (mock-friendly version) -func getServiceMetrics(w http.ResponseWriter, r *http.Request, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier) { - vars := mux.Vars(r) - namespace := vars["namespace"] - service := 
vars["service"] +func getServiceMetrics(request *restful.Request, response *restful.Response, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier) { + namespace := request.PathParameters()["namespace"] + service := request.PathParameters()["service"] - prom, _, namespaceInfo := initClientsForMetrics(w, promSupplier, k8sSupplier, namespace) + prom, _, namespaceInfo := initClientsForMetrics(response.ResponseWriter, promSupplier, k8sSupplier, namespace) if prom == nil { // any returned value nil means error & response already written return } params := prometheus.IstioMetricsQuery{Namespace: namespace, Service: service} - err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo) + err := extractIstioMetricsQueryParams(request.Request, ¶ms, namespaceInfo) if err != nil { - RespondWithError(w, http.StatusBadRequest, err.Error()) + RespondWithError(response.ResponseWriter, http.StatusBadRequest, err.Error()) return } metrics := prom.GetMetrics(¶ms) - RespondWithJSON(w, http.StatusOK, metrics) + RespondWithJSON(response.ResponseWriter, http.StatusOK, metrics) } // ServiceDetails is the API handler to fetch full details of an specific service diff --git a/vendor/github.com/kiali/kiali/handlers/workloads.go b/vendor/github.com/kiali/kiali/handlers/workloads.go index 564dd5caa..95301f4ee 100644 --- a/vendor/github.com/kiali/kiali/handlers/workloads.go +++ b/vendor/github.com/kiali/kiali/handlers/workloads.go @@ -1,6 +1,7 @@ package handlers import ( + "github.com/emicklei/go-restful" "net/http" "github.com/gorilla/mux" @@ -60,31 +61,30 @@ func WorkloadDetails(w http.ResponseWriter, r *http.Request) { } // WorkloadMetrics is the API handler to fetch metrics to be displayed, related to a single workload -func WorkloadMetrics(w http.ResponseWriter, r *http.Request) { - getWorkloadMetrics(w, r, defaultPromClientSupplier, defaultK8SClientSupplier) +func WorkloadMetrics(request *restful.Request, response *restful.Response) { + getWorkloadMetrics(request, response, defaultPromClientSupplier, defaultK8SClientSupplier) } // getWorkloadMetrics (mock-friendly version) -func getWorkloadMetrics(w http.ResponseWriter, r *http.Request, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier) { - vars := mux.Vars(r) - namespace := vars["namespace"] - workload := vars["workload"] +func getWorkloadMetrics(request *restful.Request, response *restful.Response, promSupplier promClientSupplier, k8sSupplier k8sClientSupplier) { + namespace := request.PathParameter("namespace") + workload := request.PathParameter("workload") - prom, _, namespaceInfo := initClientsForMetrics(w, promSupplier, k8sSupplier, namespace) + prom, _, namespaceInfo := initClientsForMetrics(response.ResponseWriter, promSupplier, k8sSupplier, namespace) if prom == nil { // any returned value nil means error & response already written return } params := prometheus.IstioMetricsQuery{Namespace: namespace, Workload: workload} - err := extractIstioMetricsQueryParams(r, ¶ms, namespaceInfo) + err := extractIstioMetricsQueryParams(request.Request, ¶ms, namespaceInfo) if err != nil { - RespondWithError(w, http.StatusBadRequest, err.Error()) + RespondWithError(response.ResponseWriter, http.StatusBadRequest, err.Error()) return } metrics := prom.GetMetrics(¶ms) - RespondWithJSON(w, http.StatusOK, metrics) + RespondWithJSON(response, http.StatusOK, metrics) } // WorkloadDashboard is the API handler to fetch Istio dashboard, related to a single workload diff --git a/vendor/github.com/knative/pkg b/vendor/github.com/knative/pkg deleted file 
mode 160000 index f8007289b..000000000 --- a/vendor/github.com/knative/pkg +++ /dev/null @@ -1 +0,0 @@ -Subproject commit f8007289b228598dfdf7e6de67ee5f5c391c9d34 diff --git a/vendor/github.com/knative/pkg/LICENSE b/vendor/github.com/knative/pkg/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/knative/pkg/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/knative/pkg/apis/istio/authentication/register.go b/vendor/github.com/knative/pkg/apis/istio/authentication/register.go new file mode 100644 index 000000000..f54c7742d --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/authentication/register.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authentication + +const ( + GroupName = "authentication.istio.io" +) diff --git a/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/doc.go b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/doc.go new file mode 100644 index 000000000..07b17599c --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// API versions allow the API contract for a resource to be changed while keeping +// backward compatibility by supporting multiple concurrent versions +// of the same resource +// +k8s:deepcopy-gen=package +// +groupName=authentication.istio.io +package v1alpha1 diff --git a/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go new file mode 100644 index 000000000..11bea6756 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/policy_types.go @@ -0,0 +1,345 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/knative/pkg/apis/istio/common/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// Policy +type Policy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec PolicySpec `json:"spec"` +} + +// Policy defines what authentication methods can be accepted on workload(s), +// and if authenticated, which method/certificate will set the request principal +// (i.e. request.auth.principal attribute). +// +// Authentication policy is composed of 2-part authentication: +// - peer: verify caller service credentials. This part will set source.user +// (peer identity). +// - origin: verify the origin credentials. This part will set request.auth.user +// (origin identity), as well as other attributes like request.auth.presenter, +// request.auth.audiences and raw claims. Note that the identity could be +// end-user, service account, device etc. +// +// Last but not least, the principal binding rule defines which identity (peer +// or origin) should be used as principal. By default, it uses peer. +// +// Examples: +// +// Policy to enable mTLS for all services in namespace frod +// +// apiVersion: authentication.istio.io/v1alpha1 +// kind: Policy +// metadata: +// name: mTLS_enable +// namespace: frod +// spec: +// peers: +// - mtls: +// +// Policy to disable mTLS for "productpage" service +// +// apiVersion: authentication.istio.io/v1alpha1 +// kind: Policy +// metadata: +// name: mTLS_disable +// namespace: frod +// spec: +// targets: +// - name: productpage +// +// Policy to require mTLS for peer authentication, and JWT for origin authentication +// for productpage:9000. Principal is set from origin identity. +// +// apiVersion: authentication.istio.io/v1alpha1 +// kind: Policy +// metadata: +// name: mTLS_enable +// namespace: frod +// spec: +// target: +// - name: productpage +// ports: +// - number: 9000 +// peers: +// - mtls: +// origins: +// - jwt: +// issuer: "https://securetoken.google.com" +// audiences: +// - "productpage" +// jwksUri: "https://www.googleapis.com/oauth2/v1/certs" +// jwt_headers: +// - "x-goog-iap-jwt-assertion" +// principalBinding: USE_ORIGIN +// +// Policy to require mTLS for peer authentication, and JWT for origin authentication +// for productpage:9000, but allow origin authentication to fail. Principal is set +// from origin identity. +// Note: this example can be used for use cases where we want to allow requests from +// certain peers, provided they come with an appropriate authorization policy to check +// and reject requests accordingly.
+// +// apiVersion: authentication.istio.io/v1alpha1 +// kind: Policy +// metadata: +// name: mTLS_enable +// namespace: frod +// spec: +// target: +// - name: productpage +// ports: +// - number: 9000 +// peers: +// - mtls: +// origins: +// - jwt: +// issuer: "https://securetoken.google.com" +// audiences: +// - "productpage" +// jwksUri: "https://www.googleapis.com/oauth2/v1/certs" +// jwt_headers: +// - "x-goog-iap-jwt-assertion" +// originIsOptional: true +// principalBinding: USE_ORIGIN +type PolicySpec struct { + // List of rules to select destinations that the policy should be applied on. + // If empty, the policy will be used on all destinations in the same namespace. + Targets []TargetSelector `json:"targets,omitempty"` + + // List of authentication methods that can be used for peer authentication. + // They will be evaluated in order; the first valid one will be used to + // set peer identity (source.user) and other peer attributes. If none of + // these methods pass, and the peer_is_optional flag is false (see below), + // the request will be rejected with an authentication failure error (401). + // Leave the list empty if peer authentication is not required. + Peers []PeerAuthenticationMethod `json:"peers,omitempty"` + + // Set this flag to true to accept requests (from the peer authentication perspective), + // even when none of the peer authentication methods defined above is satisfied. + // Typically, this is used to delay the rejection decision to the next layer (e.g. + // authorization). + // This flag is ignored if no authentication is defined for peer (peers field is empty). + PeerIsOptional bool `json:"peerIsOptional,omitempty"` + + // List of authentication methods that can be used for origin authentication. + // Similar to peers, these will be evaluated in order; the first valid one + // will be used to set origin identity and attributes (i.e. request.auth.user, + // request.auth.issuer etc). If none of these methods pass, and origin_is_optional + // is false (see below), the request will be rejected with an authentication + // failure error (401). + // Leave the list empty if origin authentication is not required. + Origins []OriginAuthenticationMethod `json:"origins,omitempty"` + + // Set this flag to true to accept requests (from the origin authentication perspective), + // even when none of the origin authentication methods defined above is satisfied. + // Typically, this is used to delay the rejection decision to the next layer (e.g. + // authorization). + // This flag is ignored if no authentication is defined for origin (origins field is empty). + OriginIsOptional bool `json:"originIsOptional,omitempty"` + + // Define whether peer or origin identity should be used as the principal. Default + // value is USE_PEER. + // If the peer (or origin) identity is not available, either because peer/origin + // authentication is not defined or because it failed, the principal will be left unset. + // In other words, the binding rule does not affect the decision to accept or + // reject the request. + PrincipalBinding PrincipalBinding `json:"principalBinding,omitempty"` +} + +// TargetSelector defines a matching rule to a service/destination. +type TargetSelector struct { + // REQUIRED. The name must be a short name from the service registry. The + // fully qualified domain name will be resolved in a platform specific manner. + Name string `json:"name"` + + // Specifies the ports on the destination. Leave empty to match all ports + // that are exposed.
+ Ports []PortSelector `json:"ports,omitempty"` +} + +// PortSelector specifies the name or number of a port to be used for +// matching targets for authentication policy. This is copied from +// networking API to avoid dependency. +type PortSelector struct { + // It is required to specify exactly one of the fields: + // Number or Name + + // Valid port number + Number uint32 `json:"number,omitempty"` + + // Port name + Name string `json:"name,omitempty"` +} + +// PeerAuthenticationMethod defines one particular type of authentication, e.g. +// mutual TLS, JWT, etc. (no authentication is one type by itself) that can +// be used for peer authentication. +// The type can be programmatically determined by checking the type of the +// "params" field. +type PeerAuthenticationMethod struct { + // It is required to specify exactly one of the fields: + // Mtls or Jwt + // Set if mTLS is used. + Mtls *MutualTls `json:"mtls,omitempty"` + + // Set if JWT is used. This option is not yet available. + Jwt *Jwt `json:"jwt,omitempty"` +} + +// Defines the acceptable connection TLS mode. +type Mode string + +const ( + // Client cert must be presented, connection is in TLS. + ModeStrict Mode = "STRICT" + + // Connection can be either plaintext or TLS, and client cert can be omitted. + ModePermissive Mode = "PERMISSIVE" +) + +// TLS authentication params. +type MutualTls struct { + + // WILL BE DEPRECATED, if set, will translate to `TLS_PERMISSIVE` mode. + // Set this flag to true to allow regular TLS (i.e. without a client x509 + // certificate). If the request carries a client certificate, identity will be + // extracted and used (set to peer identity). Otherwise, peer identity will + // be left unset. + // When the flag is false (default), requests must have a client certificate. + AllowTls bool `json:"allowTls,omitempty"` + + // Defines the mode of mTLS authentication. + Mode Mode `json:"mode,omitempty"` +} + +// JSON Web Token (JWT) token format for authentication as defined by +// https://tools.ietf.org/html/rfc7519. See [OAuth +// 2.0](https://tools.ietf.org/html/rfc6749) and [OIDC +// 1.0](http://openid.net/connect) for how this is used in the whole +// authentication flow. +// +// Example: +// +// issuer: https://example.com +// audiences: +// - bookstore_android.apps.googleusercontent.com +// bookstore_web.apps.googleusercontent.com +// jwksUri: https://example.com/.well-known/jwks.json +// +type Jwt struct { + // Identifies the issuer that issued the JWT. See + // [issuer](https://tools.ietf.org/html/rfc7519#section-4.1.1) + // Usually a URL or an email address. + // + // Example: https://securetoken.google.com + // Example: 1234567-compute@developer.gserviceaccount.com + Issuer string `json:"issuer,omitempty"` + + // The list of JWT + // [audiences](https://tools.ietf.org/html/rfc7519#section-4.1.3) + // that are allowed to access. A JWT containing any of these + // audiences will be accepted. + // + // The service name will be accepted if audiences is empty. + // + // Example: + // + // ```yaml + // audiences: + // - bookstore_android.apps.googleusercontent.com + // bookstore_web.apps.googleusercontent.com + // ``` + Audiences []string `json:"audiences,omitempty"` + + // URL of the provider's public key set to validate signature of the + // JWT. See [OpenID + // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata).
+ // + // Optional if the key set document can either (a) be retrieved from + // [OpenID + // Discovery](https://openid.net/specs/openid-connect-discovery-1_0.html) of + // the issuer or (b) inferred from the email domain of the issuer (e.g. a + // Google service account). + // + // Example: https://www.googleapis.com/oauth2/v1/certs + JwksUri string `json:"jwksUri,omitempty"` + + // Two fields below define where to extract the JWT from an HTTP request. + // + // If no explicit location is specified the following default + // locations are tried in order: + // + // 1) The Authorization header using the Bearer schema, + // e.g. Authorization: Bearer . (see + // [Authorization Request Header + // Field](https://tools.ietf.org/html/rfc6750#section-2.1)) + // + // 2) `access_token` query parameter (see + // [URI Query Parameter](https://tools.ietf.org/html/rfc6750#section-2.3)) + // JWT is sent in a request header. `header` represents the + // header name. + // + // For example, if `header=x-goog-iap-jwt-assertion`, the header + // format will be x-goog-iap-jwt-assertion: . + JwtHeaders []string `json:"jwtHeaders,omitempty"` + + // JWT is sent in a query parameter. `query` represents the + // query parameter name. + // + // For example, `query=jwt_token`. + JwtParams []string `json:"jwtParams,omitempty"` + + // URL paths that should be excluded from the JWT validation. If the request path is matched, + // the JWT validation will be skipped and the request will proceed regardless. + // This is useful to keep a couple of URLs public for external health checks. + // Example: "/health_check", "/status/cpu_usage". + ExcludedPaths []v1alpha1.StringMatch `json:"excludedPaths,omitempty"` +} + +// OriginAuthenticationMethod defines authentication method/params for origin +// authentication. Origin could be end-user, device, delegate service etc. +// Currently, only JWT is supported for origin authentication. +type OriginAuthenticationMethod struct { + // Jwt params for the method. + Jwt *Jwt `json:"jwt,omitempty"` +} + +// Associates authentication with request principal. +type PrincipalBinding string + +const ( + // Principal will be set to the identity from peer authentication. + PrincipalBindingUserPeer PrincipalBinding = "USE_PEER" + // Principal will be set to the identity from origin authentication. + PrincipalBindingUserOrigin PrincipalBinding = "USE_ORIGIN" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// PolicyList is a list of Policy resources +type PolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []Policy `json:"items"` +} diff --git a/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/register.go b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/register.go new file mode 100644 index 000000000..7809d1cd9 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
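As a usage sketch (not part of this patch), the "enable mTLS for productpage" example from the doc comment above can be built directly with these types; the namespace "frod" and target name come from the examples, while the function and object names are illustrative:

    package main

    import (
    	authv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // mtlsPolicy requires strict mTLS for peers of the productpage service
    // and keeps the peer identity as the request principal.
    func mtlsPolicy() *authv1alpha1.Policy {
    	return &authv1alpha1.Policy{
    		ObjectMeta: metav1.ObjectMeta{Name: "mtls-enable", Namespace: "frod"},
    		Spec: authv1alpha1.PolicySpec{
    			Targets: []authv1alpha1.TargetSelector{{Name: "productpage"}},
    			Peers: []authv1alpha1.PeerAuthenticationMethod{
    				{Mtls: &authv1alpha1.MutualTls{Mode: authv1alpha1.ModeStrict}},
    			},
    			PrincipalBinding: authv1alpha1.PrincipalBindingUserPeer,
    		},
    	}
    }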
+*/ + +package v1alpha1 + +import ( + "github.com/knative/pkg/apis/istio/authentication" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: authentication.GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Policy{}, + &PolicyList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..1879de764 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/authentication/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,259 @@ +// +build !ignore_autogenerated + +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + commonv1alpha1 "github.com/knative/pkg/apis/istio/common/v1alpha1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Jwt) DeepCopyInto(out *Jwt) { + *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.JwtHeaders != nil { + in, out := &in.JwtHeaders, &out.JwtHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.JwtParams != nil { + in, out := &in.JwtParams, &out.JwtParams + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedPaths != nil { + in, out := &in.ExcludedPaths, &out.ExcludedPaths + *out = make([]commonv1alpha1.StringMatch, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Jwt. +func (in *Jwt) DeepCopy() *Jwt { + if in == nil { + return nil + } + out := new(Jwt) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
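A minimal sketch of how a consumer wires these types into a runtime scheme; AddToScheme comes from the register.go above, the helper name is hypothetical:

    package main

    import (
    	authv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
    	"k8s.io/apimachinery/pkg/runtime"
    )

    // buildScheme registers Policy and PolicyList so generated clients and
    // serializers can encode and decode them.
    func buildScheme() (*runtime.Scheme, error) {
    	scheme := runtime.NewScheme()
    	if err := authv1alpha1.AddToScheme(scheme); err != nil {
    		return nil, err
    	}
    	return scheme, nil
    }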
+func (in *MutualTls) DeepCopyInto(out *MutualTls) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutualTls. +func (in *MutualTls) DeepCopy() *MutualTls { + if in == nil { + return nil + } + out := new(MutualTls) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OriginAuthenticationMethod) DeepCopyInto(out *OriginAuthenticationMethod) { + *out = *in + if in.Jwt != nil { + in, out := &in.Jwt, &out.Jwt + *out = new(Jwt) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginAuthenticationMethod. +func (in *OriginAuthenticationMethod) DeepCopy() *OriginAuthenticationMethod { + if in == nil { + return nil + } + out := new(OriginAuthenticationMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerAuthenticationMethod) DeepCopyInto(out *PeerAuthenticationMethod) { + *out = *in + if in.Mtls != nil { + in, out := &in.Mtls, &out.Mtls + *out = new(MutualTls) + **out = **in + } + if in.Jwt != nil { + in, out := &in.Jwt, &out.Jwt + *out = new(Jwt) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerAuthenticationMethod. +func (in *PeerAuthenticationMethod) DeepCopy() *PeerAuthenticationMethod { + if in == nil { + return nil + } + out := new(PeerAuthenticationMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyList) DeepCopyInto(out *PolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. +func (in *PolicyList) DeepCopy() *PolicyList { + if in == nil { + return nil + } + out := new(PolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
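These generated DeepCopy helpers are what controllers use to avoid mutating objects shared through informer caches: modify the copy, never the object a lister returned. A hedged sketch (the helper name is hypothetical):

    package main

    import (
    	authv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1"
    )

    // withStrictMtls returns a modified copy of p, leaving the possibly
    // cache-shared original untouched.
    func withStrictMtls(p *authv1alpha1.Policy) *authv1alpha1.Policy {
    	out := p.DeepCopy() // generated in this file
    	for i := range out.Spec.Peers {
    		if out.Spec.Peers[i].Mtls != nil {
    			out.Spec.Peers[i].Mtls.Mode = authv1alpha1.ModeStrict
    		}
    	}
    	return out
    }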
+func (in *PolicySpec) DeepCopyInto(out *PolicySpec) { + *out = *in + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]TargetSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = make([]PeerAuthenticationMethod, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Origins != nil { + in, out := &in.Origins, &out.Origins + *out = make([]OriginAuthenticationMethod, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec. +func (in *PolicySpec) DeepCopy() *PolicySpec { + if in == nil { + return nil + } + out := new(PolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortSelector) DeepCopyInto(out *PortSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSelector. +func (in *PortSelector) DeepCopy() *PortSelector { + if in == nil { + return nil + } + out := new(PortSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetSelector) DeepCopyInto(out *TargetSelector) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]PortSelector, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSelector. +func (in *TargetSelector) DeepCopy() *TargetSelector { + if in == nil { + return nil + } + out := new(TargetSelector) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/knative/pkg/apis/istio/common/v1alpha1/string.go b/vendor/github.com/knative/pkg/apis/istio/common/v1alpha1/string.go new file mode 100644 index 000000000..c34c25053 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/common/v1alpha1/string.go @@ -0,0 +1,35 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// Describes how to match a given string in HTTP headers. Match is +// case-sensitive. +type StringMatch struct { + // Specify exactly one of the fields below. + + // exact string match + Exact string `json:"exact,omitempty"` + + // prefix-based match + Prefix string `json:"prefix,omitempty"` + + // suffix-based match
+ Suffix string `json:"suffix,omitempty"` + + // ECMAScript style regex-based match + Regex string `json:"regex,omitempty"` +} diff --git a/vendor/github.com/knative/pkg/apis/istio/register.go b/vendor/github.com/knative/pkg/apis/istio/register.go new file mode 100644 index 000000000..647eb38a0 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/register.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package istio + +const ( + GroupName = "networking.istio.io" +) diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/destinationrule_types.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/destinationrule_types.go new file mode 100644 index 000000000..d5b0d4050 --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/destinationrule_types.go @@ -0,0 +1,547 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DestinationRule +type DestinationRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec DestinationRuleSpec `json:"spec"` +} + +// DestinationRule defines policies that apply to traffic intended for a +// service after routing has occurred. These rules specify configuration +// for load balancing, connection pool size from the sidecar, and outlier +// detection settings to detect and evict unhealthy hosts from the load +// balancing pool. For example, a simple load balancing policy for the +// ratings service would look as follows: +// +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: LEAST_CONN +// +// +// Version specific policies can be specified by defining a named +// subset and overriding the settings specified at the service level. The +// following rule uses a round robin load balancing policy for all traffic +// going to a subset named testversion that is composed of endpoints (e.g., +// pods) with labels (version:v3).
+// +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: LEAST_CONN +// subsets: +// - name: testversion +// labels: +// version: v3 +// trafficPolicy: +// loadBalancer: +// simple: ROUND_ROBIN +// +// +// **Note:** Policies specified for subsets will not take effect until +// a route rule explicitly sends traffic to this subset. +// +// Traffic policies can be customized to specific ports as well. The +// following rule uses the least connection load balancing policy for all +// traffic to port 80, while using a round robin load balancing setting for +// traffic to port 9080. +// +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings-port +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: # Apply to all ports +// portLevelSettings: +// - port: +// number: 80 +// loadBalancer: +// simple: LEAST_CONN +// - port: +// number: 9080 +// loadBalancer: +// simple: ROUND_ROBIN +// +type DestinationRuleSpec struct { + // REQUIRED. The name of a service from the service registry. Service + // names are looked up from the platform's service registry (e.g., + // Kubernetes services, Consul services, etc.) and from the hosts + // declared by [ServiceEntries](#ServiceEntry). Rules defined for + // services that do not exist in the service registry will be ignored. + // + // *Note for Kubernetes users*: When short names are used (e.g. "reviews" + // instead of "reviews.default.svc.cluster.local"), Istio will interpret + // the short name based on the namespace of the rule, not the service. A + // rule in the "default" namespace containing a host "reviews" will be + // interpreted as "reviews.default.svc.cluster.local", irrespective of + // the actual namespace associated with the reviews service. _To avoid + // potential misconfigurations, it is recommended to always use fully + // qualified domain names over short names._ + // + // Note that the host field applies to both HTTP and TCP services. + Host string `json:"host"` + + // Traffic policies to apply (load balancing policy, connection pool + // sizes, outlier detection). + TrafficPolicy *TrafficPolicy `json:"trafficPolicy,omitempty"` + + // One or more named sets that represent individual versions of a + // service. Traffic policies can be overridden at subset level. + Subsets []Subset `json:"subsets,omitempty"` +} + +// Traffic policies to apply for a specific destination, across all +// destination ports. See DestinationRule for examples. +type TrafficPolicy struct { + + // Settings controlling the load balancer algorithms. + LoadBalancer *LoadBalancerSettings `json:"loadBalancer,omitempty"` + + // Settings controlling the volume of connections to an upstream service + ConnectionPool *ConnectionPoolSettings `json:"connectionPool,omitempty"` + + // Settings controlling eviction of unhealthy hosts from the load balancing pool + OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` + + // TLS related settings for connections to the upstream service. + Tls *TLSSettings `json:"tls,omitempty"` + + // Traffic policies specific to individual ports. Note that port level + // settings will override the destination-level settings. Traffic + // settings specified at the destination-level will not be inherited when + // overridden by port-level settings, i.e.
default values will be applied + // to fields omitted in port-level traffic policies. + PortLevelSettings []PortTrafficPolicy `json:"portLevelSettings,omitempty"` +} + +// Traffic policies that apply to specific ports of the service +type PortTrafficPolicy struct { + // Specifies the port name or number of a port on the destination service + // on which this policy is being applied. + // + // Names must comply with DNS label syntax (rfc1035) and therefore cannot + // collide with numbers. If there are multiple ports on a service with + // the same protocol the names should be of the form -. + Port PortSelector `json:"port"` + + // Settings controlling the load balancer algorithms. + LoadBalancer *LoadBalancerSettings `json:"loadBalancer,omitempty"` + + // Settings controlling the volume of connections to an upstream service + ConnectionPool *ConnectionPoolSettings `json:"connectionPool,omitempty"` + + // Settings controlling eviction of unhealthy hosts from the load balancing pool + OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` + + // TLS related settings for connections to the upstream service. + Tls *TLSSettings `json:"tls,omitempty"` +} + +// A subset of endpoints of a service. Subsets can be used for scenarios +// like A/B testing, or routing to a specific version of a service. Refer +// to [VirtualService](#VirtualService) documentation for examples of using +// subsets in these scenarios. In addition, traffic policies defined at the +// service-level can be overridden at a subset-level. The following rule +// uses a round robin load balancing policy for all traffic going to a +// subset named testversion that is composed of endpoints (e.g., pods) with +// labels (version:v3). +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: LEAST_CONN +// subsets: +// - name: testversion +// labels: +// version: v3 +// trafficPolicy: +// loadBalancer: +// simple: ROUND_ROBIN +// +// **Note:** Policies specified for subsets will not take effect until +// a route rule explicitly sends traffic to this subset. +type Subset struct { + // REQUIRED. Name of the subset. The service name and the subset name can + // be used for traffic splitting in a route rule. + Name string `json:"name"` + + // REQUIRED. Labels apply a filter over the endpoints of a service in the + // service registry. See route rules for examples of usage. + Labels map[string]string `json:"labels"` + + // Traffic policies that apply to this subset. Subsets inherit the + // traffic policies specified at the DestinationRule level. Settings + // specified at the subset level will override the corresponding settings + // specified at the DestinationRule level. + TrafficPolicy *TrafficPolicy `json:"trafficPolicy,omitempty"` +} + +// Load balancing policies to apply for a specific destination. See Envoy's +// load balancing +// [documentation](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/load_balancing.html) +// for more details. +// +// For example, the following rule uses a round robin load balancing policy +// for all traffic going to the ratings service. 
+// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// simple: ROUND_ROBIN +// +// The following example sets up sticky sessions for the ratings service, +// using a consistent hash-based load balancer with the User cookie as the +// hash key. +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-ratings +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// loadBalancer: +// consistentHash: +// httpCookie: +// name: user +// ttl: 0s +type LoadBalancerSettings struct { + // It is required to specify exactly one of the fields: + // Simple or ConsistentHash + Simple SimpleLB `json:"simple,omitempty"` + ConsistentHash *ConsistentHashLB `json:"consistentHash,omitempty"` +} + +// Standard load balancing algorithms that require no tuning. +type SimpleLB string + +const ( + // Round Robin policy. Default + SimpleLBRoundRobin SimpleLB = "ROUND_ROBIN" + + // The least request load balancer uses an O(1) algorithm which selects + // two random healthy hosts and picks the host which has fewer active + // requests. + SimpleLBLeastConn SimpleLB = "LEAST_CONN" + + // The random load balancer selects a random healthy host. The random + // load balancer generally performs better than round robin if no health + // checking policy is configured. + SimpleLBRandom SimpleLB = "RANDOM" + + // This option will forward the connection to the original IP address + // requested by the caller without doing any form of load + // balancing. This option must be used with care. It is meant for + // advanced use cases. Refer to Original Destination load balancer in + // Envoy for further details. + SimpleLBPassthrough SimpleLB = "PASSTHROUGH" +) + +// Consistent Hash-based load balancing can be used to provide soft +// session affinity based on HTTP headers, cookies or other +// properties. This load balancing policy is applicable only for HTTP +// connections. The affinity to a particular destination host will be +// lost when one or more hosts are added/removed from the destination +// service. +type ConsistentHashLB struct { + + // It is required to specify exactly one of the fields as hash key: + // HttpHeaderName, HttpCookie, or UseSourceIP. + // Hash based on a specific HTTP header. + HttpHeaderName string `json:"httpHeaderName,omitempty"` + + // Hash based on HTTP cookie. + HttpCookie *HTTPCookie `json:"httpCookie,omitempty"` + + // Hash based on the source IP address. + UseSourceIp bool `json:"useSourceIp,omitempty"` + + // The minimum number of virtual nodes to use for the hash + // ring. Defaults to 1024. Larger ring sizes result in more granular + // load distributions. If the number of hosts in the load balancing + // pool is larger than the ring size, each host will be assigned a + // single virtual node. + MinimumRingSize uint64 `json:"minimumRingSize,omitempty"` +} + +// Describes an HTTP cookie that will be used as the hash key for the +// Consistent Hash load balancer. If the cookie is not present, it will +// be generated. +type HTTPCookie struct { + // REQUIRED. Name of the cookie. + Name string `json:"name"` + + // Path to set for the cookie. + Path string `json:"path,omitempty"` + + // REQUIRED. Lifetime of the cookie. + Ttl string `json:"ttl"` +} + +// Connection pool settings for an upstream host.
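The sticky-session example above, expressed as a typed TrafficPolicy; a sketch (not part of this patch) assuming the package is imported as istiov1alpha3, with the cookie name and TTL mirroring the YAML:

    package main

    import (
    	istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
    )

    // stickySessions hashes requests on the "user" cookie so a client keeps
    // landing on the same upstream host.
    func stickySessions() *istiov1alpha3.TrafficPolicy {
    	return &istiov1alpha3.TrafficPolicy{
    		LoadBalancer: &istiov1alpha3.LoadBalancerSettings{
    			ConsistentHash: &istiov1alpha3.ConsistentHashLB{
    				HttpCookie: &istiov1alpha3.HTTPCookie{Name: "user", Ttl: "0s"},
    			},
    		},
    	}
    }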
The settings apply to +// each individual host in the upstream service. See Envoy's [circuit +// breaker](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/circuit_breaking) +// for more details. Connection pool settings can be applied at the TCP +// level as well as at HTTP level. +// +// For example, the following rule sets a limit of 100 connections to a redis +// service called myredissrv with a connect timeout of 30ms +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: bookinfo-redis +// spec: +// host: myredissrv.prod.svc.cluster.local +// trafficPolicy: +// connectionPool: +// tcp: +// maxConnections: 100 +// connectTimeout: 30ms +type ConnectionPoolSettings struct { + + // Settings common to both HTTP and TCP upstream connections. + Tcp *TCPSettings `json:"tcp,omitempty"` + + // HTTP connection pool settings. + Http *HTTPSettings `json:"http,omitempty"` +} + +// Settings common to both HTTP and TCP upstream connections. +type TCPSettings struct { + // Maximum number of HTTP1/TCP connections to a destination host. + MaxConnections int32 `json:"maxConnections,omitempty"` + + // TCP connection timeout. + ConnectTimeout string `json:"connectTimeout,omitempty"` +} + +// Settings applicable to HTTP1.1/HTTP2/GRPC connections. +type HTTPSettings struct { + // Maximum number of pending HTTP requests to a destination. Default 1024. + Http1MaxPendingRequests int32 `json:"http1MaxPendingRequests,omitempty"` + + // Maximum number of requests to a backend. Default 1024. + Http2MaxRequests int32 `json:"http2MaxRequests,omitempty"` + + // Maximum number of requests per connection to a backend. Setting this + // parameter to 1 disables keep alive. + MaxRequestsPerConnection int32 `json:"maxRequestsPerConnection,omitempty"` + + // Maximum number of retries that can be outstanding to all hosts in a + // cluster at a given time. Defaults to 3. + MaxRetries int32 `json:"maxRetries,omitempty"` +} + +// A circuit breaker implementation that tracks the status of each +// individual host in the upstream service. Applicable to both HTTP and +// TCP services. For HTTP services, hosts that continually return 5xx +// errors for API calls are ejected from the pool for a pre-defined period +// of time. For TCP services, connection timeouts or connection +// failures to a given host count as an error when measuring the +// consecutive errors metric. See Envoy's [outlier +// detection](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/outlier) +// for more details. +// +// The following rule sets a connection pool size of 100 connections and +// 1000 concurrent HTTP2 requests, with no more than 10 req/connection to +// the "reviews" service. In addition, it configures upstream hosts to be +// scanned every 5 mins, such that any host that fails 7 consecutive times +// with a 5XX error code will be ejected for 15 minutes. +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: reviews-cb-policy +// spec: +// host: reviews.prod.svc.cluster.local +// trafficPolicy: +// connectionPool: +// tcp: +// maxConnections: 100 +// http: +// http2MaxRequests: 1000 +// maxRequestsPerConnection: 10 +// outlierDetection: +// consecutiveErrors: 7 +// interval: 5m +// baseEjectionTime: 15m +type OutlierDetection struct { + // Number of errors before a host is ejected from the connection + // pool. Defaults to 5. When the upstream host is accessed over HTTP, a + // 5xx return code qualifies as an error.
When the upstream host is + // accessed over an opaque TCP connection, connect timeouts and + // connection error/failure events qualify as an error. + ConsecutiveErrors int32 `json:"consecutiveErrors,omitempty"` + + // Time interval between ejection sweep analysis. format: + // 1h/1m/1s/1ms. MUST BE >=1ms. Default is 10s. + Interval string `json:"interval,omitempty"` + + // Minimum ejection duration. A host will remain ejected for a period + // equal to the product of minimum ejection duration and the number of + // times the host has been ejected. This technique allows the system to + // automatically increase the ejection period for unhealthy upstream + // servers. format: 1h/1m/1s/1ms. MUST BE >=1ms. Default is 30s. + BaseEjectionTime string `json:"baseEjectionTime,omitempty"` + + // Maximum % of hosts in the load balancing pool for the upstream + // service that can be ejected. Defaults to 10%. + MaxEjectionPercent int32 `json:"maxEjectionPercent,omitempty"` +} + +// SSL/TLS related settings for upstream connections. See Envoy's [TLS +// context](https://www.envoyproxy.io/docs/envoy/latest/api-v1/cluster_manager/cluster_ssl.html#config-cluster-manager-cluster-ssl) +// for more details. These settings are common to both HTTP and TCP upstreams. +// +// For example, the following rule configures a client to use mutual TLS +// for connections to upstream database cluster. +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: db-mtls +// spec: +// host: mydbserver.prod.svc.cluster.local +// trafficPolicy: +// tls: +// mode: MUTUAL +// clientCertificate: /etc/certs/myclientcert.pem +// privateKey: /etc/certs/client_private_key.pem +// caCertificates: /etc/certs/rootcacerts.pem +// +// The following rule configures a client to use TLS when talking to a +// foreign service whose domain matches *.foo.com. +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: tls-foo +// spec: +// host: "*.foo.com" +// trafficPolicy: +// tls: +// mode: SIMPLE +// +// The following rule configures a client to use Istio mutual TLS when talking +// to rating services. +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: DestinationRule +// metadata: +// name: ratings-istio-mtls +// spec: +// host: ratings.prod.svc.cluster.local +// trafficPolicy: +// tls: +// mode: ISTIO_MUTUAL +type TLSSettings struct { + + // REQUIRED: Indicates whether connections to this port should be secured + // using TLS. The value of this field determines how TLS is enforced. + Mode TLSmode `json:"mode"` + + // REQUIRED if mode is `MUTUAL`. The path to the file holding the + // client-side TLS certificate to use. + // Should be empty if mode is `ISTIO_MUTUAL`. + ClientCertificate string `json:"clientCertificate,omitempty"` + + // REQUIRED if mode is `MUTUAL`. The path to the file holding the + // client's private key. + // Should be empty if mode is `ISTIO_MUTUAL`. + PrivateKey string `json:"privateKey,omitempty"` + + // OPTIONAL: The path to the file containing certificate authority + // certificates to use in verifying a presented server certificate. If + // omitted, the proxy will not verify the server's certificate. + // Should be empty if mode is `ISTIO_MUTUAL`. + CaCertificates string `json:"caCertificates,omitempty"` + + // A list of alternate names to verify the subject identity in the + // certificate. If specified, the proxy will verify that the server + // certificate's subject alt name matches one of the specified values. 
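The reviews-cb-policy example above as a typed spec; a sketch under the same istiov1alpha3 alias assumption, with the hypothetical helper name reviewsCircuitBreaker:

    package main

    import (
    	istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
    )

    // reviewsCircuitBreaker caps connections and requests, and ejects hosts
    // that fail 7 consecutive times, per the outlier-detection example above.
    func reviewsCircuitBreaker() istiov1alpha3.DestinationRuleSpec {
    	return istiov1alpha3.DestinationRuleSpec{
    		Host: "reviews.prod.svc.cluster.local",
    		TrafficPolicy: &istiov1alpha3.TrafficPolicy{
    			ConnectionPool: &istiov1alpha3.ConnectionPoolSettings{
    				Tcp: &istiov1alpha3.TCPSettings{MaxConnections: 100},
    				Http: &istiov1alpha3.HTTPSettings{
    					Http2MaxRequests:         1000,
    					MaxRequestsPerConnection: 10,
    				},
    			},
    			OutlierDetection: &istiov1alpha3.OutlierDetection{
    				ConsecutiveErrors: 7,
    				Interval:          "5m",
    				BaseEjectionTime:  "15m",
    			},
    		},
    	}
    }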
+ // Should be empty if mode is `ISTIO_MUTUAL`. + SubjectAltNames []string `json:"subjectAltNames,omitempty"` + + // SNI string to present to the server during TLS handshake. + // Should be empty if mode is `ISTIO_MUTUAL`. + Sni string `json:"sni,omitempty"` +} + +// TLS connection mode +type TLSmode string + +const ( + // Do not set up a TLS connection to the upstream endpoint. + TLSmodeDisable TLSmode = "DISABLE" + + // Originate a TLS connection to the upstream endpoint. + TLSmodeSimple TLSmode = "SIMPLE" + + // Secure connections to the upstream using mutual TLS by presenting + // client certificates for authentication. + TLSmodeMutual TLSmode = "MUTUAL" + + // Secure connections to the upstream using mutual TLS by presenting + // client certificates for authentication. + // Compared to Mutual mode, this mode uses certificates generated + // automatically by Istio for mTLS authentication. When this mode is + // used, all other fields in `TLSSettings` should be empty. + TLSmodeIstioMutual TLSmode = "ISTIO_MUTUAL" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DestinationRuleList is a list of DestinationRule resources +type DestinationRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []DestinationRule `json:"items"` +} diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/doc.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/doc.go new file mode 100644 index 000000000..47ec83dae --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// API versions allow the API contract for a resource to be changed while keeping +// backward compatibility by supporting multiple concurrent versions +// of the same resource + +// +k8s:deepcopy-gen=package +// +groupName=networking.istio.io +package v1alpha3 diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/gateway_types.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/gateway_types.go new file mode 100644 index 000000000..0822a5e3b --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/gateway_types.go @@ -0,0 +1,318 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
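For Istio-managed mTLS, the ISTIO_MUTUAL mode is the entire configuration, since certificates are generated automatically; a sketch (the helper name is hypothetical, the istiov1alpha3 alias is assumed as before):

    // istioMutualTLS mirrors the "ratings-istio-mtls" example above; all
    // other TLSSettings fields must stay empty in this mode.
    func istioMutualTLS() *istiov1alpha3.TrafficPolicy {
    	return &istiov1alpha3.TrafficPolicy{
    		Tls: &istiov1alpha3.TLSSettings{Mode: istiov1alpha3.TLSmodeIstioMutual},
    	}
    }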
+*/ + +package v1alpha3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Gateway describes a load balancer operating at the edge of the mesh +// receiving incoming or outgoing HTTP/TCP connections. The specification +// describes a set of ports that should be exposed, the type of protocol to +// use, SNI configuration for the load balancer, etc. +// +// For example, the following gateway spec sets up a proxy to act as a load +// balancer exposing port 80 and 9080 (http), 443 (https), and port 2379 +// (TCP) for ingress. The gateway will be applied to the proxy running on +// a pod with labels "app: my-gateway-controller". While Istio will configure the +// proxy to listen on these ports, it is the responsibility of the user to +// ensure that external traffic to these ports is allowed into the mesh. +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-gateway +// spec: +// selector: +// app: my-gateway-controller +// servers: +// - port: +// number: 80 +// name: http +// protocol: HTTP +// hosts: +// - uk.bookinfo.com +// - eu.bookinfo.com +// tls: +// httpsRedirect: true # sends 302 redirect for http requests +// - port: +// number: 443 +// name: https +// protocol: HTTPS +// hosts: +// - uk.bookinfo.com +// - eu.bookinfo.com +// tls: +// mode: SIMPLE #enables HTTPS on this port +// serverCertificate: /etc/certs/servercert.pem +// privateKey: /etc/certs/privatekey.pem +// - port: +// number: 9080 +// name: http-wildcard +// protocol: HTTP +// # no hosts implies wildcard match +// - port: +// number: 2379 #to expose internal service via external port 2379 +// name: mongo +// protocol: MONGO +// +// The gateway specification above describes the L4-L6 properties of a load +// balancer. A VirtualService can then be bound to a gateway to control +// the forwarding of traffic arriving at a particular host or gateway port. +// +// For example, the following VirtualService splits traffic for +// https://uk.bookinfo.com/reviews, https://eu.bookinfo.com/reviews, +// http://uk.bookinfo.com:9080/reviews, http://eu.bookinfo.com:9080/reviews +// into two versions (prod and qa) of an internal reviews service on port +// 9080. In addition, requests containing the cookie user: dev-123 will be +// sent to special port 7777 in the qa version. The same rule is also +// applicable inside the mesh for requests to the reviews.prod +// service. This rule is applicable across ports 443, 9080. Note that +// http://uk.bookinfo.com gets redirected to https://uk.bookinfo.com +// (i.e. 80 redirects to 443). +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: bookinfo-rule +// spec: +// hosts: +// - reviews.prod +// - uk.bookinfo.com +// - eu.bookinfo.com +// gateways: +// - my-gateway +// - mesh # applies to all the sidecars in the mesh +// http: +// - match: +// - headers: +// cookie: +// user: dev-123 +// route: +// - destination: +// port: +// number: 7777 +// name: reviews.qa +// - match: +// uri: +// prefix: /reviews/ +// route: +// - destination: +// port: +// number: 9080 # can be omitted if it's the only port for reviews +// name: reviews.prod +// weight: 80 +// - destination: +// name: reviews.qa +// weight: 20 +// +// The following VirtualService forwards traffic arriving at (external) port +// 2379 from 172.17.16.0/24 subnet to internal Mongo server on port 5555.
This +// rule is not applicable internally in the mesh as the gateway list omits +// the reserved name "mesh". +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: VirtualService +// metadata: +// name: bookinfo-Mongo +// spec: +// hosts: +// - mongosvr #name of Mongo service +// gateways: +// - my-gateway +// tcp: +// - match: +// - port: +// number: 2379 +// sourceSubnet: "172.17.16.0/24" +// route: +// - destination: +// name: mongo.prod +// +type Gateway struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec GatewaySpec `json:"spec"` +} + +type GatewaySpec struct { + // REQUIRED: A list of server specifications. + Servers []Server `json:"servers"` + + // One or more labels that indicate a specific set of pods/VMs + // on which this gateway configuration should be applied. + // If no selectors are provided, the gateway will be implemented by + // the default istio-ingress controller. + Selector map[string]string `json:"selector,omitempty"` +} + +// Server describes the properties of the proxy on a given load balancer port. +// For example, +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-ingress +// spec: +// selector: +// app: my-ingress-controller +// servers: +// - port: +// number: 80 +// name: http2 +// protocol: HTTP2 +// +// Another example +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-tcp-ingress +// spec: +// selector: +// app: my-tcp-ingress-controller +// servers: +// - port: +// number: 27018 +// name: mongo +// protocol: MONGO +// +// The following is an example of TLS configuration for port 443 +// +// apiVersion: networking.istio.io/v1alpha3 +// kind: Gateway +// metadata: +// name: my-tls-ingress +// spec: +// selector: +// app: my-tls-ingress-controller +// servers: +// - port: +// number: 443 +// name: https +// protocol: HTTPS +// tls: +// mode: SIMPLE +// serverCertificate: /etc/certs/server.pem +// privateKey: /etc/certs/privatekey.pem +// +type Server struct { + // REQUIRED: The Port on which the proxy should listen for incoming + // connections + Port Port `json:"port"` + + // A list of hosts exposed by this gateway. While + // typically applicable to HTTP services, it can also be used for TCP + // services using TLS with SNI. Standard DNS wildcard prefix syntax + // is permitted. + // + // A VirtualService that is bound to a gateway must have a matching host + // in its default destination. Specifically, one of the VirtualService + // destination hosts is a strict suffix of a gateway host or + // a gateway host is a suffix of one of the VirtualService hosts. + Hosts []string `json:"hosts,omitempty"` + + // Set of TLS related options that govern the server's behavior. Use + // these options to control if all http requests should be redirected to + // https, and the TLS modes to use. + TLS *TLSOptions `json:"tls,omitempty"` +} + +type TLSOptions struct { + // If set to true, the load balancer will send a 302 redirect for all + // http connections, asking the clients to use HTTPS. + HttpsRedirect bool `json:"httpsRedirect"` + + // Optional: Indicates whether connections to this port should be + // secured using TLS. The value of this field determines how TLS is + // enforced. + Mode TLSMode `json:"mode,omitempty"` + + // REQUIRED if mode is "SIMPLE" or "MUTUAL". The path to the file + // holding the server-side TLS certificate to use.
+
+// TLSOptions describes the TLS-related options of a server.
+type TLSOptions struct {
+	// If set to true, the load balancer will send a 302 redirect for all
+	// HTTP connections, asking the clients to use HTTPS.
+	HttpsRedirect bool `json:"httpsRedirect"`
+
+	// Optional: Indicates whether connections to this port should be
+	// secured using TLS. The value of this field determines how TLS is
+	// enforced.
+	Mode TLSMode `json:"mode,omitempty"`
+
+	// REQUIRED if mode is "SIMPLE" or "MUTUAL". The path to the file
+	// holding the server-side TLS certificate to use.
+	ServerCertificate string `json:"serverCertificate"`
+
+	// REQUIRED if mode is "SIMPLE" or "MUTUAL". The path to the file
+	// holding the server's private key.
+	PrivateKey string `json:"privateKey"`
+
+	// REQUIRED if mode is "MUTUAL". The path to a file containing
+	// certificate authority certificates to use in verifying a presented
+	// client-side certificate.
+	CaCertificates string `json:"caCertificates"`
+
+	// A list of alternate names to verify the subject identity in the
+	// certificate presented by the client.
+	SubjectAltNames []string `json:"subjectAltNames"`
+}
+
+// TLSMode enumerates the TLS modes enforced by the proxy.
+type TLSMode string
+
+const (
+	// If set to "PASSTHROUGH", the proxy will forward the connection
+	// to the upstream server selected based on the SNI string presented
+	// by the client.
+	TLSModePassThrough TLSMode = "PASSTHROUGH"
+
+	// If set to "SIMPLE", the proxy will secure connections with
+	// standard TLS semantics.
+	TLSModeSimple TLSMode = "SIMPLE"
+
+	// If set to "MUTUAL", the proxy will secure connections to the
+	// upstream using mutual TLS by presenting client certificates for
+	// authentication.
+	TLSModeMutual TLSMode = "MUTUAL"
+)
+
+// Port describes the properties of a specific port of a service.
+type Port struct {
+	// REQUIRED: A valid non-negative integer port number.
+	Number int `json:"number"`
+
+	// REQUIRED: The protocol exposed on the port.
+	// MUST BE one of HTTP|HTTPS|GRPC|HTTP2|MONGO|TCP.
+	Protocol PortProtocol `json:"protocol"`
+
+	// Label assigned to the port.
+	Name string `json:"name,omitempty"`
+}
+
+type PortProtocol string
+
+const (
+	ProtocolHTTP  PortProtocol = "HTTP"
+	ProtocolHTTPS PortProtocol = "HTTPS"
+	ProtocolGRPC  PortProtocol = "GRPC"
+	ProtocolHTTP2 PortProtocol = "HTTP2"
+	// The value must be the upper-case form "MONGO" to satisfy the
+	// protocol constraint documented on Port above.
+	ProtocolMongo PortProtocol = "MONGO"
+	ProtocolTCP   PortProtocol = "TCP"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// GatewayList is a list of Gateway resources
+type GatewayList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []Gateway `json:"items"`
+}
diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/register.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/register.go
new file mode 100644
index 000000000..c2089e5c2
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/register.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package v1alpha3 + +import ( + "github.com/knative/pkg/apis/istio" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: istio.GroupName, Version: "v1alpha3"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &VirtualService{}, + &Gateway{}, + &DestinationRule{}, + &VirtualServiceList{}, + &GatewayList{}, + &DestinationRuleList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go new file mode 100644 index 000000000..b9dcee7df --- /dev/null +++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/virtualservice_types.go @@ -0,0 +1,852 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha3 + +import ( + "github.com/knative/pkg/apis/istio/common/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VirtualService +type VirtualService struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec VirtualServiceSpec `json:"spec"` +} + +// A VirtualService defines a set of traffic routing rules to apply when a host is +// addressed. Each routing rule defines matching criteria for traffic of a specific +// protocol. If the traffic is matched, then it is sent to a named destination service +// (or subset/version of it) defined in the registry. +// +// The source of traffic can also be matched in a routing rule. This allows routing +// to be customized for specific client contexts. +// +// The following example routes all HTTP traffic by default to +// pods of the reviews service with label "version: v1". In addition, +// HTTP requests containing /wpcatalog/, /consumercatalog/ url prefixes will +// be rewritten to /newcatalog and sent to pods with label "version: v2". The +// rules will be applied at the gateway named "bookinfo" as well as at all +// the sidecars in the mesh (indicated by the reserved gateway name +// "mesh"). 
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: reviews-route
+// spec:
+//   hosts:
+//   - reviews
+//   gateways: # if omitted, defaults to "mesh"
+//   - bookinfo
+//   - mesh
+//   http:
+//   - match:
+//     - uri:
+//         prefix: "/wpcatalog"
+//     - uri:
+//         prefix: "/consumercatalog"
+//     rewrite:
+//       uri: "/newcatalog"
+//     route:
+//     - destination:
+//         host: reviews
+//         subset: v2
+//   - route:
+//     - destination:
+//         host: reviews
+//         subset: v1
+//
+// A subset/version of a route destination is identified with a reference
+// to a named service subset which must be declared in a corresponding
+// DestinationRule.
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: DestinationRule
+// metadata:
+//   name: reviews-destination
+// spec:
+//   host: reviews
+//   subsets:
+//   - name: v1
+//     labels:
+//       version: v1
+//   - name: v2
+//     labels:
+//       version: v2
+//
+// A host name can be defined by only one VirtualService. A single
+// VirtualService can be used to describe traffic properties for multiple
+// HTTP and TCP ports.
+type VirtualServiceSpec struct {
+	// REQUIRED. The destination hosts for traffic captured by this virtual
+	// service. Each host can be a DNS name with a wildcard prefix or a CIDR
+	// prefix. Depending on the platform, short names can also be used
+	// instead of a FQDN (i.e., a name with no dots). In such a scenario,
+	// the FQDN of the host would be derived based on the underlying
+	// platform.
+	//
+	// For example on Kubernetes, when hosts contains a short name, Istio will
+	// interpret the short name based on the namespace of the rule. Thus, when a
+	// client namespace applies a rule in the "default" namespace containing the
+	// name "reviews", Istio will set up routes to the
+	// "reviews.default.svc.cluster.local" service. However, if a different name
+	// such as "reviews.sales.svc.cluster.local" is used, it would be treated as
+	// a FQDN during virtual host matching. In Consul, a plain service name
+	// would be resolved to the FQDN "reviews.service.consul".
+	//
+	// Note that the hosts field applies to both HTTP and TCP
+	// services. Services inside the mesh, i.e., those found in the service
+	// registry, must always be referred to using their alphanumeric
+	// names. IP addresses or CIDR prefixes are allowed only for services
+	// defined via the Gateway.
+	Hosts []string `json:"hosts"`
+
+	// The names of gateways and sidecars that should apply these routes. A
+	// single VirtualService is used for sidecars inside the mesh as well
+	// as for one or more gateways. The selection condition imposed by this field
+	// can be overridden using the source field in the match conditions of HTTP/TCP
+	// routes. The reserved word "mesh" is used to imply all the sidecars in
+	// the mesh. When this field is omitted, the default gateway ("mesh")
+	// will be used, which would apply the rule to all sidecars in the
+	// mesh. If a list of gateway names is provided, the rules will apply
+	// only to the gateways. To apply the rules to both gateways and sidecars,
+	// specify "mesh" as one of the gateway names.
+	Gateways []string `json:"gateways,omitempty"`
+
+	// An ordered list of route rules for HTTP traffic.
+	// The first rule matching an incoming request is used.
+	Http []HTTPRoute `json:"http,omitempty"`
+
+	// An ordered list of route rules for TCP traffic.
+	// The first rule matching an incoming request is used.
+	Tcp []TCPRoute `json:"tcp,omitempty"`
+
+	// An ordered list of route rules for unterminated TLS traffic,
+	// matched on attributes such as the SNI value presented by the client.
+	// The first rule matching an incoming connection is used.
+	Tls []TLSRoute `json:"tls,omitempty"`
+}
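+
+// Illustrative sketch only (not part of the upstream API): a VirtualService
+// splitting traffic between two subsets, mirroring the reviews example in
+// the comments above. The subset names assume a matching DestinationRule.
+var exampleWeightedRoute = VirtualService{
+	ObjectMeta: metav1.ObjectMeta{Name: "reviews-route"},
+	Spec: VirtualServiceSpec{
+		Hosts: []string{"reviews"},
+		Http: []HTTPRoute{{
+			// Weights across destinations should sum to 100.
+			Route: []DestinationWeight{
+				{Destination: Destination{Host: "reviews", Subset: "v1"}, Weight: 80},
+				{Destination: Destination{Host: "reviews", Subset: "v2"}, Weight: 20},
+			},
+		}},
+	},
+}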
+
+// Describes match conditions and actions for routing HTTP/1.1, HTTP2, and
+// gRPC traffic. See VirtualService for usage examples.
+type HTTPRoute struct {
+	// Match conditions to be satisfied for the rule to be
+	// activated. All conditions inside a single match block have AND
+	// semantics, while the list of match blocks have OR semantics. The rule
+	// is matched if any one of the match blocks succeeds.
+	Match []HTTPMatchRequest `json:"match,omitempty"`
+
+	// An HTTP rule can either redirect or forward (default) traffic. The
+	// forwarding target can be one of several versions of a service (see
+	// the glossary at the beginning of the document). Weights associated with the
+	// service version determine the proportion of traffic it receives.
+	Route []DestinationWeight `json:"route,omitempty"`
+
+	// An HTTP rule can either redirect or forward (default) traffic. If
+	// the traffic passthrough option is specified in the rule,
+	// route/redirect will be ignored. The redirect primitive can be used to
+	// send an HTTP 302 redirect to a different URI or Authority.
+	Redirect *HTTPRedirect `json:"redirect,omitempty"`
+
+	// Rewrite HTTP URIs and Authority headers. Rewrite cannot be used with
+	// the Redirect primitive. Rewrite will be performed before forwarding.
+	Rewrite *HTTPRewrite `json:"rewrite,omitempty"`
+
+	// Indicates that an HTTP/1.1 client connection to this particular route
+	// should be allowed (and expected) to upgrade to a WebSocket connection.
+	// The default is false. Istio's reference sidecar implementation (Envoy)
+	// expects the first request to this route to contain the WebSocket
+	// upgrade headers. Otherwise, the request will be rejected. Note that
+	// WebSocket allows secondary protocol negotiation which may then be
+	// subject to further routing rules based on the protocol selected.
+	WebsocketUpgrade bool `json:"websocketUpgrade,omitempty"`
+
+	// Timeout for HTTP requests.
+	Timeout string `json:"timeout,omitempty"`
+
+	// Retry policy for HTTP requests.
+	Retries *HTTPRetry `json:"retries,omitempty"`
+
+	// Fault injection policy to apply on HTTP traffic.
+	Fault *HTTPFaultInjection `json:"fault,omitempty"`
+
+	// Mirror HTTP traffic to another destination in addition to forwarding
+	// the requests to the intended destination. Mirrored traffic is on a
+	// best-effort basis where the sidecar/gateway will not wait for the
+	// mirrored cluster to respond before returning the response from the
+	// original destination. Statistics will be generated for the mirrored
+	// destination.
+	Mirror *Destination `json:"mirror,omitempty"`
+
+	// Additional HTTP headers to add before forwarding a request to the
+	// destination service.
+	AppendHeaders map[string]string `json:"appendHeaders,omitempty"`
+
+	// HTTP headers to remove before returning the response to the caller.
+	RemoveResponseHeaders map[string]string `json:"removeResponseHeaders,omitempty"`
+
+	// Cross-Origin Resource Sharing policy.
+	CorsPolicy *CorsPolicy `json:"corsPolicy,omitempty"`
+}
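+
+// Illustrative sketch only (not part of the upstream API): an HTTPRoute that
+// rewrites a matched URI prefix before forwarding, mirroring the wpcatalog
+// example in the VirtualService comments. The StringMatch type (and its
+// Prefix field) is assumed from the imported common v1alpha1 package.
+var exampleRewriteRoute = HTTPRoute{
+	// Two alternative prefix matches, with OR semantics across match blocks.
+	Match: []HTTPMatchRequest{
+		{Uri: &v1alpha1.StringMatch{Prefix: "/wpcatalog"}},
+		{Uri: &v1alpha1.StringMatch{Prefix: "/consumercatalog"}},
+	},
+	// The rewrite happens before the request is forwarded to the destination.
+	Rewrite: &HTTPRewrite{Uri: "/newcatalog"},
+	Route: []DestinationWeight{
+		{Destination: Destination{Host: "reviews", Subset: "v2"}, Weight: 100},
+	},
+}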
+
+// HTTPMatchRequest specifies a set of criteria to be met in order for the
+// rule to be applied to the HTTP request. For example, the following
+// restricts the rule to match only requests where the URL path
+// starts with /ratings/v2/ and the request contains a "cookie" with value
+// "user=jason".
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: ratings-route
+// spec:
+//   hosts:
+//   - ratings
+//   http:
+//   - match:
+//     - headers:
+//         cookie:
+//           regex: "^(.*?;)?(user=jason)(;.*)?"
+//       uri:
+//         prefix: "/ratings/v2/"
+//     route:
+//     - destination:
+//         host: ratings
+//
+// HTTPMatchRequest CANNOT be empty.
+type HTTPMatchRequest struct {
+	// URI to match
+	// values are case-sensitive and formatted as follows:
+	//
+	// - `exact: "value"` for exact string match
+	//
+	// - `prefix: "value"` for prefix-based match
+	//
+	// - `regex: "value"` for ECMAScript style regex-based match
+	//
+	Uri *v1alpha1.StringMatch `json:"uri,omitempty"`
+
+	// URI Scheme
+	// values are case-sensitive and formatted as follows:
+	//
+	// - `exact: "value"` for exact string match
+	//
+	// - `prefix: "value"` for prefix-based match
+	//
+	// - `regex: "value"` for ECMAScript style regex-based match
+	//
+	Scheme *v1alpha1.StringMatch `json:"scheme,omitempty"`
+
+	// HTTP Method
+	// values are case-sensitive and formatted as follows:
+	//
+	// - `exact: "value"` for exact string match
+	//
+	// - `prefix: "value"` for prefix-based match
+	//
+	// - `regex: "value"` for ECMAScript style regex-based match
+	//
+	Method *v1alpha1.StringMatch `json:"method,omitempty"`
+
+	// HTTP Authority
+	// values are case-sensitive and formatted as follows:
+	//
+	// - `exact: "value"` for exact string match
+	//
+	// - `prefix: "value"` for prefix-based match
+	//
+	// - `regex: "value"` for ECMAScript style regex-based match
+	//
+	Authority *v1alpha1.StringMatch `json:"authority,omitempty"`
+
+	// The header keys must be lowercase and use hyphen as the separator,
+	// e.g. _x-request-id_.
+	//
+	// Header values are case-sensitive and formatted as follows:
+	//
+	// - `exact: "value"` for exact string match
+	//
+	// - `prefix: "value"` for prefix-based match
+	//
+	// - `regex: "value"` for ECMAScript style regex-based match
+	//
+	// **Note:** The keys `uri`, `scheme`, `method`, and `authority` will be ignored.
+	Headers map[string]v1alpha1.StringMatch `json:"headers,omitempty"`
+
+	// Specifies the port on the host that is being addressed. Many services
+	// only expose a single port, or label ports with the protocols they
+	// support; in these cases it is not required to explicitly select the port.
+	Port uint32 `json:"port,omitempty"`
+
+	// One or more labels that constrain the applicability of a rule to
+	// workloads with the given labels. If the VirtualService has a list of
+	// gateways specified at the top, it should include the reserved gateway
+	// `mesh` in order for this field to be applicable.
+	SourceLabels map[string]string `json:"sourceLabels,omitempty"`
+
+	// Names of gateways where the rule should be applied to. Gateway names
+	// at the top of the VirtualService (if any) are overridden. The gateway match is
+	// independent of sourceLabels.
+	Gateways []string `json:"gateways,omitempty"`
+}
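+
+// Illustrative sketch only (not part of the upstream API): a match block
+// combining a header regex and a URI prefix, mirroring the ratings example
+// above. Both conditions must hold (AND semantics within a single block).
+var exampleMatch = HTTPMatchRequest{
+	Headers: map[string]v1alpha1.StringMatch{
+		"cookie": {Regex: "^(.*?;)?(user=jason)(;.*)?"},
+	},
+	Uri: &v1alpha1.StringMatch{Prefix: "/ratings/v2/"},
+}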
+
+// DestinationWeight describes a destination and the share of traffic it
+// should receive.
+type DestinationWeight struct {
+	// REQUIRED. Destination uniquely identifies the instances of a service
+	// to which the request/connection should be forwarded.
+	Destination Destination `json:"destination"`
+
+	// REQUIRED. The proportion of traffic to be forwarded to the service
+	// version (0-100). The sum of weights across destinations SHOULD BE == 100.
+	// If there is only one destination in a rule, the weight value is assumed
+	// to be 100.
+	Weight int `json:"weight"`
+}
+
+// Destination indicates the network addressable service to which the
+// request/connection will be sent after processing a routing rule. The
+// destination.host should unambiguously refer to a service in the service
+// registry. It can be a short name or a fully qualified domain name from
+// the service registry, a resolvable DNS name, an IP address, or a service
+// name from the service registry together with a subset name. The order of
+// inference is as follows:
+//
+// 1. Service registry lookup. The entire name is looked up in the service
+// registry. If the lookup succeeds, the search terminates. The requests
+// will be routed to any instance of the service in the mesh. When the
+// service name consists of a single word, the FQDN will be constructed in
+// a platform-specific manner. For example, in Kubernetes, the namespace
+// associated with the routing rule will be used to identify the service as
+// <servicename>.<namespace>. However, if the service name contains
+// multiple words separated by a dot (e.g., reviews.prod), the name in its
+// entirety would be looked up in the service registry.
+//
+// 2. Runtime DNS lookup by the proxy. If step 1 fails, and the name is not
+// an IP address, it will be considered as a DNS name that is not in the
+// service registry (e.g., wikipedia.org). The sidecar/gateway will resolve
+// the DNS and load balance requests appropriately. See Envoy's strict_dns
+// for details.
+//
+// The following example routes all traffic by default to pods of the
+// reviews service with label "version: v1" (i.e., subset v1), and some
+// to subset v2, in a Kubernetes environment.
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: reviews-route
+// spec:
+//   hosts:
+//   - reviews # namespace is same as the client/caller's namespace
+//   http:
+//   - match:
+//     - uri:
+//         prefix: "/wpcatalog"
+//     - uri:
+//         prefix: "/consumercatalog"
+//     rewrite:
+//       uri: "/newcatalog"
+//     route:
+//     - destination:
+//         host: reviews
+//         subset: v2
+//   - route:
+//     - destination:
+//         host: reviews
+//         subset: v1
+//
+// And the associated DestinationRule
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: DestinationRule
+// metadata:
+//   name: reviews-destination
+// spec:
+//   host: reviews
+//   subsets:
+//   - name: v1
+//     labels:
+//       version: v1
+//   - name: v2
+//     labels:
+//       version: v2
+//
+// The following VirtualService sets a timeout of 5s for all calls to the
+// productpage.prod service. Notice that there are no subsets defined in
+// this rule. Istio will fetch all instances of the productpage.prod service
+// from the service registry and populate the sidecar's load balancing
+// pool.
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: my-productpage-rule
+// spec:
+//   hosts:
+//   - productpage.prod # in kubernetes, this applies only to prod namespace
+//   http:
+//   - timeout: 5s
+//     route:
+//     - destination:
+//         host: productpage.prod
+//
+// The following sets a timeout of 5s for all calls to the external
+// service wikipedia.org, as there is no internal service of that name.
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: my-wiki-rule
+// spec:
+//   hosts:
+//   - wikipedia.org
+//   http:
+//   - timeout: 5s
+//     route:
+//     - destination:
+//         host: wikipedia.org
+//
+type Destination struct {
+	// REQUIRED. The name of a service from the service registry. Service
+	// names are looked up from the platform's service registry (e.g.,
+	// Kubernetes services, Consul services, etc.) and from the hosts
+	// declared by [ServiceEntry](#ServiceEntry). Traffic forwarded to
+	// destinations that are not found in either of the two will be dropped.
+	//
+	// *Note for Kubernetes users*: When short names are used (e.g. "reviews"
+	// instead of "reviews.default.svc.cluster.local"), Istio will interpret
+	// the short name based on the namespace of the rule, not the service. A
+	// rule in the "default" namespace containing a host "reviews" will be
+	// interpreted as "reviews.default.svc.cluster.local", irrespective of
+	// the actual namespace associated with the reviews service. _To avoid
+	// potential misconfigurations, it is recommended to always use fully
+	// qualified domain names over short names._
+	Host string `json:"host"`
+
+	// The name of a subset within the service. Applicable only to services
+	// within the mesh. The subset must be defined in a corresponding
+	// DestinationRule.
+	Subset string `json:"subset,omitempty"`
+
+	// Specifies the port on the host that is being addressed. If a service
+	// exposes only a single port it is not required to explicitly select the
+	// port.
+	Port PortSelector `json:"port,omitempty"`
+}
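+
+// Illustrative sketch only (not part of the upstream API): a Destination
+// using a fully qualified host, a subset, and an explicit port, following
+// the recommendation above to prefer FQDNs over short names.
+var exampleDestination = Destination{
+	Host:   "reviews.default.svc.cluster.local",
+	Subset: "v2",
+	// Only needed when the service exposes more than one port.
+	Port: PortSelector{Number: 9080},
+}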
+
+// PortSelector specifies the number or name of a port to be used for
+// matching or selection for final routing.
+type PortSelector struct {
+	// Choose one of the fields below.
+
+	// Valid port number
+	Number uint32 `json:"number,omitempty"`
+
+	// Valid port name
+	Name string `json:"name,omitempty"`
+}
+
+// Describes match conditions and actions for routing TCP traffic. The
+// following routing rule forwards traffic arriving at port 27017 for
+// mongo.prod.svc.cluster.local from the 172.17.16.0/24 subnet to another
+// Mongo server on port 5555.
+//
+// ```yaml
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: bookinfo-mongo
+// spec:
+//   hosts:
+//   - mongo.prod.svc.cluster.local
+//   tcp:
+//   - match:
+//     - port: 27017
+//       sourceSubnet: "172.17.16.0/24"
+//     route:
+//     - destination:
+//         host: mongo.backup.svc.cluster.local
+//         port:
+//           number: 5555
+// ```
+type TCPRoute struct {
+	// Match conditions to be satisfied for the rule to be
+	// activated. All conditions inside a single match block have AND
+	// semantics, while the list of match blocks have OR semantics. The rule
+	// is matched if any one of the match blocks succeeds.
+	Match []L4MatchAttributes `json:"match"`
+
+	// The destinations to which the connection should be forwarded. Weights
+	// must add to 100%.
+	Route []DestinationWeight `json:"route"`
+}
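+
+// Illustrative sketch only (not part of the upstream API): a TCPRoute that
+// forwards connections arriving on port 27017 to a backup Mongo service.
+// Note that this Go type matches on destination subnets and port; the
+// sourceSubnet shown in the YAML example has no counterpart here.
+var exampleTCPRoute = TCPRoute{
+	Match: []L4MatchAttributes{{Port: 27017}},
+	Route: []DestinationWeight{{
+		Destination: Destination{
+			Host: "mongo.backup.svc.cluster.local",
+			Port: PortSelector{Number: 5555},
+		},
+		Weight: 100,
+	}},
+}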
+
+// Describes match conditions and actions for routing unterminated TLS
+// traffic (TLS/HTTPS). The following routing rule forwards unterminated TLS
+// traffic arriving at port 443 of a gateway called mygateway to internal
+// services in the mesh based on the SNI value.
+//
+// ```yaml
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: bookinfo-sni
+// spec:
+//   hosts:
+//   - '*.bookinfo.com'
+//   gateways:
+//   - mygateway
+//   tls:
+//   - match:
+//     - port: 443
+//       sniHosts:
+//       - login.bookinfo.com
+//     route:
+//     - destination:
+//         host: login.prod.svc.cluster.local
+//   - match:
+//     - port: 443
+//       sniHosts:
+//       - reviews.bookinfo.com
+//     route:
+//     - destination:
+//         host: reviews.prod.svc.cluster.local
+// ```
+type TLSRoute struct {
+	// REQUIRED. Match conditions to be satisfied for the rule to be
+	// activated. All conditions inside a single match block have AND
+	// semantics, while the list of match blocks have OR semantics. The rule
+	// is matched if any one of the match blocks succeeds.
+	Match []TLSMatchAttributes `json:"match"`
+
+	// The destination to which the connection should be forwarded.
+	Route []DestinationWeight `json:"route"`
+}
+
+// L4 connection match attributes. Note that L4 connection matching support
+// is incomplete.
+type L4MatchAttributes struct {
+	// IPv4 or IPv6 IP address of the destination with optional subnet, e.g.,
+	// a.b.c.d/xx form or just a.b.c.d.
+	DestinationSubnets []string `json:"destinationSubnets,omitempty"`
+
+	// Specifies the port on the host that is being addressed. Many services
+	// only expose a single port, or label ports with the protocols they
+	// support; in these cases it is not required to explicitly select the port.
+	Port int `json:"port,omitempty"`
+
+	// One or more labels that constrain the applicability of a rule to
+	// workloads with the given labels. If the VirtualService has a list of
+	// gateways specified at the top, it should include the reserved gateway
+	// `mesh` in order for this field to be applicable.
+	SourceLabels map[string]string `json:"sourceLabels,omitempty"`
+
+	// Names of gateways where the rule should be applied to. Gateway names
+	// at the top of the VirtualService (if any) are overridden. The gateway match is
+	// independent of sourceLabels.
+	Gateways []string `json:"gateways,omitempty"`
+}
+
+// TLS connection match attributes.
+type TLSMatchAttributes struct {
+	// REQUIRED. SNI (server name indicator) to match on. Wildcard prefixes
+	// can be used in the SNI value, e.g., *.com will match foo.example.com
+	// as well as example.com. An SNI value must be a subset (i.e., fall
+	// within the domain) of the corresponding virtual service's hosts.
+	SniHosts []string `json:"sniHosts"`
+
+	// IPv4 or IPv6 IP addresses of the destination with optional subnet, e.g.,
+	// a.b.c.d/xx form or just a.b.c.d.
+	DestinationSubnets []string `json:"destinationSubnets,omitempty"`
+
+	// Specifies the port on the host that is being addressed. Many services
+	// only expose a single port, or label ports with the protocols they
+	// support; in these cases it is not required to explicitly select the port.
+	Port int `json:"port,omitempty"`
+
+	// One or more labels that constrain the applicability of a rule to
+	// workloads with the given labels. If the VirtualService has a list of
+	// gateways specified at the top, it should include the reserved gateway
+	// `mesh` in order for this field to be applicable.
+	SourceLabels map[string]string `json:"sourceLabels,omitempty"`
+
+	// Names of gateways where the rule should be applied to. Gateway names
+	// at the top of the VirtualService (if any) are overridden. The gateway match is
+	// independent of sourceLabels.
+	Gateways []string `json:"gateways,omitempty"`
+}
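+
+// Illustrative sketch only (not part of the upstream API): a TLSRoute that
+// routes unterminated TLS by SNI, mirroring the bookinfo-sni example above.
+var exampleTLSRoute = TLSRoute{
+	Match: []TLSMatchAttributes{{
+		Port:     443,
+		SniHosts: []string{"login.bookinfo.com"},
+	}},
+	Route: []DestinationWeight{{
+		Destination: Destination{Host: "login.prod.svc.cluster.local"},
+		Weight:      100,
+	}},
+}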
+
+// HTTPRedirect can be used to send a 302 redirect response to the caller,
+// where the Authority/Host and the URI in the response can be swapped with
+// the specified values. For example, the following rule redirects
+// requests for the /v1/getProductRatings API on the ratings service to
+// /v1/bookRatings provided by the bookratings service.
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: ratings-route
+// spec:
+//   hosts:
+//   - ratings
+//   http:
+//   - match:
+//     - uri:
+//         exact: /v1/getProductRatings
+//     redirect:
+//       uri: /v1/bookRatings
+//       authority: bookratings.default.svc.cluster.local
+//   ...
+//
+type HTTPRedirect struct {
+	// On a redirect, overwrite the Path portion of the URL with this
+	// value. Note that the entire path will be replaced, irrespective of the
+	// request URI being matched as an exact path or prefix.
+	Uri string `json:"uri,omitempty"`
+
+	// On a redirect, overwrite the Authority/Host portion of the URL with
+	// this value.
+	Authority string `json:"authority,omitempty"`
+}
+
+// HTTPRewrite can be used to rewrite specific parts of an HTTP request
+// before forwarding the request to the destination. The Rewrite primitive
+// can be used only with the DestinationWeights. The following example
+// demonstrates how to rewrite the URL prefix for an API call (/ratings) to
+// the ratings service before making the actual API call.
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: ratings-route
+// spec:
+//   hosts:
+//   - ratings
+//   http:
+//   - match:
+//     - uri:
+//         prefix: /ratings
+//     rewrite:
+//       uri: /v1/bookRatings
+//     route:
+//     - destination:
+//         host: ratings
+//         subset: v1
+//
+type HTTPRewrite struct {
+	// Rewrite the path (or the prefix) portion of the URI with this
+	// value. If the original URI was matched based on prefix, the value
+	// provided in this field will replace the corresponding matched prefix.
+	Uri string `json:"uri,omitempty"`
+
+	// Rewrite the Authority/Host header with this value.
+	Authority string `json:"authority,omitempty"`
+}
+
+// Describes the retry policy to use when an HTTP request fails. For
+// example, the following rule sets the maximum number of retries to 3 when
+// calling the ratings:v1 service, with a 2s timeout per retry attempt.
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: ratings-route
+// spec:
+//   hosts:
+//   - ratings
+//   http:
+//   - route:
+//     - destination:
+//         host: ratings
+//         subset: v1
+//     retries:
+//       attempts: 3
+//       perTryTimeout: 2s
+//
+type HTTPRetry struct {
+	// REQUIRED. Number of retries for a given request. The interval
+	// between retries will be determined automatically (25ms+). The actual
+	// number of retries attempted depends on the overall request timeout
+	// (the Timeout field on the route).
+	Attempts int `json:"attempts"`
+
+	// Timeout per retry attempt for a given request. Format: 1h/1m/1s/1ms. MUST BE >=1ms.
+	PerTryTimeout string `json:"perTryTimeout"`
+}
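+
+// Illustrative sketch only (not part of the upstream API): a route carrying
+// the retry policy above, mirroring the ratings example. The interval
+// between attempts is chosen automatically by the proxy.
+var exampleRetryRoute = HTTPRoute{
+	Route: []DestinationWeight{{
+		Destination: Destination{Host: "ratings", Subset: "v1"},
+		Weight:      100,
+	}},
+	Retries: &HTTPRetry{Attempts: 3, PerTryTimeout: "2s"},
+}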
+
+// Describes the Cross-Origin Resource Sharing (CORS) policy for a given
+// service. Refer to
+// https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS
+// for further details about cross-origin resource sharing. For example,
+// the following rule restricts cross-origin requests to those originating
+// from the example.com domain using HTTP POST/GET, and sets the
+// Access-Control-Allow-Credentials header to false. In addition, it only
+// exposes the X-Foo-bar header and sets an expiry period of 1 day.
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: ratings-route
+// spec:
+//   hosts:
+//   - ratings
+//   http:
+//   - route:
+//     - destination:
+//         host: ratings
+//         subset: v1
+//     corsPolicy:
+//       allowOrigin:
+//       - example.com
+//       allowMethods:
+//       - POST
+//       - GET
+//       allowCredentials: false
+//       allowHeaders:
+//       - X-Foo-Bar
+//       maxAge: "1d"
+//
+type CorsPolicy struct {
+	// The list of origins that are allowed to perform CORS requests. The
+	// content will be serialized into the Access-Control-Allow-Origin
+	// header. The wildcard * will allow all origins.
+	AllowOrigin []string `json:"allowOrigin,omitempty"`
+
+	// List of HTTP methods allowed to access the resource. The content will
+	// be serialized into the Access-Control-Allow-Methods header.
+	AllowMethods []string `json:"allowMethods,omitempty"`
+
+	// List of HTTP headers that can be used when requesting the
+	// resource. Serialized into the Access-Control-Allow-Headers header.
+	AllowHeaders []string `json:"allowHeaders,omitempty"`
+
+	// A whitelist of HTTP headers that the browsers are allowed to
+	// access. Serialized into the Access-Control-Expose-Headers header.
+	ExposeHeaders []string `json:"exposeHeaders,omitempty"`
+
+	// Specifies how long the results of a preflight request can be
+	// cached. Translates to the Access-Control-Max-Age header.
+	MaxAge string `json:"maxAge,omitempty"`
+
+	// Indicates whether the caller is allowed to send the actual request
+	// (not the preflight) using credentials. Translates to the
+	// Access-Control-Allow-Credentials header.
+	AllowCredentials bool `json:"allowCredentials,omitempty"`
+}
+
+// HTTPFaultInjection can be used to specify one or more faults to inject
+// while forwarding HTTP requests to the destination specified in a route.
+// Fault specification is part of a VirtualService rule. Faults include
+// aborting the HTTP request from the downstream service, and/or delaying
+// the proxying of requests. A fault rule MUST HAVE delay or abort or both.
+//
+// *Note:* Delay and abort faults are independent of one another, even if
+// both are specified simultaneously.
+type HTTPFaultInjection struct {
+	// Delay requests before forwarding, emulating various failures such as
+	// network issues, an overloaded upstream service, etc.
+	Delay *InjectDelay `json:"delay,omitempty"`
+
+	// Abort HTTP request attempts and return error codes back to the
+	// downstream service, giving the impression that the upstream service
+	// is faulty.
+	Abort *InjectAbort `json:"abort,omitempty"`
+}
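+
+// Illustrative sketch only (not part of the upstream API): a fault policy
+// that delays 10% of requests by 5 seconds and aborts another 10% with an
+// HTTP 400, combining the delay and abort examples documented below.
+var exampleFault = HTTPFaultInjection{
+	Delay: &InjectDelay{Percent: 10, FixedDelay: "5s"},
+	Abort: &InjectAbort{Percent: 10, HttpStatus: 400},
+}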
+
+// Delay specification is used to inject latency into the request
+// forwarding path. The following example will introduce a 5 second delay
+// in 10% of the requests to the "v1" version of the "reviews"
+// service from all pods with label env: prod
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: reviews-route
+// spec:
+//   hosts:
+//   - reviews
+//   http:
+//   - match:
+//     - sourceLabels:
+//         env: prod
+//     route:
+//     - destination:
+//         host: reviews
+//         subset: v1
+//     fault:
+//       delay:
+//         percent: 10
+//         fixedDelay: 5s
+//
+// The _fixedDelay_ field is used to indicate the amount of delay in
+// seconds. An optional _percent_ field, a value between 0 and 100, can
+// be used to delay only a certain percentage of requests. If left
+// unspecified, all requests will be delayed.
+type InjectDelay struct {
+	// Percentage of requests on which the delay will be injected (0-100).
+	Percent int `json:"percent,omitempty"`
+
+	// REQUIRED. Add a fixed delay before forwarding the request. Format:
+	// 1h/1m/1s/1ms. MUST be >=1ms.
+	FixedDelay string `json:"fixedDelay"`
+
+	// (-- Add a delay (based on an exponential function) before forwarding
+	// the request. Mean delay needed to derive the exponential delay
+	// values --)
+	ExponentialDelay string `json:"exponentialDelay,omitempty"`
+}
+
+// Abort specification is used to prematurely abort a request with a
+// pre-specified error code. The following example will return an HTTP
+// 400 error code for 10% of the requests to the "ratings" service "v1".
+//
+// apiVersion: networking.istio.io/v1alpha3
+// kind: VirtualService
+// metadata:
+//   name: ratings-route
+// spec:
+//   hosts:
+//   - ratings
+//   http:
+//   - route:
+//     - destination:
+//         host: ratings
+//         subset: v1
+//     fault:
+//       abort:
+//         percent: 10
+//         httpStatus: 400
+//
+// The _httpStatus_ field is used to indicate the HTTP status code to
+// return to the caller. The optional _percent_ field, a value between 0
+// and 100, is used to abort only a certain percentage of requests. If
+// not specified, all requests are aborted.
+type InjectAbort struct {
+	// Percentage of requests to be aborted with the error code provided (0-100).
+	Percent int `json:"percent,omitempty"`
+
+	// REQUIRED. HTTP status code to use to abort the HTTP request.
+	HttpStatus int `json:"httpStatus"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// VirtualServiceList is a list of VirtualService resources
+type VirtualServiceList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+
+	Items []VirtualService `json:"items"`
+}
diff --git a/vendor/github.com/knative/pkg/apis/istio/v1alpha3/zz_generated.deepcopy.go b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/zz_generated.deepcopy.go
new file mode 100644
index 000000000..6555e3f64
--- /dev/null
+++ b/vendor/github.com/knative/pkg/apis/istio/v1alpha3/zz_generated.deepcopy.go
@@ -0,0 +1,1082 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha3
+
+import (
+	v1alpha1 "github.com/knative/pkg/apis/istio/common/v1alpha1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConnectionPoolSettings) DeepCopyInto(out *ConnectionPoolSettings) {
+	*out = *in
+	if in.Tcp != nil {
+		in, out := &in.Tcp, &out.Tcp
+		*out = new(TCPSettings)
+		**out = **in
+	}
+	if in.Http != nil {
+		in, out := &in.Http, &out.Http
+		*out = new(HTTPSettings)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolSettings.
+func (in *ConnectionPoolSettings) DeepCopy() *ConnectionPoolSettings { + if in == nil { + return nil + } + out := new(ConnectionPoolSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsistentHashLB) DeepCopyInto(out *ConsistentHashLB) { + *out = *in + if in.HttpCookie != nil { + in, out := &in.HttpCookie, &out.HttpCookie + *out = new(HTTPCookie) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsistentHashLB. +func (in *ConsistentHashLB) DeepCopy() *ConsistentHashLB { + if in == nil { + return nil + } + out := new(ConsistentHashLB) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CorsPolicy) DeepCopyInto(out *CorsPolicy) { + *out = *in + if in.AllowOrigin != nil { + in, out := &in.AllowOrigin, &out.AllowOrigin + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowMethods != nil { + in, out := &in.AllowMethods, &out.AllowMethods + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowHeaders != nil { + in, out := &in.AllowHeaders, &out.AllowHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposeHeaders != nil { + in, out := &in.ExposeHeaders, &out.ExposeHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CorsPolicy. +func (in *CorsPolicy) DeepCopy() *CorsPolicy { + if in == nil { + return nil + } + out := new(CorsPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Destination) DeepCopyInto(out *Destination) { + *out = *in + out.Port = in.Port + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Destination. +func (in *Destination) DeepCopy() *Destination { + if in == nil { + return nil + } + out := new(Destination) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationRule) DeepCopyInto(out *DestinationRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRule. +func (in *DestinationRule) DeepCopy() *DestinationRule { + if in == nil { + return nil + } + out := new(DestinationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DestinationRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DestinationRuleList) DeepCopyInto(out *DestinationRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DestinationRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRuleList. +func (in *DestinationRuleList) DeepCopy() *DestinationRuleList { + if in == nil { + return nil + } + out := new(DestinationRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DestinationRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationRuleSpec) DeepCopyInto(out *DestinationRuleSpec) { + *out = *in + if in.TrafficPolicy != nil { + in, out := &in.TrafficPolicy, &out.TrafficPolicy + *out = new(TrafficPolicy) + (*in).DeepCopyInto(*out) + } + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]Subset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationRuleSpec. +func (in *DestinationRuleSpec) DeepCopy() *DestinationRuleSpec { + if in == nil { + return nil + } + out := new(DestinationRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DestinationWeight) DeepCopyInto(out *DestinationWeight) { + *out = *in + out.Destination = in.Destination + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DestinationWeight. +func (in *DestinationWeight) DeepCopy() *DestinationWeight { + if in == nil { + return nil + } + out := new(DestinationWeight) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gateway) DeepCopyInto(out *Gateway) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gateway. +func (in *Gateway) DeepCopy() *Gateway { + if in == nil { + return nil + } + out := new(Gateway) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Gateway) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewayList) DeepCopyInto(out *GatewayList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Gateway, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayList. 
+func (in *GatewayList) DeepCopy() *GatewayList { + if in == nil { + return nil + } + out := new(GatewayList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GatewayList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) { + *out = *in + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]Server, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec. +func (in *GatewaySpec) DeepCopy() *GatewaySpec { + if in == nil { + return nil + } + out := new(GatewaySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPCookie) DeepCopyInto(out *HTTPCookie) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCookie. +func (in *HTTPCookie) DeepCopy() *HTTPCookie { + if in == nil { + return nil + } + out := new(HTTPCookie) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPFaultInjection) DeepCopyInto(out *HTTPFaultInjection) { + *out = *in + if in.Delay != nil { + in, out := &in.Delay, &out.Delay + *out = new(InjectDelay) + **out = **in + } + if in.Abort != nil { + in, out := &in.Abort, &out.Abort + *out = new(InjectAbort) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPFaultInjection. +func (in *HTTPFaultInjection) DeepCopy() *HTTPFaultInjection { + if in == nil { + return nil + } + out := new(HTTPFaultInjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPMatchRequest) DeepCopyInto(out *HTTPMatchRequest) { + *out = *in + if in.Uri != nil { + in, out := &in.Uri, &out.Uri + *out = new(v1alpha1.StringMatch) + **out = **in + } + if in.Scheme != nil { + in, out := &in.Scheme, &out.Scheme + *out = new(v1alpha1.StringMatch) + **out = **in + } + if in.Method != nil { + in, out := &in.Method, &out.Method + *out = new(v1alpha1.StringMatch) + **out = **in + } + if in.Authority != nil { + in, out := &in.Authority, &out.Authority + *out = new(v1alpha1.StringMatch) + **out = **in + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make(map[string]v1alpha1.StringMatch, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.SourceLabels != nil { + in, out := &in.SourceLabels, &out.SourceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Gateways != nil { + in, out := &in.Gateways, &out.Gateways + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMatchRequest. +func (in *HTTPMatchRequest) DeepCopy() *HTTPMatchRequest { + if in == nil { + return nil + } + out := new(HTTPMatchRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRedirect) DeepCopyInto(out *HTTPRedirect) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRedirect. +func (in *HTTPRedirect) DeepCopy() *HTTPRedirect { + if in == nil { + return nil + } + out := new(HTTPRedirect) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRetry) DeepCopyInto(out *HTTPRetry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRetry. +func (in *HTTPRetry) DeepCopy() *HTTPRetry { + if in == nil { + return nil + } + out := new(HTTPRetry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPRewrite) DeepCopyInto(out *HTTPRewrite) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRewrite. +func (in *HTTPRewrite) DeepCopy() *HTTPRewrite { + if in == nil { + return nil + } + out := new(HTTPRewrite) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPRoute) DeepCopyInto(out *HTTPRoute) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = make([]HTTPMatchRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = make([]DestinationWeight, len(*in)) + copy(*out, *in) + } + if in.Redirect != nil { + in, out := &in.Redirect, &out.Redirect + *out = new(HTTPRedirect) + **out = **in + } + if in.Rewrite != nil { + in, out := &in.Rewrite, &out.Rewrite + *out = new(HTTPRewrite) + **out = **in + } + if in.Retries != nil { + in, out := &in.Retries, &out.Retries + *out = new(HTTPRetry) + **out = **in + } + if in.Fault != nil { + in, out := &in.Fault, &out.Fault + *out = new(HTTPFaultInjection) + (*in).DeepCopyInto(*out) + } + if in.Mirror != nil { + in, out := &in.Mirror, &out.Mirror + *out = new(Destination) + **out = **in + } + if in.AppendHeaders != nil { + in, out := &in.AppendHeaders, &out.AppendHeaders + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RemoveResponseHeaders != nil { + in, out := &in.RemoveResponseHeaders, &out.RemoveResponseHeaders + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.CorsPolicy != nil { + in, out := &in.CorsPolicy, &out.CorsPolicy + *out = new(CorsPolicy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPRoute. +func (in *HTTPRoute) DeepCopy() *HTTPRoute { + if in == nil { + return nil + } + out := new(HTTPRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPSettings) DeepCopyInto(out *HTTPSettings) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPSettings. +func (in *HTTPSettings) DeepCopy() *HTTPSettings { + if in == nil { + return nil + } + out := new(HTTPSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InjectAbort) DeepCopyInto(out *InjectAbort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InjectAbort. +func (in *InjectAbort) DeepCopy() *InjectAbort { + if in == nil { + return nil + } + out := new(InjectAbort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InjectDelay) DeepCopyInto(out *InjectDelay) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InjectDelay. +func (in *InjectDelay) DeepCopy() *InjectDelay { + if in == nil { + return nil + } + out := new(InjectDelay) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *L4MatchAttributes) DeepCopyInto(out *L4MatchAttributes) { + *out = *in + if in.DestinationSubnets != nil { + in, out := &in.DestinationSubnets, &out.DestinationSubnets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SourceLabels != nil { + in, out := &in.SourceLabels, &out.SourceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Gateways != nil { + in, out := &in.Gateways, &out.Gateways + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L4MatchAttributes. +func (in *L4MatchAttributes) DeepCopy() *L4MatchAttributes { + if in == nil { + return nil + } + out := new(L4MatchAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerSettings) DeepCopyInto(out *LoadBalancerSettings) { + *out = *in + if in.ConsistentHash != nil { + in, out := &in.ConsistentHash, &out.ConsistentHash + *out = new(ConsistentHashLB) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerSettings. +func (in *LoadBalancerSettings) DeepCopy() *LoadBalancerSettings { + if in == nil { + return nil + } + out := new(LoadBalancerSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutlierDetection) DeepCopyInto(out *OutlierDetection) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierDetection. +func (in *OutlierDetection) DeepCopy() *OutlierDetection { + if in == nil { + return nil + } + out := new(OutlierDetection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Port) DeepCopyInto(out *Port) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Port. +func (in *Port) DeepCopy() *Port { + if in == nil { + return nil + } + out := new(Port) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortSelector) DeepCopyInto(out *PortSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortSelector. +func (in *PortSelector) DeepCopy() *PortSelector { + if in == nil { + return nil + } + out := new(PortSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PortTrafficPolicy) DeepCopyInto(out *PortTrafficPolicy) { + *out = *in + out.Port = in.Port + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerSettings) + (*in).DeepCopyInto(*out) + } + if in.ConnectionPool != nil { + in, out := &in.ConnectionPool, &out.ConnectionPool + *out = new(ConnectionPoolSettings) + (*in).DeepCopyInto(*out) + } + if in.OutlierDetection != nil { + in, out := &in.OutlierDetection, &out.OutlierDetection + *out = new(OutlierDetection) + **out = **in + } + if in.Tls != nil { + in, out := &in.Tls, &out.Tls + *out = new(TLSSettings) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortTrafficPolicy. +func (in *PortTrafficPolicy) DeepCopy() *PortTrafficPolicy { + if in == nil { + return nil + } + out := new(PortTrafficPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Server) DeepCopyInto(out *Server) { + *out = *in + out.Port = in.Port + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server. +func (in *Server) DeepCopy() *Server { + if in == nil { + return nil + } + out := new(Server) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subset) DeepCopyInto(out *Subset) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.TrafficPolicy != nil { + in, out := &in.TrafficPolicy, &out.TrafficPolicy + *out = new(TrafficPolicy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subset. +func (in *Subset) DeepCopy() *Subset { + if in == nil { + return nil + } + out := new(Subset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPRoute) DeepCopyInto(out *TCPRoute) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = make([]L4MatchAttributes, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = make([]DestinationWeight, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPRoute. +func (in *TCPRoute) DeepCopy() *TCPRoute { + if in == nil { + return nil + } + out := new(TCPRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPSettings) DeepCopyInto(out *TCPSettings) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSettings. 
+func (in *TCPSettings) DeepCopy() *TCPSettings { + if in == nil { + return nil + } + out := new(TCPSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSMatchAttributes) DeepCopyInto(out *TLSMatchAttributes) { + *out = *in + if in.SniHosts != nil { + in, out := &in.SniHosts, &out.SniHosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DestinationSubnets != nil { + in, out := &in.DestinationSubnets, &out.DestinationSubnets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SourceLabels != nil { + in, out := &in.SourceLabels, &out.SourceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Gateways != nil { + in, out := &in.Gateways, &out.Gateways + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSMatchAttributes. +func (in *TLSMatchAttributes) DeepCopy() *TLSMatchAttributes { + if in == nil { + return nil + } + out := new(TLSMatchAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSOptions) DeepCopyInto(out *TLSOptions) { + *out = *in + if in.SubjectAltNames != nil { + in, out := &in.SubjectAltNames, &out.SubjectAltNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSOptions. +func (in *TLSOptions) DeepCopy() *TLSOptions { + if in == nil { + return nil + } + out := new(TLSOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSRoute) DeepCopyInto(out *TLSRoute) { + *out = *in + if in.Match != nil { + in, out := &in.Match, &out.Match + *out = make([]TLSMatchAttributes, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Route != nil { + in, out := &in.Route, &out.Route + *out = make([]DestinationWeight, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSRoute. +func (in *TLSRoute) DeepCopy() *TLSRoute { + if in == nil { + return nil + } + out := new(TLSRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSSettings) DeepCopyInto(out *TLSSettings) { + *out = *in + if in.SubjectAltNames != nil { + in, out := &in.SubjectAltNames, &out.SubjectAltNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSettings. +func (in *TLSSettings) DeepCopy() *TLSSettings { + if in == nil { + return nil + } + out := new(TLSSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrafficPolicy) DeepCopyInto(out *TrafficPolicy) { + *out = *in + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(LoadBalancerSettings) + (*in).DeepCopyInto(*out) + } + if in.ConnectionPool != nil { + in, out := &in.ConnectionPool, &out.ConnectionPool + *out = new(ConnectionPoolSettings) + (*in).DeepCopyInto(*out) + } + if in.OutlierDetection != nil { + in, out := &in.OutlierDetection, &out.OutlierDetection + *out = new(OutlierDetection) + **out = **in + } + if in.Tls != nil { + in, out := &in.Tls, &out.Tls + *out = new(TLSSettings) + (*in).DeepCopyInto(*out) + } + if in.PortLevelSettings != nil { + in, out := &in.PortLevelSettings, &out.PortLevelSettings + *out = make([]PortTrafficPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficPolicy. +func (in *TrafficPolicy) DeepCopy() *TrafficPolicy { + if in == nil { + return nil + } + out := new(TrafficPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualService) DeepCopyInto(out *VirtualService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualService. +func (in *VirtualService) DeepCopy() *VirtualService { + if in == nil { + return nil + } + out := new(VirtualService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualServiceList) DeepCopyInto(out *VirtualServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceList. +func (in *VirtualServiceList) DeepCopy() *VirtualServiceList { + if in == nil { + return nil + } + out := new(VirtualServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
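// Editor's sketch (not part of the generated file): objects served out of
// informer caches and listers are shared, so callers must DeepCopy before
// mutating — which is what these generated functions are for. A minimal
// illustration; the helper name and gateway value are hypothetical.
package example

import (
	istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
)

// withExtraGateway returns a mutated copy, leaving the cached input untouched.
func withExtraGateway(cached *istiov1alpha3.VirtualService, gw string) *istiov1alpha3.VirtualService {
	vs := cached.DeepCopy() // recursive copy via the generated DeepCopyInto functions
	vs.Spec.Gateways = append(vs.Spec.Gateways, gw)
	return vs
}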
+func (in *VirtualServiceSpec) DeepCopyInto(out *VirtualServiceSpec) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Gateways != nil { + in, out := &in.Gateways, &out.Gateways + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Http != nil { + in, out := &in.Http, &out.Http + *out = make([]HTTPRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tcp != nil { + in, out := &in.Tcp, &out.Tcp + *out = make([]TCPRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tls != nil { + in, out := &in.Tls, &out.Tls + *out = make([]TLSRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceSpec. +func (in *VirtualServiceSpec) DeepCopy() *VirtualServiceSpec { + if in == nil { + return nil + } + out := new(VirtualServiceSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 000000000..ba98730a0 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,120 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + authenticationv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1" + networkingv1alpha3 "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Authentication() authenticationv1alpha1.AuthenticationV1alpha1Interface + NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface + // Deprecated: please explicitly pick a version if possible. + Networking() networkingv1alpha3.NetworkingV1alpha3Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + authenticationV1alpha1 *authenticationv1alpha1.AuthenticationV1alpha1Client + networkingV1alpha3 *networkingv1alpha3.NetworkingV1alpha3Client +} + +// AuthenticationV1alpha1 retrieves the AuthenticationV1alpha1Client +func (c *Clientset) AuthenticationV1alpha1() authenticationv1alpha1.AuthenticationV1alpha1Interface { + return c.authenticationV1alpha1 +} + +// Deprecated: Authentication retrieves the default version of AuthenticationClient. +// Please explicitly pick a version. 
+func (c *Clientset) Authentication() authenticationv1alpha1.AuthenticationV1alpha1Interface { + return c.authenticationV1alpha1 +} + +// NetworkingV1alpha3 retrieves the NetworkingV1alpha3Client +func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface { + return c.networkingV1alpha3 +} + +// Deprecated: Networking retrieves the default version of NetworkingClient. +// Please explicitly pick a version. +func (c *Clientset) Networking() networkingv1alpha3.NetworkingV1alpha3Interface { + return c.networkingV1alpha3 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.authenticationV1alpha1, err = authenticationv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.networkingV1alpha3, err = networkingv1alpha3.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.authenticationV1alpha1 = authenticationv1alpha1.NewForConfigOrDie(c) + cs.networkingV1alpha3 = networkingv1alpha3.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.authenticationV1alpha1 = authenticationv1alpha1.New(c) + cs.networkingV1alpha3 = networkingv1alpha3.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/doc.go new file mode 100644 index 000000000..3fe468584 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. 
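// Editor's sketch (not part of the generated file): wiring the clientset above
// from a kubeconfig and issuing a first request. The kubeconfig path and
// namespace are illustrative.
package main

import (
	"fmt"

	versioned "github.com/knative/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := versioned.NewForConfig(cfg) // rate limiter derived from cfg.QPS/Burst, as above
	if err != nil {
		panic(err)
	}
	vsList, err := cs.NetworkingV1alpha3().VirtualServices("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, vs := range vsList.Items {
		fmt.Println(vs.Name)
	}
}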
+package versioned diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..60ea8ba90 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..b21adf727 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + authenticationv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1" + networkingv1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + authenticationv1alpha1.AddToScheme, + networkingv1alpha3.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/authentication_client.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/authentication_client.go new file mode 100644 index 000000000..d2c0d4331 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/authentication_client.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1" + "github.com/knative/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type AuthenticationV1alpha1Interface interface { + RESTClient() rest.Interface + PoliciesGetter +} + +// AuthenticationV1alpha1Client is used to interact with features provided by the authentication.istio.io group. +type AuthenticationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *AuthenticationV1alpha1Client) Policies(namespace string) PolicyInterface { + return newPolicies(c, namespace) +} + +// NewForConfig creates a new AuthenticationV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthenticationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthenticationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthenticationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthenticationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthenticationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *AuthenticationV1alpha1Client { + return &AuthenticationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *AuthenticationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/doc.go new file mode 100644 index 000000000..75445c179 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/generated_expansion.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..d986b9fcb --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type PolicyExpansion interface{} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/policy.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/policy.go new file mode 100644 index 000000000..ee24e798b --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1/policy.go @@ -0,0 +1,157 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
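// Editor's sketch (not part of the generated files): the empty PolicyExpansion
// interface above is client-gen's hook for hand-written additions to the typed
// client. Independently of that hook, the Policy methods defined in the file
// that follows compose into small helpers; this one is hypothetical.
package example

import (
	authv1alpha1 "github.com/knative/pkg/client/clientset/versioned/typed/authentication/v1alpha1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// policyExists reports whether the named Policy is present in ns.
func policyExists(c authv1alpha1.AuthenticationV1alpha1Interface, ns, name string) (bool, error) {
	_, err := c.Policies(ns).Get(name, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		return false, nil
	}
	return err == nil, err
}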
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1" + scheme "github.com/knative/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PoliciesGetter has a method to return a PolicyInterface. +// A group's client should implement this interface. +type PoliciesGetter interface { + Policies(namespace string) PolicyInterface +} + +// PolicyInterface has methods to work with Policy resources. +type PolicyInterface interface { + Create(*v1alpha1.Policy) (*v1alpha1.Policy, error) + Update(*v1alpha1.Policy) (*v1alpha1.Policy, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Policy, error) + List(opts v1.ListOptions) (*v1alpha1.PolicyList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Policy, err error) + PolicyExpansion +} + +// policies implements PolicyInterface +type policies struct { + client rest.Interface + ns string +} + +// newPolicies returns a Policies +func newPolicies(c *AuthenticationV1alpha1Client, namespace string) *policies { + return &policies{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the policy, and returns the corresponding policy object, and an error if there is any. +func (c *policies) Get(name string, options v1.GetOptions) (result *v1alpha1.Policy, err error) { + result = &v1alpha1.Policy{} + err = c.client.Get(). + Namespace(c.ns). + Resource("policies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Policies that match those selectors. +func (c *policies) List(opts v1.ListOptions) (result *v1alpha1.PolicyList, err error) { + result = &v1alpha1.PolicyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("policies"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested policies. +func (c *policies) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("policies"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a policy and creates it. Returns the server's representation of the policy, and an error, if there is any. +func (c *policies) Create(policy *v1alpha1.Policy) (result *v1alpha1.Policy, err error) { + result = &v1alpha1.Policy{} + err = c.client.Post(). + Namespace(c.ns). + Resource("policies"). + Body(policy). + Do(). + Into(result) + return +} + +// Update takes the representation of a policy and updates it. Returns the server's representation of the policy, and an error, if there is any. +func (c *policies) Update(policy *v1alpha1.Policy) (result *v1alpha1.Policy, err error) { + result = &v1alpha1.Policy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("policies"). + Name(policy.Name). + Body(policy). + Do(). + Into(result) + return +} + +// Delete takes name of the policy and deletes it. Returns an error if one occurs. +func (c *policies) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). 
+ Namespace(c.ns). + Resource("policies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *policies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("policies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched policy. +func (c *policies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Policy, err error) { + result = &v1alpha1.Policy{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("policies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/destinationrule.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/destinationrule.go new file mode 100644 index 000000000..465b9adbd --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/destinationrule.go @@ -0,0 +1,157 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + scheme "github.com/knative/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// DestinationRulesGetter has a method to return a DestinationRuleInterface. +// A group's client should implement this interface. +type DestinationRulesGetter interface { + DestinationRules(namespace string) DestinationRuleInterface +} + +// DestinationRuleInterface has methods to work with DestinationRule resources. 
+type DestinationRuleInterface interface { + Create(*v1alpha3.DestinationRule) (*v1alpha3.DestinationRule, error) + Update(*v1alpha3.DestinationRule) (*v1alpha3.DestinationRule, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha3.DestinationRule, error) + List(opts v1.ListOptions) (*v1alpha3.DestinationRuleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.DestinationRule, err error) + DestinationRuleExpansion +} + +// destinationRules implements DestinationRuleInterface +type destinationRules struct { + client rest.Interface + ns string +} + +// newDestinationRules returns a DestinationRules +func newDestinationRules(c *NetworkingV1alpha3Client, namespace string) *destinationRules { + return &destinationRules{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the destinationRule, and returns the corresponding destinationRule object, and an error if there is any. +func (c *destinationRules) Get(name string, options v1.GetOptions) (result *v1alpha3.DestinationRule, err error) { + result = &v1alpha3.DestinationRule{} + err = c.client.Get(). + Namespace(c.ns). + Resource("destinationrules"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DestinationRules that match those selectors. +func (c *destinationRules) List(opts v1.ListOptions) (result *v1alpha3.DestinationRuleList, err error) { + result = &v1alpha3.DestinationRuleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("destinationrules"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested destinationRules. +func (c *destinationRules) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("destinationrules"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a destinationRule and creates it. Returns the server's representation of the destinationRule, and an error, if there is any. +func (c *destinationRules) Create(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) { + result = &v1alpha3.DestinationRule{} + err = c.client.Post(). + Namespace(c.ns). + Resource("destinationrules"). + Body(destinationRule). + Do(). + Into(result) + return +} + +// Update takes the representation of a destinationRule and updates it. Returns the server's representation of the destinationRule, and an error, if there is any. +func (c *destinationRules) Update(destinationRule *v1alpha3.DestinationRule) (result *v1alpha3.DestinationRule, err error) { + result = &v1alpha3.DestinationRule{} + err = c.client.Put(). + Namespace(c.ns). + Resource("destinationrules"). + Name(destinationRule.Name). + Body(destinationRule). + Do(). + Into(result) + return +} + +// Delete takes name of the destinationRule and deletes it. Returns an error if one occurs. +func (c *destinationRules) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("destinationrules"). + Name(name). + Body(options). + Do(). 
+ Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *destinationRules) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("destinationrules"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched destinationRule. +func (c *destinationRules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.DestinationRule, err error) { + result = &v1alpha3.DestinationRule{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("destinationrules"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/doc.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/doc.go new file mode 100644 index 000000000..acea591f3 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha3 diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/gateway.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/gateway.go new file mode 100644 index 000000000..61ede4c14 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/gateway.go @@ -0,0 +1,157 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + scheme "github.com/knative/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// GatewaysGetter has a method to return a GatewayInterface. +// A group's client should implement this interface. +type GatewaysGetter interface { + Gateways(namespace string) GatewayInterface +} + +// GatewayInterface has methods to work with Gateway resources. 
+type GatewayInterface interface { + Create(*v1alpha3.Gateway) (*v1alpha3.Gateway, error) + Update(*v1alpha3.Gateway) (*v1alpha3.Gateway, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha3.Gateway, error) + List(opts v1.ListOptions) (*v1alpha3.GatewayList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Gateway, err error) + GatewayExpansion +} + +// gateways implements GatewayInterface +type gateways struct { + client rest.Interface + ns string +} + +// newGateways returns a Gateways +func newGateways(c *NetworkingV1alpha3Client, namespace string) *gateways { + return &gateways{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the gateway, and returns the corresponding gateway object, and an error if there is any. +func (c *gateways) Get(name string, options v1.GetOptions) (result *v1alpha3.Gateway, err error) { + result = &v1alpha3.Gateway{} + err = c.client.Get(). + Namespace(c.ns). + Resource("gateways"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Gateways that match those selectors. +func (c *gateways) List(opts v1.ListOptions) (result *v1alpha3.GatewayList, err error) { + result = &v1alpha3.GatewayList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("gateways"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested gateways. +func (c *gateways) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("gateways"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a gateway and creates it. Returns the server's representation of the gateway, and an error, if there is any. +func (c *gateways) Create(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) { + result = &v1alpha3.Gateway{} + err = c.client.Post(). + Namespace(c.ns). + Resource("gateways"). + Body(gateway). + Do(). + Into(result) + return +} + +// Update takes the representation of a gateway and updates it. Returns the server's representation of the gateway, and an error, if there is any. +func (c *gateways) Update(gateway *v1alpha3.Gateway) (result *v1alpha3.Gateway, err error) { + result = &v1alpha3.Gateway{} + err = c.client.Put(). + Namespace(c.ns). + Resource("gateways"). + Name(gateway.Name). + Body(gateway). + Do(). + Into(result) + return +} + +// Delete takes name of the gateway and deletes it. Returns an error if one occurs. +func (c *gateways) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("gateways"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *gateways) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("gateways"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched gateway. 
+func (c *gateways) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.Gateway, err error) { + result = &v1alpha3.Gateway{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("gateways"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/generated_expansion.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/generated_expansion.go new file mode 100644 index 000000000..2f806db14 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +type DestinationRuleExpansion interface{} + +type GatewayExpansion interface{} + +type VirtualServiceExpansion interface{} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/istio_client.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/istio_client.go new file mode 100644 index 000000000..fd7ed4282 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/istio_client.go @@ -0,0 +1,100 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "github.com/knative/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type NetworkingV1alpha3Interface interface { + RESTClient() rest.Interface + DestinationRulesGetter + GatewaysGetter + VirtualServicesGetter +} + +// NetworkingV1alpha3Client is used to interact with features provided by the networking.istio.io group. 
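// Editor's sketch (not part of the generated files): the Patch methods above
// take raw bytes plus a patch type. Custom resources such as Gateway do not
// support strategic merge patch, so a JSON merge patch is the common choice.
// The namespace, name, and label are illustrative.
package example

import (
	"k8s.io/apimachinery/pkg/types"

	istioclient "github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3"
)

func labelGateway(c istioclient.NetworkingV1alpha3Interface) error {
	patch := []byte(`{"metadata":{"labels":{"team":"demo"}}}`)
	_, err := c.Gateways("istio-system").Patch("example-gateway", types.MergePatchType, patch)
	return err
}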
+type NetworkingV1alpha3Client struct { + restClient rest.Interface +} + +func (c *NetworkingV1alpha3Client) DestinationRules(namespace string) DestinationRuleInterface { + return newDestinationRules(c, namespace) +} + +func (c *NetworkingV1alpha3Client) Gateways(namespace string) GatewayInterface { + return newGateways(c, namespace) +} + +func (c *NetworkingV1alpha3Client) VirtualServices(namespace string) VirtualServiceInterface { + return newVirtualServices(c, namespace) +} + +// NewForConfig creates a new NetworkingV1alpha3Client for the given config. +func NewForConfig(c *rest.Config) (*NetworkingV1alpha3Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NetworkingV1alpha3Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkingV1alpha3Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkingV1alpha3Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkingV1alpha3Client for the given RESTClient. +func New(c rest.Interface) *NetworkingV1alpha3Client { + return &NetworkingV1alpha3Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha3.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkingV1alpha3Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/virtualservice.go b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/virtualservice.go new file mode 100644 index 000000000..7c23bbc3a --- /dev/null +++ b/vendor/github.com/knative/pkg/client/clientset/versioned/typed/istio/v1alpha3/virtualservice.go @@ -0,0 +1,157 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + scheme "github.com/knative/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// VirtualServicesGetter has a method to return a VirtualServiceInterface. +// A group's client should implement this interface. +type VirtualServicesGetter interface { + VirtualServices(namespace string) VirtualServiceInterface +} + +// VirtualServiceInterface has methods to work with VirtualService resources. 
+type VirtualServiceInterface interface { + Create(*v1alpha3.VirtualService) (*v1alpha3.VirtualService, error) + Update(*v1alpha3.VirtualService) (*v1alpha3.VirtualService, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha3.VirtualService, error) + List(opts v1.ListOptions) (*v1alpha3.VirtualServiceList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error) + VirtualServiceExpansion +} + +// virtualServices implements VirtualServiceInterface +type virtualServices struct { + client rest.Interface + ns string +} + +// newVirtualServices returns a VirtualServices +func newVirtualServices(c *NetworkingV1alpha3Client, namespace string) *virtualServices { + return &virtualServices{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the virtualService, and returns the corresponding virtualService object, and an error if there is any. +func (c *virtualServices) Get(name string, options v1.GetOptions) (result *v1alpha3.VirtualService, err error) { + result = &v1alpha3.VirtualService{} + err = c.client.Get(). + Namespace(c.ns). + Resource("virtualservices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VirtualServices that match those selectors. +func (c *virtualServices) List(opts v1.ListOptions) (result *v1alpha3.VirtualServiceList, err error) { + result = &v1alpha3.VirtualServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("virtualservices"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested virtualServices. +func (c *virtualServices) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("virtualservices"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a virtualService and creates it. Returns the server's representation of the virtualService, and an error, if there is any. +func (c *virtualServices) Create(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) { + result = &v1alpha3.VirtualService{} + err = c.client.Post(). + Namespace(c.ns). + Resource("virtualservices"). + Body(virtualService). + Do(). + Into(result) + return +} + +// Update takes the representation of a virtualService and updates it. Returns the server's representation of the virtualService, and an error, if there is any. +func (c *virtualServices) Update(virtualService *v1alpha3.VirtualService) (result *v1alpha3.VirtualService, err error) { + result = &v1alpha3.VirtualService{} + err = c.client.Put(). + Namespace(c.ns). + Resource("virtualservices"). + Name(virtualService.Name). + Body(virtualService). + Do(). + Into(result) + return +} + +// Delete takes name of the virtualService and deletes it. Returns an error if one occurs. +func (c *virtualServices) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("virtualservices"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
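// Editor's sketch (not part of the generated file): draining a Watch opened by
// the client above. Namespace and handling are illustrative; controllers
// normally prefer the shared informers defined further below over raw watches.
package example

import (
	"fmt"

	istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3"
	versioned "github.com/knative/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func watchVirtualServices(cs versioned.Interface) error {
	w, err := cs.NetworkingV1alpha3().VirtualServices("default").Watch(metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		if vs, ok := ev.Object.(*istiov1alpha3.VirtualService); ok {
			fmt.Println(ev.Type, vs.Namespace+"/"+vs.Name)
		}
	}
	return nil
}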
+func (c *virtualServices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("virtualservices"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched virtualService. +func (c *virtualServices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha3.VirtualService, err error) { + result = &v1alpha3.VirtualService{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("virtualservices"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/interface.go b/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/interface.go new file mode 100644 index 000000000..e3fc7fe8c --- /dev/null +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package authentication + +import ( + v1alpha1 "github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1" + internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/interface.go b/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/interface.go new file mode 100644 index 000000000..a4f6518b9 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Policies returns a PolicyInformer. + Policies() PolicyInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Policies returns a PolicyInformer. +func (v *version) Policies() PolicyInformer { + return &policyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/policy.go b/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/policy.go new file mode 100644 index 000000000..fabb2bda2 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1/policy.go @@ -0,0 +1,89 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + time "time" + + authenticationv1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1" + versioned "github.com/knative/pkg/client/clientset/versioned" + internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/knative/pkg/client/listers/authentication/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PolicyInformer provides access to a shared informer and lister for +// Policies. +type PolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.PolicyLister +} + +type policyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPolicyInformer constructs a new informer for Policy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPolicyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPolicyInformer constructs a new informer for Policy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuthenticationV1alpha1().Policies(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AuthenticationV1alpha1().Policies(namespace).Watch(options) + }, + }, + &authenticationv1alpha1.Policy{}, + resyncPeriod, + indexers, + ) +} + +func (f *policyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *policyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&authenticationv1alpha1.Policy{}, f.defaultInformer) +} + +func (f *policyInformer) Lister() v1alpha1.PolicyLister { + return v1alpha1.NewPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/factory.go b/vendor/github.com/knative/pkg/client/informers/externalversions/factory.go new file mode 100644 index 000000000..297b59402 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/factory.go @@ -0,0 +1,186 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/knative/pkg/client/clientset/versioned" + authentication "github.com/knative/pkg/client/informers/externalversions/authentication" + internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces" + istio "github.com/knative/pkg/client/informers/externalversions/istio" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. 
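// Editor's sketch (not part of the generated files): NewFilteredPolicyInformer
// above accepts a TweakListOptionsFunc, which narrows what the informer lists
// and watches. A standalone, label-filtered informer for the rare case where
// the shared factory is not wanted; selector and resync period are
// illustrative.
package example

import (
	"time"

	versioned "github.com/knative/pkg/client/clientset/versioned"
	authinformers "github.com/knative/pkg/client/informers/externalversions/authentication/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func newDemoPolicyInformer(cs versioned.Interface) cache.SharedIndexInformer {
	return authinformers.NewFilteredPolicyInformer(cs, "default", 30*time.Second,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(opts *metav1.ListOptions) { opts.LabelSelector = "app=demo" },
	)
}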
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
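+// A typical startup sequence (sketch; stopCh is the caller's stop channel)
+// starts the factory and then blocks until every requested informer has synced:
+//
+//	factory.Start(stopCh)
+//	for t, ok := range factory.WaitForCacheSync(stopCh) {
+//		if !ok {
+//			log.Fatalf("failed to sync informer cache for %v", t)
+//		}
+//	}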
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Authentication() authentication.Interface + Networking() istio.Interface +} + +func (f *sharedInformerFactory) Authentication() authentication.Interface { + return authentication.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Networking() istio.Interface { + return istio.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/generic.go b/vendor/github.com/knative/pkg/client/informers/externalversions/generic.go new file mode 100644 index 000000000..66737d499 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/generic.go @@ -0,0 +1,71 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1" + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. 
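+// These accessors are normally reached through the factory's ForResource
+// (sketch; the GroupVersionResource must match one of the cases handled below):
+//
+//	gi, err := factory.ForResource(v1alpha3.SchemeGroupVersion.WithResource("virtualservices"))
+//	if err == nil {
+//		objs, _ := gi.Lister().List(labels.Everything())
+//		_ = objs
+//	}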
+func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=authentication.istio.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("policies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Authentication().V1alpha1().Policies().Informer()}, nil + + // Group=networking.istio.io, Version=v1alpha3 + case v1alpha3.SchemeGroupVersion.WithResource("destinationrules"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().DestinationRules().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("gateways"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().Gateways().Informer()}, nil + case v1alpha3.SchemeGroupVersion.WithResource("virtualservices"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha3().VirtualServices().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/knative/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 000000000..2d488c907 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,38 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package internalinterfaces + +import ( + time "time" + + versioned "github.com/knative/pkg/client/clientset/versioned" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/istio/interface.go b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/interface.go new file mode 100644 index 000000000..dd053c21a --- /dev/null +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package networking + +import ( + internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces" + v1alpha3 "github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha3 provides access to shared informers for resources in V1alpha3. + V1alpha3() v1alpha3.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha3 returns a new v1alpha3.Interface. +func (g *group) V1alpha3() v1alpha3.Interface { + return v1alpha3.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/destinationrule.go b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/destinationrule.go new file mode 100644 index 000000000..c080d94bf --- /dev/null +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/destinationrule.go @@ -0,0 +1,89 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + time "time" + + istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + versioned "github.com/knative/pkg/client/clientset/versioned" + internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces" + v1alpha3 "github.com/knative/pkg/client/listers/istio/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// DestinationRuleInformer provides access to a shared informer and lister for +// DestinationRules. +type DestinationRuleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha3.DestinationRuleLister +} + +type destinationRuleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDestinationRuleInformer constructs a new informer for DestinationRule type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDestinationRuleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDestinationRuleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDestinationRuleInformer constructs a new informer for DestinationRule type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
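+// A filtered, standalone informer (sketch; resync 0 disables periodic resync,
+// and the tweak narrows every LIST/WATCH the informer issues; the label is
+// illustrative):
+//
+//	inf := NewFilteredDestinationRuleInformer(client, "default", 0, cache.Indexers{},
+//		func(o *v1.ListOptions) { o.LabelSelector = "app=reviews" },
+//	)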
+func NewFilteredDestinationRuleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().DestinationRules(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().DestinationRules(namespace).Watch(options) + }, + }, + &istiov1alpha3.DestinationRule{}, + resyncPeriod, + indexers, + ) +} + +func (f *destinationRuleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDestinationRuleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *destinationRuleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&istiov1alpha3.DestinationRule{}, f.defaultInformer) +} + +func (f *destinationRuleInformer) Lister() v1alpha3.DestinationRuleLister { + return v1alpha3.NewDestinationRuleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/gateway.go b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/gateway.go new file mode 100644 index 000000000..f77a18665 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/gateway.go @@ -0,0 +1,89 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + time "time" + + istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + versioned "github.com/knative/pkg/client/clientset/versioned" + internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces" + v1alpha3 "github.com/knative/pkg/client/listers/istio/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// GatewayInformer provides access to a shared informer and lister for +// Gateways. +type GatewayInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha3.GatewayLister +} + +type gatewayInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewGatewayInformer constructs a new informer for Gateway type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
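+// Handlers are registered on the shared informer itself (sketch; the handler
+// body is application-defined):
+//
+//	inf := NewGatewayInformer(client, v1.NamespaceAll, 0, cache.Indexers{})
+//	inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
+//		AddFunc: func(obj interface{}) { /* enqueue for processing */ },
+//	})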
+func NewGatewayInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredGatewayInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredGatewayInformer constructs a new informer for Gateway type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredGatewayInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().Gateways(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().Gateways(namespace).Watch(options) + }, + }, + &istiov1alpha3.Gateway{}, + resyncPeriod, + indexers, + ) +} + +func (f *gatewayInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredGatewayInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *gatewayInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&istiov1alpha3.Gateway{}, f.defaultInformer) +} + +func (f *gatewayInformer) Lister() v1alpha3.GatewayLister { + return v1alpha3.NewGatewayLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/interface.go b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/interface.go new file mode 100644 index 000000000..c03f5b447 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/interface.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // DestinationRules returns a DestinationRuleInformer. + DestinationRules() DestinationRuleInformer + // Gateways returns a GatewayInformer. + Gateways() GatewayInformer + // VirtualServices returns a VirtualServiceInformer. + VirtualServices() VirtualServiceInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
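+// In practice the factory calls New internally; callers reach this version
+// wrapper through the group (sketch, factory construction assumed elsewhere):
+//
+//	drInformer := factory.Networking().V1alpha3().DestinationRules()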
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// DestinationRules returns a DestinationRuleInformer. +func (v *version) DestinationRules() DestinationRuleInformer { + return &destinationRuleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Gateways returns a GatewayInformer. +func (v *version) Gateways() GatewayInformer { + return &gatewayInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// VirtualServices returns a VirtualServiceInformer. +func (v *version) VirtualServices() VirtualServiceInformer { + return &virtualServiceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/virtualservice.go b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/virtualservice.go new file mode 100644 index 000000000..8c46a6df0 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/informers/externalversions/istio/v1alpha3/virtualservice.go @@ -0,0 +1,89 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + time "time" + + istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + versioned "github.com/knative/pkg/client/clientset/versioned" + internalinterfaces "github.com/knative/pkg/client/informers/externalversions/internalinterfaces" + v1alpha3 "github.com/knative/pkg/client/listers/istio/v1alpha3" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// VirtualServiceInformer provides access to a shared informer and lister for +// VirtualServices. +type VirtualServiceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha3.VirtualServiceLister +} + +type virtualServiceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewVirtualServiceInformer constructs a new informer for VirtualService type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVirtualServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVirtualServiceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredVirtualServiceInformer constructs a new informer for VirtualService type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredVirtualServiceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().VirtualServices(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha3().VirtualServices(namespace).Watch(options) + }, + }, + &istiov1alpha3.VirtualService{}, + resyncPeriod, + indexers, + ) +} + +func (f *virtualServiceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVirtualServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *virtualServiceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&istiov1alpha3.VirtualService{}, f.defaultInformer) +} + +func (f *virtualServiceInformer) Lister() v1alpha3.VirtualServiceLister { + return v1alpha3.NewVirtualServiceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/expansion_generated.go b/vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/expansion_generated.go new file mode 100644 index 000000000..a80338584 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// PolicyListerExpansion allows custom methods to be added to +// PolicyLister. +type PolicyListerExpansion interface{} + +// PolicyNamespaceListerExpansion allows custom methods to be added to +// PolicyNamespaceLister. +type PolicyNamespaceListerExpansion interface{} diff --git a/vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/policy.go b/vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/policy.go new file mode 100644 index 000000000..73bef4463 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/listers/authentication/v1alpha1/policy.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/knative/pkg/apis/istio/authentication/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PolicyLister helps list Policies. +type PolicyLister interface { + // List lists all Policies in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Policy, err error) + // Policies returns an object that can list and get Policies. + Policies(namespace string) PolicyNamespaceLister + PolicyListerExpansion +} + +// policyLister implements the PolicyLister interface. +type policyLister struct { + indexer cache.Indexer +} + +// NewPolicyLister returns a new PolicyLister. +func NewPolicyLister(indexer cache.Indexer) PolicyLister { + return &policyLister{indexer: indexer} +} + +// List lists all Policies in the indexer. +func (s *policyLister) List(selector labels.Selector) (ret []*v1alpha1.Policy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Policy)) + }) + return ret, err +} + +// Policies returns an object that can list and get Policies. +func (s *policyLister) Policies(namespace string) PolicyNamespaceLister { + return policyNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PolicyNamespaceLister helps list and get Policies. +type PolicyNamespaceLister interface { + // List lists all Policies in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Policy, err error) + // Get retrieves the Policy from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Policy, error) + PolicyNamespaceListerExpansion +} + +// policyNamespaceLister implements the PolicyNamespaceLister +// interface. +type policyNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Policies in the indexer for a given namespace. +func (s policyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Policy, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Policy)) + }) + return ret, err +} + +// Get retrieves the Policy from the indexer for a given namespace and name. +func (s policyNamespaceLister) Get(name string) (*v1alpha1.Policy, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("policy"), name) + } + return obj.(*v1alpha1.Policy), nil +} diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go new file mode 100644 index 000000000..804b19cbb --- /dev/null +++ b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/destinationrule.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DestinationRuleLister helps list DestinationRules. +type DestinationRuleLister interface { + // List lists all DestinationRules in the indexer. + List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) + // DestinationRules returns an object that can list and get DestinationRules. + DestinationRules(namespace string) DestinationRuleNamespaceLister + DestinationRuleListerExpansion +} + +// destinationRuleLister implements the DestinationRuleLister interface. +type destinationRuleLister struct { + indexer cache.Indexer +} + +// NewDestinationRuleLister returns a new DestinationRuleLister. +func NewDestinationRuleLister(indexer cache.Indexer) DestinationRuleLister { + return &destinationRuleLister{indexer: indexer} +} + +// List lists all DestinationRules in the indexer. +func (s *destinationRuleLister) List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.DestinationRule)) + }) + return ret, err +} + +// DestinationRules returns an object that can list and get DestinationRules. +func (s *destinationRuleLister) DestinationRules(namespace string) DestinationRuleNamespaceLister { + return destinationRuleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DestinationRuleNamespaceLister helps list and get DestinationRules. +type DestinationRuleNamespaceLister interface { + // List lists all DestinationRules in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) + // Get retrieves the DestinationRule from the indexer for a given namespace and name. + Get(name string) (*v1alpha3.DestinationRule, error) + DestinationRuleNamespaceListerExpansion +} + +// destinationRuleNamespaceLister implements the DestinationRuleNamespaceLister +// interface. +type destinationRuleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DestinationRules in the indexer for a given namespace. +func (s destinationRuleNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.DestinationRule, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.DestinationRule)) + }) + return ret, err +} + +// Get retrieves the DestinationRule from the indexer for a given namespace and name. 
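+// A cache-backed lookup (sketch; the lister is normally built from a shared
+// informer via NewDestinationRuleLister(informer.GetIndexer()), names are
+// illustrative):
+//
+//	dr, err := lister.DestinationRules("default").Get("reviews")
+//	if errors.IsNotFound(err) {
+//		// absent from the cache: treat as not (yet) created
+//	}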
+func (s destinationRuleNamespaceLister) Get(name string) (*v1alpha3.DestinationRule, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha3.Resource("destinationrule"), name) + } + return obj.(*v1alpha3.DestinationRule), nil +} diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go new file mode 100644 index 000000000..2b9f90f67 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +// DestinationRuleListerExpansion allows custom methods to be added to +// DestinationRuleLister. +type DestinationRuleListerExpansion interface{} + +// DestinationRuleNamespaceListerExpansion allows custom methods to be added to +// DestinationRuleNamespaceLister. +type DestinationRuleNamespaceListerExpansion interface{} + +// GatewayListerExpansion allows custom methods to be added to +// GatewayLister. +type GatewayListerExpansion interface{} + +// GatewayNamespaceListerExpansion allows custom methods to be added to +// GatewayNamespaceLister. +type GatewayNamespaceListerExpansion interface{} + +// VirtualServiceListerExpansion allows custom methods to be added to +// VirtualServiceLister. +type VirtualServiceListerExpansion interface{} + +// VirtualServiceNamespaceListerExpansion allows custom methods to be added to +// VirtualServiceNamespaceLister. +type VirtualServiceNamespaceListerExpansion interface{} diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go new file mode 100644 index 000000000..6e76c1098 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/gateway.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// GatewayLister helps list Gateways. +type GatewayLister interface { + // List lists all Gateways in the indexer. 
+ List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) + // Gateways returns an object that can list and get Gateways. + Gateways(namespace string) GatewayNamespaceLister + GatewayListerExpansion +} + +// gatewayLister implements the GatewayLister interface. +type gatewayLister struct { + indexer cache.Indexer +} + +// NewGatewayLister returns a new GatewayLister. +func NewGatewayLister(indexer cache.Indexer) GatewayLister { + return &gatewayLister{indexer: indexer} +} + +// List lists all Gateways in the indexer. +func (s *gatewayLister) List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.Gateway)) + }) + return ret, err +} + +// Gateways returns an object that can list and get Gateways. +func (s *gatewayLister) Gateways(namespace string) GatewayNamespaceLister { + return gatewayNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// GatewayNamespaceLister helps list and get Gateways. +type GatewayNamespaceLister interface { + // List lists all Gateways in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) + // Get retrieves the Gateway from the indexer for a given namespace and name. + Get(name string) (*v1alpha3.Gateway, error) + GatewayNamespaceListerExpansion +} + +// gatewayNamespaceLister implements the GatewayNamespaceLister +// interface. +type gatewayNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Gateways in the indexer for a given namespace. +func (s gatewayNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.Gateway, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.Gateway)) + }) + return ret, err +} + +// Get retrieves the Gateway from the indexer for a given namespace and name. +func (s gatewayNamespaceLister) Get(name string) (*v1alpha3.Gateway, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha3.Resource("gateway"), name) + } + return obj.(*v1alpha3.Gateway), nil +} diff --git a/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go new file mode 100644 index 000000000..df7b350c2 --- /dev/null +++ b/vendor/github.com/knative/pkg/client/listers/istio/v1alpha3/virtualservice.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha3 + +import ( + v1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VirtualServiceLister helps list VirtualServices. 
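+// Listers read only from the informer's indexer and never contact the API
+// server. A label-filtered read looks like (sketch; label set illustrative):
+//
+//	sel := labels.SelectorFromSet(labels.Set{"app": "reviews"})
+//	vss, err := lister.VirtualServices("default").List(sel)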
+type VirtualServiceLister interface { + // List lists all VirtualServices in the indexer. + List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) + // VirtualServices returns an object that can list and get VirtualServices. + VirtualServices(namespace string) VirtualServiceNamespaceLister + VirtualServiceListerExpansion +} + +// virtualServiceLister implements the VirtualServiceLister interface. +type virtualServiceLister struct { + indexer cache.Indexer +} + +// NewVirtualServiceLister returns a new VirtualServiceLister. +func NewVirtualServiceLister(indexer cache.Indexer) VirtualServiceLister { + return &virtualServiceLister{indexer: indexer} +} + +// List lists all VirtualServices in the indexer. +func (s *virtualServiceLister) List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.VirtualService)) + }) + return ret, err +} + +// VirtualServices returns an object that can list and get VirtualServices. +func (s *virtualServiceLister) VirtualServices(namespace string) VirtualServiceNamespaceLister { + return virtualServiceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// VirtualServiceNamespaceLister helps list and get VirtualServices. +type VirtualServiceNamespaceLister interface { + // List lists all VirtualServices in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) + // Get retrieves the VirtualService from the indexer for a given namespace and name. + Get(name string) (*v1alpha3.VirtualService, error) + VirtualServiceNamespaceListerExpansion +} + +// virtualServiceNamespaceLister implements the VirtualServiceNamespaceLister +// interface. +type virtualServiceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all VirtualServices in the indexer for a given namespace. +func (s virtualServiceNamespaceLister) List(selector labels.Selector) (ret []*v1alpha3.VirtualService, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha3.VirtualService)) + }) + return ret, err +} + +// Get retrieves the VirtualService from the indexer for a given namespace and name. 
+func (s virtualServiceNamespaceLister) Get(name string) (*v1alpha3.VirtualService, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha3.Resource("virtualservice"), name) + } + return obj.(*v1alpha3.VirtualService), nil +} diff --git a/vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/application.go b/vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/application.go similarity index 100% rename from vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/application.go rename to vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/application.go diff --git a/vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/application_types.go b/vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/application_types.go similarity index 100% rename from vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/application_types.go rename to vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/application_types.go diff --git a/vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/condition.go b/vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/condition.go similarity index 100% rename from vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/condition.go rename to vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/condition.go diff --git a/vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/doc.go b/vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/doc.go similarity index 100% rename from vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/doc.go rename to vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/doc.go diff --git a/vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/register.go b/vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/register.go similarity index 100% rename from vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/register.go rename to vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/register.go diff --git a/vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/status.go b/vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/status.go similarity index 100% rename from vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/status.go rename to vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/status.go diff --git a/vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/zz_generated.deepcopy.go similarity index 100% rename from vendor/sigs.k8s.io/application/pkg/apis/app/v1beta1/zz_generated.deepcopy.go rename to vendor/github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1/zz_generated.deepcopy.go diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go b/vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/doc.go similarity index 81% rename from vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go rename to vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/doc.go index ab386b29c..45d7f7c35 100644 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go +++ b/vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/doc.go @@ -14,7 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. 
*/
-/*
-Package controllerutil contains utility functions for working with and implementing Controllers.
-*/
-package controllerutil
+// Package genericreconciler contains a generic reconciler loop
+package genericreconciler
diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/genericreconciler.go b/vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/genericreconciler.go
new file mode 100644
index 000000000..3c3a31318
--- /dev/null
+++ b/vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/genericreconciler.go
@@ -0,0 +1,374 @@
+/*
+Copyright 2018 The Kubernetes Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+   http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericreconciler
+
+import (
+	"context"
+	"fmt"
+	"github.com/kubernetes-sigs/application/pkg/component"
+	cr "github.com/kubernetes-sigs/application/pkg/customresource"
+	"github.com/kubernetes-sigs/application/pkg/resource"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	urt "k8s.io/apimachinery/pkg/util/runtime"
+	"log"
+	"reflect"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+func handleErrorArr(info string, name string, e error, errs []error) []error {
+	HandleError(info, name, e)
+	return append(errs, e)
+}
+
+// HandleError is a common error-handling routine.
+func HandleError(info string, name string, e error) error {
+	urt.HandleError(fmt.Errorf("Failed: [%s] %s. %s", name, info, e.Error()))
+	return e
+}
+
+func (gr *Reconciler) observe(observables ...resource.Observable) (*resource.ObjectBag, error) {
+	var returnval *resource.ObjectBag = new(resource.ObjectBag)
+	var err error
+	for _, obs := range observables {
+		var resources []resource.Object
+		if obs.Labels != nil {
+			opts := client.MatchingLabels(obs.Labels)
+			opts.Raw = &metav1.ListOptions{TypeMeta: obs.Type}
+			err = gr.List(context.TODO(), opts, obs.ObjList.(runtime.Object))
+			if err == nil {
+				items, err := meta.ExtractList(obs.ObjList.(runtime.Object))
+				if err == nil {
+					for _, item := range items {
+						resources = append(resources, resource.Object{Obj: item.(metav1.Object)})
+					}
+				}
+			}
+		} else {
+			var obj metav1.Object = obs.Obj.(metav1.Object)
+			name := obj.GetName()
+			namespace := obj.GetNamespace()
+			otype := reflect.TypeOf(obj).String()
+			err = gr.Get(context.TODO(),
+				types.NamespacedName{Name: name, Namespace: namespace},
+				obs.Obj.(runtime.Object))
+			if err == nil {
+				log.Printf("   >>get: %s", otype+"/"+namespace+"/"+name)
+				resources = append(resources, resource.Object{Obj: obs.Obj})
+			} else {
+				log.Printf("   >>>ERR get: %s", otype+"/"+namespace+"/"+name)
+			}
+		}
+		if err != nil {
+			return nil, err
+		}
+		for _, resource := range resources {
+			returnval.Add(resource)
+		}
+	}
+	return returnval, nil
+}
+
+func specDiffers(o1, o2 metav1.Object) bool {
+	// Not all k8s objects have a Spec (ConfigMap, for example)
+	// TODO strategic merge patch diff in generic controller loop
+	e := reflect.Indirect(reflect.ValueOf(o1)).FieldByName("Spec")
+	o := reflect.Indirect(reflect.ValueOf(o2)).FieldByName("Spec")
+	if !e.IsValid() {
+		// handling ConfigMap
+		e = reflect.Indirect(reflect.ValueOf(o1)).FieldByName("Data")
+		o = reflect.Indirect(reflect.ValueOf(o2)).FieldByName("Data")
+	}
+
+	if e.IsValid() && o.IsValid() {
+		if reflect.DeepEqual(e.Interface(), o.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+// If both ownerRefs have the same group/kind/name but different uids, at least
+// one of them no longer exists. If this function compared uids as well, we would
+// set both as owners, which is not what we want: when the original owner is
+// already gone, its dependent should be garbage collected along with it.
+func isReferringSameObject(a, b metav1.OwnerReference) bool { + aGV, err := schema.ParseGroupVersion(a.APIVersion) + if err != nil { + return false + } + bGV, err := schema.ParseGroupVersion(b.APIVersion) + if err != nil { + return false + } + return aGV == bGV && a.Kind == b.Kind && a.Name == b.Name +} + +func injectOwnerRefs(o metav1.Object, ref *metav1.OwnerReference) bool { + if ref == nil { + return false + } + objRefs := o.GetOwnerReferences() + for _, r := range objRefs { + if isReferringSameObject(*ref, r) { + return false + } + } + objRefs = append(objRefs, *ref) + o.SetOwnerReferences(objRefs) + return true +} + +// ReconcileCR is a generic function that reconciles expected and observed resources +func (gr *Reconciler) ReconcileCR(namespacedname types.NamespacedName, handle cr.Handle) error { + var status interface{} + expected := &resource.ObjectBag{} + update := false + rsrc := handle.NewRsrc() + name := reflect.TypeOf(rsrc).String() + "/" + namespacedname.String() + err := gr.Get(context.TODO(), namespacedname, rsrc.(runtime.Object)) + if err == nil { + o := rsrc.(metav1.Object) + log.Printf("%s Validating spec\n", name) + err = rsrc.Validate() + status = rsrc.NewStatus() + if err == nil { + log.Printf("%s Applying defaults\n", name) + rsrc.ApplyDefaults() + components := rsrc.Components() + for _, component := range components { + if o.GetDeletionTimestamp() == nil { + err = gr.ReconcileComponent(name, component, status, expected) + } else { + err = gr.FinalizeComponent(name, component, status, expected) + } + } + } + } else { + if errors.IsNotFound(err) { + urt.HandleError(fmt.Errorf("not found %s. %s", name, err.Error())) + return nil + } + } + update = rsrc.UpdateRsrcStatus(status, err) + + if update { + err = gr.Update(context.TODO(), rsrc.(runtime.Object)) + } + if err != nil { + urt.HandleError(fmt.Errorf("error updating %s. %s", name, err.Error())) + } + + return err +} + +// ObserveAndMutate is a function that is called to observe and mutate expected resources +func (gr *Reconciler) ObserveAndMutate(crname string, c component.Component, status interface{}, mutate bool, aggregated *resource.ObjectBag) (*resource.ObjectBag, *resource.ObjectBag, error) { + var err error + var expected, observed, dependent *resource.ObjectBag + emptybag := &resource.ObjectBag{} + + // Get dependenta objects + dependent, err = gr.observe(resource.ObservablesFromObjects(gr.Scheme, c.DependentResources(c.CR), c.Labels())...) + if err != nil { + return emptybag, emptybag, fmt.Errorf("Failed getting dependent resources: %s", err.Error()) + } + + // Get Expected resources + expected, err = c.ExpectedResources(c.CR, c.Labels(), dependent, aggregated) + if err != nil { + return emptybag, emptybag, fmt.Errorf("Failed gathering expected resources: %s", err.Error()) + } + + // Get observables + observables := c.Observables(gr.Scheme, c.CR, c.Labels(), expected) + + // Observe observables + observed, err = gr.observe(observables...) + if err != nil { + return emptybag, emptybag, fmt.Errorf("Failed observing resources: %s", err.Error()) + } + + // Mutate expected objects + if mutate { + expected, err = c.Mutate(c.CR, c.Labels(), status, expected, dependent, observed) + if err != nil { + return emptybag, emptybag, fmt.Errorf("Failed mutating resources: %s", err.Error()) + } + + // Get observables + observables := c.Observables(gr.Scheme, c.CR, c.Labels(), expected) + + // Observe observables + observed, err = gr.observe(observables...) 
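+		// Observing again after Mutate keeps `observed` consistent with the
+		// mutated expected set before the two are compared in ReconcileComponent.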
+ if err != nil { + return emptybag, emptybag, fmt.Errorf("Failed observing resources after mutation: %s", err.Error()) + } + } + + return expected, observed, err +} + +// FinalizeComponent is a function that finalizes component +func (gr *Reconciler) FinalizeComponent(crname string, c component.Component, status interface{}, aggregated *resource.ObjectBag) error { + cname := crname + "(cmpnt:" + c.Name + ")" + log.Printf("%s { finalizing component\n", cname) + defer log.Printf("%s } finalizing component\n", cname) + + expected, observed, err := gr.ObserveAndMutate(crname, c, status, false, aggregated) + + if err != nil { + HandleError("", crname, err) + } + aggregated.Add(expected.Items()...) + err = c.Finalize(c.CR, status, observed) + return err +} + +// ReconcileComponent is a generic function that reconciles expected and observed resources +func (gr *Reconciler) ReconcileComponent(crname string, c component.Component, status interface{}, aggregated *resource.ObjectBag) error { + errs := []error{} + var reconciled *resource.ObjectBag = new(resource.ObjectBag) + + cname := crname + "(cmpnt:" + c.Name + ")" + log.Printf("%s { reconciling component\n", cname) + defer log.Printf("%s } reconciling component\n", cname) + + expected, observed, err := gr.ObserveAndMutate(crname, c, status, true, aggregated) + + // Reconciliation logic is straight-forward: + // This method gets the list of expected resources and observed resources + // We compare the 2 lists and: + // create(rsrc) where rsrc is in expected but not in observed + // delete(rsrc) where rsrc is in observed but not in expected + // update(rsrc) where rsrc is in observed and expected + // + // We have a notion of Managed and Referred resources + // Only Managed resources are CRUD'd + // Missing Reffered resources are treated as errors and surfaced as such in the status field + // + + if err != nil { + errs = handleErrorArr("", crname, err, errs) + } else { + aggregated.Add(expected.Items()...) 
+ log.Printf("%s Expected Resources:\n", cname) + for _, e := range expected.Items() { + log.Printf("%s exp: %s/%s/%s\n", cname, e.Obj.GetNamespace(), reflect.TypeOf(e.Obj).String(), e.Obj.GetName()) + } + log.Printf("%s Observed Resources:\n", cname) + for _, e := range observed.Items() { + log.Printf("%s obs: %s/%s/%s\n", cname, e.Obj.GetNamespace(), reflect.TypeOf(e.Obj).String(), e.Obj.GetName()) + } + + log.Printf("%s Reconciling Resources:\n", cname) + } + for _, e := range expected.Items() { + seen := false + eNamespace := e.Obj.GetNamespace() + eName := e.Obj.GetName() + eKind := reflect.TypeOf(e.Obj).String() + eRsrcInfo := eNamespace + "/" + eKind + "/" + eName + for _, o := range observed.Items() { + if (eName != o.Obj.GetName()) || (eNamespace != o.Obj.GetNamespace()) || + (eKind != reflect.TypeOf(o.Obj).String()) { + continue + } + // rsrc is seen in both expected and observed, update it if needed + e.Obj.SetResourceVersion(o.Obj.GetResourceVersion()) + e.Obj.SetOwnerReferences(o.Obj.GetOwnerReferences()) + if e.Lifecycle == resource.LifecycleManaged && (specDiffers(e.Obj, o.Obj) && c.Differs(e.Obj, o.Obj) || injectOwnerRefs(e.Obj, c.OwnerRef)) { + if err := gr.Update(context.TODO(), e.Obj.(runtime.Object).DeepCopyObject()); err != nil { + errs = handleErrorArr("update", eRsrcInfo, err, errs) + } else { + log.Printf("%s update: %s\n", cname, eRsrcInfo) + } + } else { + log.Printf("%s nochange: %s\n", cname, eRsrcInfo) + } + reconciled.Add(o) + seen = true + break + } + // rsrc is in expected but not in observed - create + if !seen { + if e.Lifecycle == resource.LifecycleManaged { + injectOwnerRefs(e.Obj, c.OwnerRef) + if err := gr.Create(context.TODO(), e.Obj.(runtime.Object)); err != nil { + errs = handleErrorArr("Create", cname, err, errs) + } else { + log.Printf("%s +create: %s\n", cname, eRsrcInfo) + reconciled.Add(e) + } + } else { + err := fmt.Errorf("missing resource not managed by %s: %s", cname, eRsrcInfo) + errs = handleErrorArr("missing resource", cname, err, errs) + } + } + } + + // delete(observed - expected) + for _, o := range observed.Items() { + seen := false + oNamespace := o.Obj.GetNamespace() + oName := o.Obj.GetName() + oKind := reflect.TypeOf(o.Obj).String() + oRsrcInfo := oKind + "/" + oNamespace + "/" + oName + for _, e := range expected.Items() { + if (e.Obj.GetName() == oName) && + (e.Obj.GetNamespace() == oNamespace) && + (reflect.TypeOf(o.Obj).String() == oKind) { + seen = true + break + } + } + // rsrc is in observed but not in expected - delete + if !seen { + if err := gr.Delete(context.TODO(), o.Obj.(runtime.Object)); err != nil { + errs = handleErrorArr("delete", oRsrcInfo, err, errs) + } else { + log.Printf("%s -delete: %s\n", cname, oRsrcInfo) + } + } + } + + err = utilerrors.NewAggregate(errs) + c.UpdateComponentStatus(c.CR, status, reconciled, err) + return err +} + +// Reconcile expected by kubebuilder +func (gr *Reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { + err := gr.ReconcileCR(request.NamespacedName, gr.Handle) + if err != nil { + fmt.Printf("err: %s", err.Error()) + } + return reconcile.Result{}, err +} + +// AddToSchemes for adding Application to scheme +var AddToSchemes runtime.SchemeBuilder + +// Init sets up Reconciler +func (gr *Reconciler) Init() { + gr.Client = gr.Manager.GetClient() + gr.Scheme = gr.Manager.GetScheme() + AddToSchemes.AddToScheme(gr.Scheme) +} diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/types.go 
b/vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/types.go
new file mode 100644
index 000000000..dc5618810
--- /dev/null
+++ b/vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/types.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2018 The Kubernetes Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericreconciler
+
+import (
+	cr "github.com/kubernetes-sigs/application/pkg/customresource"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+var _ reconcile.Reconciler = &Reconciler{}
+
+// Reconciler defines the fields needed by controllers built on the generic reconciler
+// +k8s:deepcopy-gen=false
+type Reconciler struct {
+	client.Client
+	Scheme  *runtime.Scheme
+	Handle  cr.Handle
+	Manager manager.Manager
+}
+
+// ReconcilerConfig defines reconciler parameters
+// +k8s:deepcopy-gen=false
+type ReconcilerConfig struct {
+}
+
+// KVmap is a map[string]string
+type KVmap map[string]string
diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/utils.go b/vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/utils.go
new file mode 100644
index 000000000..ecc363264
--- /dev/null
+++ b/vendor/github.com/kubernetes-sigs/application/pkg/genericreconciler/utils.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2018 The Kubernetes Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericreconciler
+
+// Merge merges multiple maps into the target map; later maps win on duplicate keys
+func (out KVmap) Merge(kvmaps ...KVmap) {
+	for _, kvmap := range kvmaps {
+		for k, v := range kvmap {
+			out[k] = v
+		}
+	}
+}
diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/kbcontroller/doc.go b/vendor/github.com/kubernetes-sigs/application/pkg/kbcontroller/doc.go
new file mode 100644
index 000000000..3dc5cb172
--- /dev/null
+++ b/vendor/github.com/kubernetes-sigs/application/pkg/kbcontroller/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kbcontroller contains methods to integrate with the kubebuilder manager
+package kbcontroller
diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/kbcontroller/kbcontroller.go b/vendor/github.com/kubernetes-sigs/application/pkg/kbcontroller/kbcontroller.go
new file mode 100644
index 000000000..eaa546306
--- /dev/null
+++ b/vendor/github.com/kubernetes-sigs/application/pkg/kbcontroller/kbcontroller.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2018 The Kubernetes Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kbcontroller
+
+import (
+	cr "github.com/kubernetes-sigs/application/pkg/customresource"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+// CreateController creates a new Controller and adds it to the Manager with default RBAC.
+// The Manager will set fields on the Controller and start it when the Manager itself is started.
+func CreateController(name string, mgr manager.Manager, handle cr.Handle, r reconcile.Reconciler) error {
+	// Create a new controller
+	c, err := controller.New(name+"-ctrl", mgr, controller.Options{Reconciler: r})
+	if err != nil {
+		return err
+	}
+
+	// Watch for changes to the base resource
+	err = c.Watch(&source.Kind{Type: handle.NewRsrc().(runtime.Object)},
+		&handler.EnqueueRequestForObject{})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go
new file mode 100644
index 000000000..bc22e9732
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_agpl.go
@@ -0,0 +1,683 @@
+package cmd
+
+func initAgpl() {
+	Licenses["agpl"] = License{
+		Name:            "GNU Affero General Public License",
+		PossibleMatches: []string{"agpl", "affero gpl", "gnu agpl"},
+		Header: `
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.`,
+		Text: `                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year> <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
+`,
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go
new file mode 100644
index 000000000..38393d541
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_apache_2.go
@@ -0,0 +1,238 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initApache2() { + Licenses["apache"] = License{ + Name: "Apache 2.0", + PossibleMatches: []string{"apache", "apache20", "apache 2.0", "apache2.0", "apache-2.0"}, + Header: ` +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.`, + Text: ` + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+`,
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go
new file mode 100644
index 000000000..4a847e04a
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_2.go
@@ -0,0 +1,71 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License. 
+ +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initBsdClause2() { + Licenses["freebsd"] = License{ + Name: "Simplified BSD License", + PossibleMatches: []string{"freebsd", "simpbsd", "simple bsd", "2-clause bsd", + "2 clause bsd", "simplified bsd license"}, + Header: `All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.`, + Text: `{{ .copyright }} +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go new file mode 100644 index 000000000..c7476b31f --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_bsd_clause_3.go @@ -0,0 +1,78 @@ +// Copyright © 2015 Steve Francia <spf13@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initBsdClause3() { + Licenses["bsd"] = License{ + Name: "NewBSD", + PossibleMatches: []string{"bsd", "newbsd", "3 clause bsd", "3-clause bsd"}, + Header: `All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.`, + Text: `{{ .copyright }} +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go new file mode 100644 index 000000000..03e05b3a7 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_2.go @@ -0,0 +1,376 @@ +// Copyright © 2015 Steve Francia <spf13@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initGpl2() { + Licenses["gpl2"] = License{ + Name: "GNU General Public License 2.0", + PossibleMatches: []string{"gpl2", "gnu gpl2", "gplv2"}, + Header: ` +This program is free software; you can redistribute it and/or +modify it under the terms of the GNU General Public License +as published by the Free Software Foundation; either version 2 +of the License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU Lesser General Public License +along with this program. If not, see <http://www.gnu.org/licenses/>.`, + Text: ` GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things.
+ + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) 
+ +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type 'show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type 'show c' for details. + +The hypothetical commands 'show w' and 'show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than 'show w' and 'show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + 'Gnomovision' (which makes passes at compilers) written by James Hacker. + + <signature of Ty Coon>, 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License.
+`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go new file mode 100644 index 000000000..ce07679c7 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_gpl_3.go @@ -0,0 +1,711 @@ +// Copyright © 2015 Steve Francia <spf13@spf13.com>. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initGpl3() { + Licenses["gpl3"] = License{ + Name: "GNU General Public License 3.0", + PossibleMatches: []string{"gpl3", "gplv3", "gpl", "gnu gpl3", "gnu gpl"}, + Header: ` +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see <http://www.gnu.org/licenses/>.`, + Text: ` GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code.
And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year> <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program> Copyright (C) <year> <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type 'show c' for details.
+
+The hypothetical commands 'show w' and 'show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+`,
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go
new file mode 100644
index 000000000..0f8b96cad
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_lgpl.go
@@ -0,0 +1,186 @@
+package cmd
+
+func initLgpl() {
+	Licenses["lgpl"] = License{
+		Name:            "GNU Lesser General Public License",
+		PossibleMatches: []string{"lgpl", "lesser gpl", "gnu lgpl"},
+		Header: `
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this program.
If not, see <http://www.gnu.org/licenses/>.`,
+		Text: ` GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+ 4. Combined Works.
+ + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library.`, + } +} diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go b/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go new file mode 100644 index 000000000..bd2d0c4fa --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra/cmd/license_mit.go @@ -0,0 +1,63 @@ +// Copyright © 2015 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Parts inspired by https://github.com/ryanuber/go-license + +package cmd + +func initMit() { + Licenses["mit"] = License{ + Name: "MIT License", + PossibleMatches: []string{"mit"}, + Header: ` +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.`,
+		Text: `The MIT License (MIT)
+
+{{ .copyright }}
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+`,
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go b/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go
new file mode 100644
index 000000000..a070134dd
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra/cmd/licenses.go
@@ -0,0 +1,118 @@
+// Copyright © 2015 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Parts inspired by https://github.com/ryanuber/go-license
+
+package cmd
+
+import (
+	"strings"
+	"time"
+
+	"github.com/spf13/viper"
+)
+
+// Licenses contains all possible licenses a user can choose from.
+var Licenses = make(map[string]License)
+
+// License represents a software license agreement, containing the Name of
+// the license, its possible matches (on the command line as given to cobra),
+// the header to be used with each file when the file is created, and the
+// text of the license.
+type License struct {
+	Name            string   // The type of license in use
+	PossibleMatches []string // Similar names to guess
+	Text            string   // License text data
+	Header          string   // License header for source files
+}
+
+func init() {
+	// Allows a user to not use a license.
+	Licenses["none"] = License{"None", []string{"none", "false"}, "", ""}
+
+	initApache2()
+	initMit()
+	initBsdClause3()
+	initBsdClause2()
+	initGpl2()
+	initGpl3()
+	initLgpl()
+	initAgpl()
+}
+
+// getLicense returns the license specified by the user in a flag or in the
+// config. If the user didn't specify a license, it returns Apache License 2.0.
+//
+// TODO: Inspect project for existing license
+func getLicense() License {
+	// If explicitly flagged, use that.
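+	// (userLicense is expected to be bound to cobra's --license flag
+	// elsewhere in this package.)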
+	if userLicense != "" {
+		return findLicense(userLicense)
+	}
+
+	// If the user wants a custom license, use that.
+	if viper.IsSet("license.header") || viper.IsSet("license.text") {
+		return License{Header: viper.GetString("license.header"),
+			Text: viper.GetString("license.text")}
+	}
+
+	// If the user wants a built-in license, use that.
+	if viper.IsSet("license") {
+		return findLicense(viper.GetString("license"))
+	}
+
+	// If the user didn't set any license, use Apache 2.0 by default.
+	return Licenses["apache"]
+}
+
+func copyrightLine() string {
+	author := viper.GetString("author")
+
+	year := viper.GetString("year") // For tests.
+	if year == "" {
+		year = time.Now().Format("2006")
+	}
+
+	return "Copyright © " + year + " " + author
+}
+
+// findLicense looks up the License object for one of the built-in licenses.
+// If no matching license is found, the app is terminated and an error is
+// printed.
+func findLicense(name string) License {
+	found := matchLicense(name)
+	if found == "" {
+		er("unknown license: " + name)
+	}
+	return Licenses[found]
+}
+
+// matchLicense compares the given license name to the PossibleMatches
+// of all built-in licenses. It returns an empty string if name is empty
+// or if no appropriate match for name is found.
+func matchLicense(name string) string {
+	if name == "" {
+		return ""
+	}
+
+	for key, lic := range Licenses {
+		for _, match := range lic.PossibleMatches {
+			if strings.EqualFold(name, match) {
+				return key
+			}
+		}
+	}
+
+	return ""
}
diff --git a/vendor/gopkg.in/square/go-jose.v2/jwt/builder.go b/vendor/gopkg.in/square/go-jose.v2/jwt/builder.go
new file mode 100644
index 000000000..686ec80a4
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/jwt/builder.go
@@ -0,0 +1,334 @@
+/*-
+ * Copyright 2016 Zbigniew Mandziejewicz
+ * Copyright 2016 Square, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jwt
+
+import (
+	"bytes"
+	"reflect"
+
+	"gopkg.in/square/go-jose.v2/json"
+
+	"gopkg.in/square/go-jose.v2"
+)
+
+// Builder is a utility for making JSON Web Tokens. Calls can be chained, and
+// errors are accumulated until the final call to CompactSerialize/FullSerialize.
+type Builder interface {
+	// Claims encodes claims into JWE/JWS form. Multiple calls will merge claims
+	// into a single JSON object. If you are passing private claims, make sure to set
+	// struct field tags to specify the name for the JSON key to be used when
+	// serializing.
+	Claims(i interface{}) Builder
+	// Token builds a JSONWebToken from the provided data.
+	Token() (*JSONWebToken, error)
+	// FullSerialize serializes a token using the full serialization format.
+	FullSerialize() (string, error)
+	// CompactSerialize serializes a token using the compact serialization format.
+	CompactSerialize() (string, error)
+}
+
+// NestedBuilder is a utility for making Signed-Then-Encrypted JSON Web Tokens.
+// Calls can be chained, and errors are accumulated until the final call to
+// CompactSerialize/FullSerialize.
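+//
+// A minimal usage sketch (sig and enc are assumed to be a jose.Signer and a
+// jose.Encrypter configured elsewhere; SignedAndEncrypted below requires the
+// encrypter's content type header to be "JWT"):
+//
+//	raw, err := SignedAndEncrypted(sig, enc).
+//		Claims(Claims{Subject: "subject"}).
+//		CompactSerialize()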
+type NestedBuilder interface { + // Claims encodes claims into JWE/JWS form. Multiple calls will merge claims + // into single JSON object. If you are passing private claims, make sure to set + // struct field tags to specify the name for the JSON key to be used when + // serializing. + Claims(i interface{}) NestedBuilder + // Token builds a NestedJSONWebToken from provided data. + Token() (*NestedJSONWebToken, error) + // FullSerialize serializes a token using the full serialization format. + FullSerialize() (string, error) + // CompactSerialize serializes a token using the compact serialization format. + CompactSerialize() (string, error) +} + +type builder struct { + payload map[string]interface{} + err error +} + +type signedBuilder struct { + builder + sig jose.Signer +} + +type encryptedBuilder struct { + builder + enc jose.Encrypter +} + +type nestedBuilder struct { + builder + sig jose.Signer + enc jose.Encrypter +} + +// Signed creates builder for signed tokens. +func Signed(sig jose.Signer) Builder { + return &signedBuilder{ + sig: sig, + } +} + +// Encrypted creates builder for encrypted tokens. +func Encrypted(enc jose.Encrypter) Builder { + return &encryptedBuilder{ + enc: enc, + } +} + +// SignedAndEncrypted creates builder for signed-then-encrypted tokens. +// ErrInvalidContentType will be returned if encrypter doesn't have JWT content type. +func SignedAndEncrypted(sig jose.Signer, enc jose.Encrypter) NestedBuilder { + if contentType, _ := enc.Options().ExtraHeaders[jose.HeaderContentType].(jose.ContentType); contentType != "JWT" { + return &nestedBuilder{ + builder: builder{ + err: ErrInvalidContentType, + }, + } + } + return &nestedBuilder{ + sig: sig, + enc: enc, + } +} + +func (b builder) claims(i interface{}) builder { + if b.err != nil { + return b + } + + m, ok := i.(map[string]interface{}) + switch { + case ok: + return b.merge(m) + case reflect.Indirect(reflect.ValueOf(i)).Kind() == reflect.Struct: + m, err := normalize(i) + if err != nil { + return builder{ + err: err, + } + } + return b.merge(m) + default: + return builder{ + err: ErrInvalidClaims, + } + } +} + +func normalize(i interface{}) (map[string]interface{}, error) { + m := make(map[string]interface{}) + + raw, err := json.Marshal(i) + if err != nil { + return nil, err + } + + d := json.NewDecoder(bytes.NewReader(raw)) + d.UseNumber() + + if err := d.Decode(&m); err != nil { + return nil, err + } + + return m, nil +} + +func (b *builder) merge(m map[string]interface{}) builder { + p := make(map[string]interface{}) + for k, v := range b.payload { + p[k] = v + } + for k, v := range m { + p[k] = v + } + + return builder{ + payload: p, + } +} + +func (b *builder) token(p func(interface{}) ([]byte, error), h []jose.Header) (*JSONWebToken, error) { + return &JSONWebToken{ + payload: p, + Headers: h, + }, nil +} + +func (b *signedBuilder) Claims(i interface{}) Builder { + return &signedBuilder{ + builder: b.builder.claims(i), + sig: b.sig, + } +} + +func (b *signedBuilder) Token() (*JSONWebToken, error) { + sig, err := b.sign() + if err != nil { + return nil, err + } + + h := make([]jose.Header, len(sig.Signatures)) + for i, v := range sig.Signatures { + h[i] = v.Header + } + + return b.builder.token(sig.Verify, h) +} + +func (b *signedBuilder) CompactSerialize() (string, error) { + sig, err := b.sign() + if err != nil { + return "", err + } + + return sig.CompactSerialize() +} + +func (b *signedBuilder) FullSerialize() (string, error) { + sig, err := b.sign() + if err != nil { + return "", err + } + + return 
sig.FullSerialize(), nil +} + +func (b *signedBuilder) sign() (*jose.JSONWebSignature, error) { + if b.err != nil { + return nil, b.err + } + + p, err := json.Marshal(b.payload) + if err != nil { + return nil, err + } + + return b.sig.Sign(p) +} + +func (b *encryptedBuilder) Claims(i interface{}) Builder { + return &encryptedBuilder{ + builder: b.builder.claims(i), + enc: b.enc, + } +} + +func (b *encryptedBuilder) CompactSerialize() (string, error) { + enc, err := b.encrypt() + if err != nil { + return "", err + } + + return enc.CompactSerialize() +} + +func (b *encryptedBuilder) FullSerialize() (string, error) { + enc, err := b.encrypt() + if err != nil { + return "", err + } + + return enc.FullSerialize(), nil +} + +func (b *encryptedBuilder) Token() (*JSONWebToken, error) { + enc, err := b.encrypt() + if err != nil { + return nil, err + } + + return b.builder.token(enc.Decrypt, []jose.Header{enc.Header}) +} + +func (b *encryptedBuilder) encrypt() (*jose.JSONWebEncryption, error) { + if b.err != nil { + return nil, b.err + } + + p, err := json.Marshal(b.payload) + if err != nil { + return nil, err + } + + return b.enc.Encrypt(p) +} + +func (b *nestedBuilder) Claims(i interface{}) NestedBuilder { + return &nestedBuilder{ + builder: b.builder.claims(i), + sig: b.sig, + enc: b.enc, + } +} + +func (b *nestedBuilder) Token() (*NestedJSONWebToken, error) { + enc, err := b.signAndEncrypt() + if err != nil { + return nil, err + } + + return &NestedJSONWebToken{ + enc: enc, + Headers: []jose.Header{enc.Header}, + }, nil +} + +func (b *nestedBuilder) CompactSerialize() (string, error) { + enc, err := b.signAndEncrypt() + if err != nil { + return "", err + } + + return enc.CompactSerialize() +} + +func (b *nestedBuilder) FullSerialize() (string, error) { + enc, err := b.signAndEncrypt() + if err != nil { + return "", err + } + + return enc.FullSerialize(), nil +} + +func (b *nestedBuilder) signAndEncrypt() (*jose.JSONWebEncryption, error) { + if b.err != nil { + return nil, b.err + } + + p, err := json.Marshal(b.payload) + if err != nil { + return nil, err + } + + sig, err := b.sig.Sign(p) + if err != nil { + return nil, err + } + + p2, err := sig.CompactSerialize() + if err != nil { + return nil, err + } + + return b.enc.Encrypt([]byte(p2)) +} diff --git a/vendor/gopkg.in/square/go-jose.v2/jwt/claims.go b/vendor/gopkg.in/square/go-jose.v2/jwt/claims.go new file mode 100644 index 000000000..50fb7055f --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/jwt/claims.go @@ -0,0 +1,120 @@ +/*- + * Copyright 2016 Zbigniew Mandziejewicz + * Copyright 2016 Square, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "strconv" + "time" + + "gopkg.in/square/go-jose.v2/json" +) + +// Claims represents public claim values (as specified in RFC 7519). 
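+//
+// For example (a sketch; the values are illustrative):
+//
+//	cl := Claims{
+//		Issuer:  "https://issuer.example.com",
+//		Subject: "subject",
+//		Expiry:  NewNumericDate(time.Now().Add(time.Hour)),
+//	}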
+type Claims struct {
+	Issuer    string       `json:"iss,omitempty"`
+	Subject   string       `json:"sub,omitempty"`
+	Audience  Audience     `json:"aud,omitempty"`
+	Expiry    *NumericDate `json:"exp,omitempty"`
+	NotBefore *NumericDate `json:"nbf,omitempty"`
+	IssuedAt  *NumericDate `json:"iat,omitempty"`
+	ID        string       `json:"jti,omitempty"`
+}
+
+// NumericDate represents date and time as the number of seconds since the
+// epoch, including leap seconds. Non-integer values can be represented
+// in the serialized format, but we round to the nearest second.
+type NumericDate int64
+
+// NewNumericDate constructs NumericDate from time.Time value.
+func NewNumericDate(t time.Time) *NumericDate {
+	if t.IsZero() {
+		return nil
+	}
+
+	// While RFC 7519 technically states that NumericDate values may be
+	// non-integer values, we don't bother serializing timestamps in
+	// claims with sub-second accuracy and just round to the nearest
+	// second instead. Not convinced sub-second accuracy is useful here.
+	out := NumericDate(t.Unix())
+	return &out
+}
+
+// MarshalJSON serializes the given NumericDate into its JSON representation.
+func (n NumericDate) MarshalJSON() ([]byte, error) {
+	return []byte(strconv.FormatInt(int64(n), 10)), nil
+}
+
+// UnmarshalJSON reads a date from its JSON representation.
+func (n *NumericDate) UnmarshalJSON(b []byte) error {
+	s := string(b)
+
+	f, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return ErrUnmarshalNumericDate
+	}
+
+	*n = NumericDate(f)
+	return nil
+}
+
+// Time returns time.Time representation of NumericDate.
+func (n *NumericDate) Time() time.Time {
+	if n == nil {
+		return time.Time{}
+	}
+	return time.Unix(int64(*n), 0)
+}
+
+// Audience represents the recipients that the token is intended for.
+type Audience []string
+
+// UnmarshalJSON reads an audience from its JSON representation.
+func (s *Audience) UnmarshalJSON(b []byte) error {
+	var v interface{}
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	switch v := v.(type) {
+	case string:
+		*s = []string{v}
+	case []interface{}:
+		a := make([]string, len(v))
+		for i, e := range v {
+			s, ok := e.(string)
+			if !ok {
+				return ErrUnmarshalAudience
+			}
+			a[i] = s
+		}
+		*s = a
+	default:
+		return ErrUnmarshalAudience
+	}
+
+	return nil
+}
+
+// Contains reports whether the audience contains the given value.
+func (s Audience) Contains(v string) bool {
+	for _, a := range s {
+		if a == v {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/gopkg.in/square/go-jose.v2/jwt/doc.go b/vendor/gopkg.in/square/go-jose.v2/jwt/doc.go
new file mode 100644
index 000000000..4cf97b54e
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/jwt/doc.go
@@ -0,0 +1,22 @@
+/*-
+ * Copyright 2017 Square Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+
+Package jwt provides an implementation of the JSON Web Token standard.
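+
+A typical round trip, sketched here with assumed names (signer, cl, and
+verificationKey are placeholders, not part of this package):
+
+	raw, err := jwt.Signed(signer).Claims(cl).CompactSerialize()
+	tok, err := jwt.ParseSigned(raw)
+	err = tok.Claims(verificationKey, &cl)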
+
+*/
+package jwt
diff --git a/vendor/gopkg.in/square/go-jose.v2/jwt/errors.go b/vendor/gopkg.in/square/go-jose.v2/jwt/errors.go
new file mode 100644
index 000000000..09f76ae4b
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/jwt/errors.go
@@ -0,0 +1,53 @@
+/*-
+ * Copyright 2016 Zbigniew Mandziejewicz
+ * Copyright 2016 Square, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jwt
+
+import "errors"
+
+// ErrUnmarshalAudience indicates that aud claim could not be unmarshalled.
+var ErrUnmarshalAudience = errors.New("square/go-jose/jwt: expected string or array value to unmarshal to Audience")
+
+// ErrUnmarshalNumericDate indicates that JWT NumericDate could not be unmarshalled.
+var ErrUnmarshalNumericDate = errors.New("square/go-jose/jwt: expected number value to unmarshal NumericDate")
+
+// ErrInvalidClaims indicates that given claims have invalid type.
+var ErrInvalidClaims = errors.New("square/go-jose/jwt: expected claims to be value convertible into JSON object")
+
+// ErrInvalidIssuer indicates invalid iss claim.
+var ErrInvalidIssuer = errors.New("square/go-jose/jwt: validation failed, invalid issuer claim (iss)")
+
+// ErrInvalidSubject indicates invalid sub claim.
+var ErrInvalidSubject = errors.New("square/go-jose/jwt: validation failed, invalid subject claim (sub)")
+
+// ErrInvalidAudience indicates invalid aud claim.
+var ErrInvalidAudience = errors.New("square/go-jose/jwt: validation failed, invalid audience claim (aud)")
+
+// ErrInvalidID indicates invalid jti claim.
+var ErrInvalidID = errors.New("square/go-jose/jwt: validation failed, invalid ID claim (jti)")
+
+// ErrNotValidYet indicates that token is used before time indicated in nbf claim.
+var ErrNotValidYet = errors.New("square/go-jose/jwt: validation failed, token not valid yet (nbf)")
+
+// ErrExpired indicates that token is used after expiry time indicated in exp claim.
+var ErrExpired = errors.New("square/go-jose/jwt: validation failed, token is expired (exp)")
+
+// ErrIssuedInTheFuture indicates that the iat field is in the future.
+var ErrIssuedInTheFuture = errors.New("square/go-jose/jwt: validation failed, token issued in the future (iat)")
+
+// ErrInvalidContentType indicates that token requires JWT cty header.
+var ErrInvalidContentType = errors.New("square/go-jose/jwt: expected content type to be JWT (cty header)")
diff --git a/vendor/gopkg.in/square/go-jose.v2/jwt/jwt.go b/vendor/gopkg.in/square/go-jose.v2/jwt/jwt.go
new file mode 100644
index 000000000..aa13d4f0e
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/jwt/jwt.go
@@ -0,0 +1,163 @@
+/*-
+ * Copyright 2016 Zbigniew Mandziejewicz
+ * Copyright 2016 Square, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "fmt" + "strings" + + jose "gopkg.in/square/go-jose.v2" + "gopkg.in/square/go-jose.v2/json" +) + +// JSONWebToken represents a JSON Web Token (as specified in RFC7519). +type JSONWebToken struct { + payload func(k interface{}) ([]byte, error) + unverifiedPayload func() []byte + Headers []jose.Header +} + +type NestedJSONWebToken struct { + enc *jose.JSONWebEncryption + Headers []jose.Header +} + +// Claims deserializes a JSONWebToken into dest using the provided key. +func (t *JSONWebToken) Claims(key interface{}, dest ...interface{}) error { + payloadKey := tryJWKS(t.Headers, key) + + b, err := t.payload(payloadKey) + if err != nil { + return err + } + + for _, d := range dest { + if err := json.Unmarshal(b, d); err != nil { + return err + } + } + + return nil +} + +// UnsafeClaimsWithoutVerification deserializes the claims of a +// JSONWebToken into the dests. For signed JWTs, the claims are not +// verified. This function won't work for encrypted JWTs. +func (t *JSONWebToken) UnsafeClaimsWithoutVerification(dest ...interface{}) error { + if t.unverifiedPayload == nil { + return fmt.Errorf("square/go-jose: Cannot get unverified claims") + } + claims := t.unverifiedPayload() + for _, d := range dest { + if err := json.Unmarshal(claims, d); err != nil { + return err + } + } + return nil +} + +func (t *NestedJSONWebToken) Decrypt(decryptionKey interface{}) (*JSONWebToken, error) { + key := tryJWKS(t.Headers, decryptionKey) + + b, err := t.enc.Decrypt(key) + if err != nil { + return nil, err + } + + sig, err := ParseSigned(string(b)) + if err != nil { + return nil, err + } + + return sig, nil +} + +// ParseSigned parses token from JWS form. +func ParseSigned(s string) (*JSONWebToken, error) { + sig, err := jose.ParseSigned(s) + if err != nil { + return nil, err + } + headers := make([]jose.Header, len(sig.Signatures)) + for i, signature := range sig.Signatures { + headers[i] = signature.Header + } + + return &JSONWebToken{ + payload: sig.Verify, + unverifiedPayload: sig.UnsafePayloadWithoutVerification, + Headers: headers, + }, nil +} + +// ParseEncrypted parses token from JWE form. +func ParseEncrypted(s string) (*JSONWebToken, error) { + enc, err := jose.ParseEncrypted(s) + if err != nil { + return nil, err + } + + return &JSONWebToken{ + payload: enc.Decrypt, + Headers: []jose.Header{enc.Header}, + }, nil +} + +// ParseSignedAndEncrypted parses signed-then-encrypted token from JWE form. 
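+//
+// A sketch of typical use (raw, decryptionKey and verificationKey are
+// assumed inputs):
+//
+//	nested, err := ParseSignedAndEncrypted(raw)
+//	tok, err := nested.Decrypt(decryptionKey)
+//	err = tok.Claims(verificationKey, &cl)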
+func ParseSignedAndEncrypted(s string) (*NestedJSONWebToken, error) {
+	enc, err := jose.ParseEncrypted(s)
+	if err != nil {
+		return nil, err
+	}
+
+	contentType, _ := enc.Header.ExtraHeaders[jose.HeaderContentType].(string)
+	if strings.ToUpper(contentType) != "JWT" {
+		return nil, ErrInvalidContentType
+	}
+
+	return &NestedJSONWebToken{
+		enc:     enc,
+		Headers: []jose.Header{enc.Header},
+	}, nil
+}
+
+func tryJWKS(headers []jose.Header, key interface{}) interface{} {
+	jwks, ok := key.(*jose.JSONWebKeySet)
+	if !ok {
+		return key
+	}
+
+	var kid string
+	for _, header := range headers {
+		if header.KeyID != "" {
+			kid = header.KeyID
+			break
+		}
+	}
+
+	if kid == "" {
+		return key
+	}
+
+	keys := jwks.Key(kid)
+	if len(keys) == 0 {
+		return key
+	}
+
+	return keys[0].Key
+}
diff --git a/vendor/gopkg.in/square/go-jose.v2/jwt/validation.go b/vendor/gopkg.in/square/go-jose.v2/jwt/validation.go
new file mode 100644
index 000000000..045d5dfba
--- /dev/null
+++ b/vendor/gopkg.in/square/go-jose.v2/jwt/validation.go
@@ -0,0 +1,114 @@
+/*-
+ * Copyright 2016 Zbigniew Mandziejewicz
+ * Copyright 2016 Square, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jwt
+
+import "time"
+
+const (
+	// DefaultLeeway defines the default leeway for matching NotBefore/Expiry claims.
+	DefaultLeeway = 1.0 * time.Minute
+)
+
+// Expected defines values used for protected claims validation.
+// If a field has a zero value, its validation is skipped.
+type Expected struct {
+	// Issuer matches the "iss" claim exactly.
+	Issuer string
+	// Subject matches the "sub" claim exactly.
+	Subject string
+	// Audience matches the values in "aud" claim, regardless of their order.
+	Audience Audience
+	// ID matches the "jti" claim exactly.
+	ID string
+	// Time matches the "exp" and "nbf" claims with leeway.
+	Time time.Time
+}
+
+// WithTime copies expectations with new time.
+func (e Expected) WithTime(t time.Time) Expected {
+	e.Time = t
+	return e
+}
+
+// Validate checks claims in a token against expected values.
+// A default leeway value of one minute is used to compare time values.
+//
+// The default leeway will cause the token to be deemed valid until one
+// minute after the expiration time. If you're a server application that
+// wants to give an extra minute to client tokens, use this
+// function. If you're a client application wondering if the server
+// will accept your token, use ValidateWithLeeway with a leeway <=0,
+// otherwise this function might make you think a token is valid when
+// it is not.
+func (c Claims) Validate(e Expected) error {
+	return c.ValidateWithLeeway(e, DefaultLeeway)
+}
+
+// ValidateWithLeeway checks claims in a token against expected values. A
+// custom leeway may be specified for comparing time values. You may pass a
+// zero value to check time values with no leeway, but you should note that
+// numeric date values are rounded to the nearest second and sub-second
+// precision is not supported.
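+//
+// A sketch of server-side use (the expected values are illustrative):
+//
+//	err := cl.ValidateWithLeeway(Expected{
+//		Issuer: "https://issuer.example.com",
+//		Time:   time.Now(),
+//	}, time.Minute)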
+//
+// The leeway gives some extra time to the token from the server's
+// point of view. That is, if the token is expired, ValidateWithLeeway
+// will still accept the token for 'leeway' amount of time. This fails
+// if you're using this function to check if a server will accept your
+// token, because it will think the token is valid even after it
+// expires. So if you're a client validating whether the token is valid
+// to be submitted to a server, use leeway <=0; if you're a server
+// validating a token, use leeway >=0.
+func (c Claims) ValidateWithLeeway(e Expected, leeway time.Duration) error {
+	if e.Issuer != "" && e.Issuer != c.Issuer {
+		return ErrInvalidIssuer
+	}
+
+	if e.Subject != "" && e.Subject != c.Subject {
+		return ErrInvalidSubject
+	}
+
+	if e.ID != "" && e.ID != c.ID {
+		return ErrInvalidID
+	}
+
+	if len(e.Audience) != 0 {
+		for _, v := range e.Audience {
+			if !c.Audience.Contains(v) {
+				return ErrInvalidAudience
+			}
+		}
+	}
+
+	if !e.Time.IsZero() {
+		if c.NotBefore != nil && e.Time.Add(leeway).Before(c.NotBefore.Time()) {
+			return ErrNotValidYet
+		}
+
+		if c.Expiry != nil && e.Time.Add(-leeway).After(c.Expiry.Time()) {
+			return ErrExpired
+		}
+
+		// IssuedAt is optional but cannot be in the future. This is not required by the RFC, but
+		// something is misconfigured if this happens and we should not trust it.
+		if c.IssuedAt != nil && e.Time.Add(leeway).Before(c.IssuedAt.Time()) {
+			return ErrIssuedInTheFuture
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
new file mode 100644
index 000000000..8af73d2e1
--- /dev/null
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package features
+
+import (
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+)
+
+const (
+	// Every feature gate should add method here following this template:
+	//
+	// // owner: @username
+	// // alpha: v1.4
+	// MyFeature() bool
+
+	// owner: @sttts, @nikhita
+	// alpha: v1.8
+	// beta: v1.9
+	//
+	// CustomResourceValidation is a list of validation methods for CustomResources
+	CustomResourceValidation utilfeature.Feature = "CustomResourceValidation"
+
+	// owner: @sttts, @nikhita
+	// alpha: v1.10
+	// beta: v1.11
+	//
+	// CustomResourceSubresources defines the subresources for CustomResources
+	CustomResourceSubresources utilfeature.Feature = "CustomResourceSubresources"
+
+	// owner: @mbohlool, @roycaihw
+	// alpha: v1.13
+	//
+	// CustomResourceWebhookConversion defines the webhook conversion for Custom Resources.
+	CustomResourceWebhookConversion utilfeature.Feature = "CustomResourceWebhookConversion"
+)
+
+func init() {
+	utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates)
+}
+
+// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys.
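+// Callers typically gate behavior on these keys, e.g. (a sketch):
+//
+//	if utilfeature.DefaultFeatureGate.Enabled(CustomResourceSubresources) {
+//		// serve the /status and /scale subresources for this CRD
+//	}
+//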
+// To add a new feature, define a key for it above and add it here. The features will be +// available throughout Kubernetes binaries. +var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ + CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, + CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta}, + CustomResourceWebhookConversion: {Default: false, PreRelease: utilfeature.Alpha}, +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go new file mode 100644 index 000000000..f02fa8e43 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go @@ -0,0 +1,49 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package equality + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" +) + +// Semantic can do semantic deep equality checks for api objects. +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities. + return a.Cmp(b) == 0 + }, + func(a, b metav1.MicroTime) bool { + return a.UTC() == b.UTC() + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go new file mode 100644 index 000000000..9f20152e4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validation contains generic api type validation functions. 
+package validation // import "k8s.io/apimachinery/pkg/api/validation"
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
new file mode 100644
index 000000000..348cdc087
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+const IsNegativeErrorMsg string = `must be greater than or equal to 0`
+
+// ValidateNameFunc validates that the provided name is valid for a given resource type.
+// Not all resources have the same validation rules for names. Prefix is true
+// if the name will have a value appended to it. If the name is not valid,
+// this returns a list of descriptions of individual characteristics of the
+// value that were not valid. Otherwise this returns an empty list or nil.
+type ValidateNameFunc func(name string, prefix bool) []string

+// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain.
+func NameIsDNSSubdomain(name string, prefix bool) []string {
+	if prefix {
+		name = maskTrailingDash(name)
+	}
+	return validation.IsDNS1123Subdomain(name)
+}
+
+// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label.
+func NameIsDNSLabel(name string, prefix bool) []string {
+	if prefix {
+		name = maskTrailingDash(name)
+	}
+	return validation.IsDNS1123Label(name)
+}
+
+// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 1035 label.
+func NameIsDNS1035Label(name string, prefix bool) []string {
+	if prefix {
+		name = maskTrailingDash(name)
+	}
+	return validation.IsDNS1035Label(name)
+}
+
+// ValidateNamespaceName can be used to check whether the given namespace name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateNamespaceName = NameIsDNSLabel
+
+// ValidateServiceAccountName can be used to check whether the given service account name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateServiceAccountName = NameIsDNSSubdomain
+
+// maskTrailingDash replaces the final character of a string with a subdomain safe
+// value if it is a dash.
+func maskTrailingDash(name string) string {
+	if strings.HasSuffix(name, "-") {
+		return name[:len(name)-2] + "a"
+	}
+	return name
+}
+
+// ValidateNonnegativeField validates that the given value is not negative.
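+// For example (a sketch):
+//
+//	errs := ValidateNonnegativeField(-1, field.NewPath("spec", "replicas"))
+//	// errs now holds a single Invalid error for spec.replicas.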
+func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if value < 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath, value, IsNegativeErrorMsg))
+	}
+	return allErrs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
new file mode 100644
index 000000000..44b9b1600
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
@@ -0,0 +1,308 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+	"strings"
+
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+const FieldImmutableErrorMsg string = `field is immutable`
+
+const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB
+
+// BannedOwners is a blacklist of objects that are not allowed to be owners.
+var BannedOwners = map[schema.GroupVersionKind]struct{}{
+	{Group: "", Version: "v1", Kind: "Event"}: {},
+}
+
+// ValidateClusterName can be used to check whether the given cluster name is valid.
+var ValidateClusterName = NameIsDNS1035Label
+
+// ValidateAnnotations validates that a set of annotations are correctly defined.
+func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	var totalSize int64
+	for k, v := range annotations {
+		for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) {
+			allErrs = append(allErrs, field.Invalid(fldPath, k, msg))
+		}
+		totalSize += (int64)(len(k)) + (int64)(len(v))
+	}
+	if totalSize > (int64)(totalAnnotationSizeLimitB) {
+		allErrs = append(allErrs, field.TooLong(fldPath, "", totalAnnotationSizeLimitB))
+	}
+	return allErrs
+}
+
+func validateOwnerReference(ownerReference metav1.OwnerReference, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	gvk := schema.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind)
+	// gvk.Group is empty for the legacy group.
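+	// (For example, apiVersion "v1" parses to {Group: "", Version: "v1"},
+	// while "apps/v1" parses to {Group: "apps", Version: "v1"}.)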
+	if len(gvk.Version) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty"))
+	}
+	if len(gvk.Kind) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty"))
+	}
+	if len(ownerReference.Name) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty"))
+	}
+	if len(ownerReference.UID) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty"))
+	}
+	if _, ok := BannedOwners[gvk]; ok {
+		allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk)))
+	}
+	return allErrs
+}
+
+// ValidateOwnerReferences validates a set of owner references, and that at
+// most one of them has Controller set to true.
+func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	controllerName := ""
+	for _, ref := range ownerReferences {
+		allErrs = append(allErrs, validateOwnerReference(ref, fldPath)...)
+		if ref.Controller != nil && *ref.Controller {
+			if controllerName != "" {
+				allErrs = append(allErrs, field.Invalid(fldPath, ownerReferences,
+					fmt.Sprintf("Only one reference can have Controller set to true. Found \"true\" in references for %v and %v", controllerName, ref.Name)))
+			} else {
+				controllerName = ref.Name
+			}
+		}
+	}
+	return allErrs
+}
+
+// ValidateFinalizerName validates finalizer names.
+func ValidateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for _, msg := range validation.IsQualifiedName(stringValue) {
+		allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg))
+	}
+
+	return allErrs
+}
+
+// ValidateNoNewFinalizers validates that no new finalizers were added when the object is being deleted.
+func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	extra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...))
+	if len(extra) != 0 {
+		allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("no new finalizers can be added if the object is being deleted, found new finalizers %#v", extra.List())))
+	}
+	return allErrs
+}
+
+// ValidateImmutableField validates that the new value of an immutable field equals the old value.
+func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if !apiequality.Semantic.DeepEqual(oldVal, newVal) {
+		allErrs = append(allErrs, field.Invalid(fldPath, newVal, FieldImmutableErrorMsg))
+	}
+	return allErrs
+}
+
+// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already
+// been performed.
+// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
+func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
+	metadata, err := meta.Accessor(objMeta)
+	if err != nil {
+		allErrs := field.ErrorList{}
+		allErrs = append(allErrs, field.Invalid(fldPath, objMeta, err.Error()))
+		return allErrs
+	}
+	return ValidateObjectMetaAccessor(metadata, requiresNamespace, nameFn, fldPath)
+}
+
+// ValidateObjectMetaAccessor validates an object's metadata on creation. It expects that name generation
+// has already been performed.
+// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
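+//
+// A sketch of typical use from a create-validation path (obj is an assumed
+// API object embedding metav1.ObjectMeta):
+//
+//	allErrs := ValidateObjectMetaAccessor(&obj.ObjectMeta, true, NameIsDNSSubdomain, field.NewPath("metadata"))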
+func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(meta.GetGenerateName()) != 0 {
+		for _, msg := range nameFn(meta.GetGenerateName(), true) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GetGenerateName(), msg))
+		}
+	}
+	// If the generated name validates, but the calculated value does not, it's a problem with generation, and we
+	// report it here. This may confuse users, but indicates a programming bug and still must be validated.
+	// If multiple fields could satisfy the requirement, the error message joins them with "or" as a separator.
+	if len(meta.GetName()) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required"))
+	} else {
+		for _, msg := range nameFn(meta.GetName(), false) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.GetName(), msg))
+		}
+	}
+	if requiresNamespace {
+		if len(meta.GetNamespace()) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), ""))
+		} else {
+			for _, msg := range ValidateNamespaceName(meta.GetNamespace(), false) {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.GetNamespace(), msg))
+			}
+		}
+	} else {
+		if len(meta.GetNamespace()) != 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type"))
+		}
+	}
+	if len(meta.GetClusterName()) != 0 {
+		for _, msg := range ValidateClusterName(meta.GetClusterName(), false) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("clusterName"), meta.GetClusterName(), msg))
+		}
+	}
+	allErrs = append(allErrs, ValidateNonnegativeField(meta.GetGeneration(), fldPath.Child("generation"))...)
+	allErrs = append(allErrs, v1validation.ValidateLabels(meta.GetLabels(), fldPath.Child("labels"))...)
+	allErrs = append(allErrs, ValidateAnnotations(meta.GetAnnotations(), fldPath.Child("annotations"))...)
+	allErrs = append(allErrs, ValidateOwnerReferences(meta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...)
+	allErrs = append(allErrs, ValidateInitializers(meta.GetInitializers(), fldPath.Child("initializers"))...)
+	allErrs = append(allErrs, ValidateFinalizers(meta.GetFinalizers(), fldPath.Child("finalizers"))...)
+	return allErrs
+}
+
+func ValidateInitializers(initializers *metav1.Initializers, fldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+	if initializers == nil {
+		return allErrs
+	}
+	for i, initializer := range initializers.Pending {
+		allErrs = append(allErrs, validation.IsFullyQualifiedName(fldPath.Child("pending").Index(i).Child("name"), initializer.Name)...)
+	}
+	allErrs = append(allErrs, validateInitializersResult(initializers.Result, fldPath.Child("result"))...)
+	return allErrs
+}
+
+func validateInitializersResult(result *metav1.Status, fldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+	if result == nil {
+		return allErrs
+	}
+	switch result.Status {
+	case metav1.StatusFailure:
+	default:
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("status"), result.Status, "must be 'Failure'"))
+	}
+	return allErrs
+}
+
+// ValidateFinalizers tests whether the finalizer names are valid and whether any finalizers conflict.
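+// Today the only conflicting pair is metav1.FinalizerOrphanDependents and
+// metav1.FinalizerDeleteDependents, which request opposite garbage-collection
+// behaviors for dependent objects.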
+func ValidateFinalizers(finalizers []string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	hasFinalizerOrphanDependents := false
+	hasFinalizerDeleteDependents := false
+	for _, finalizer := range finalizers {
+		allErrs = append(allErrs, ValidateFinalizerName(finalizer, fldPath)...)
+		if finalizer == metav1.FinalizerOrphanDependents {
+			hasFinalizerOrphanDependents = true
+		}
+		if finalizer == metav1.FinalizerDeleteDependents {
+			hasFinalizerDeleteDependents = true
+		}
+	}
+	if hasFinalizerDeleteDependents && hasFinalizerOrphanDependents {
+		allErrs = append(allErrs, field.Invalid(fldPath, finalizers, fmt.Sprintf("finalizers %s and %s cannot both be set", metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents)))
+	}
+	return allErrs
+}
+
+// ValidateObjectMetaUpdate validates an object's metadata when updated
+func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
+	newMetadata, err := meta.Accessor(newMeta)
+	if err != nil {
+		allErrs := field.ErrorList{}
+		allErrs = append(allErrs, field.Invalid(fldPath, newMeta, err.Error()))
+		return allErrs
+	}
+	oldMetadata, err := meta.Accessor(oldMeta)
+	if err != nil {
+		allErrs := field.ErrorList{}
+		allErrs = append(allErrs, field.Invalid(fldPath, oldMeta, err.Error()))
+		return allErrs
+	}
+	return ValidateObjectMetaAccessorUpdate(newMetadata, oldMetadata, fldPath)
+}
+
+func ValidateObjectMetaAccessorUpdate(newMeta, oldMeta metav1.Object, fldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+
+	// Finalizers cannot be added if the object is already being deleted.
+	if oldMeta.GetDeletionTimestamp() != nil {
+		allErrs = append(allErrs, ValidateNoNewFinalizers(newMeta.GetFinalizers(), oldMeta.GetFinalizers(), fldPath.Child("finalizers"))...)
+	}
+
+	// Reject updates that don't specify a resource version
+	if len(newMeta.GetResourceVersion()) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceVersion"), newMeta.GetResourceVersion(), "must be specified for an update"))
+	}
+
+	// Generation shouldn't be decremented
+	if newMeta.GetGeneration() < oldMeta.GetGeneration() {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("generation"), newMeta.GetGeneration(), "must not be decremented"))
+	}
+
+	allErrs = append(allErrs, ValidateInitializersUpdate(newMeta.GetInitializers(), oldMeta.GetInitializers(), fldPath.Child("initializers"))...)
+
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetName(), oldMeta.GetName(), fldPath.Child("name"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetNamespace(), oldMeta.GetNamespace(), fldPath.Child("namespace"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetUID(), oldMeta.GetUID(), fldPath.Child("uid"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetCreationTimestamp(), oldMeta.GetCreationTimestamp(), fldPath.Child("creationTimestamp"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionTimestamp(), oldMeta.GetDeletionTimestamp(), fldPath.Child("deletionTimestamp"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionGracePeriodSeconds(), oldMeta.GetDeletionGracePeriodSeconds(), fldPath.Child("deletionGracePeriodSeconds"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetClusterName(), oldMeta.GetClusterName(), fldPath.Child("clusterName"))...)
+
+	allErrs = append(allErrs, v1validation.ValidateLabels(newMeta.GetLabels(), fldPath.Child("labels"))...)
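+	// Labels (validated above), annotations, and owner references may
+	// legitimately change on update, so they are re-validated in full rather
+	// than compared for immutability.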
+ allErrs = append(allErrs, ValidateAnnotations(newMeta.GetAnnotations(), fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidateOwnerReferences(newMeta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...) + + return allErrs +} + +// ValidateInitializersUpdate checks the update of the metadata initializers field +func ValidateInitializersUpdate(newInit, oldInit *metav1.Initializers, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + switch { + case oldInit == nil && newInit != nil: + // Initializers may not be set on new objects + allErrs = append(allErrs, field.Invalid(fldPath, nil, "field is immutable once initialization has completed")) + case oldInit != nil && newInit == nil: + // this is a valid transition and means initialization was successful + case oldInit != nil && newInit != nil: + // validate changes to initializers + switch { + case oldInit.Result == nil && newInit.Result != nil: + // setting a result is allowed + allErrs = append(allErrs, validateInitializersResult(newInit.Result, fldPath.Child("result"))...) + case oldInit.Result != nil: + // setting Result implies permanent failure, and all future updates will be prevented + allErrs = append(allErrs, ValidateImmutableField(newInit.Result, oldInit.Result, fldPath.Child("result"))...) + default: + // leaving the result nil is allowed + } + } + return allErrs +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go new file mode 100644 index 000000000..81f86fb30 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -0,0 +1,110 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func ValidateLabelSelector(ps *metav1.LabelSelector, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if ps == nil { + return allErrs + } + allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) + for i, expr := range ps.MatchExpressions { + allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...) 
+	}
+	return allErrs
+}
+
+func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	switch sr.Operator {
+	case metav1.LabelSelectorOpIn, metav1.LabelSelectorOpNotIn:
+		if len(sr.Values) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
+		}
+	case metav1.LabelSelectorOpExists, metav1.LabelSelectorOpDoesNotExist:
+		if len(sr.Values) > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
+		}
+	default:
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator"))
+	}
+	allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...)
+	return allErrs
+}
+
+// ValidateLabelName validates that the label name is correctly defined.
+func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for _, msg := range validation.IsQualifiedName(labelName) {
+		allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg))
+	}
+	return allErrs
+}
+
+// ValidateLabels validates that a set of labels is correctly defined.
+func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for k, v := range labels {
+		allErrs = append(allErrs, ValidateLabelName(k, fldPath)...)
+		for _, msg := range validation.IsValidLabelValue(v) {
+			allErrs = append(allErrs, field.Invalid(fldPath, v, msg))
+		}
+	}
+	return allErrs
+}
+
+func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if options.OrphanDependents != nil && options.PropagationPolicy != nil {
+		allErrs = append(allErrs, field.Invalid(field.NewPath("propagationPolicy"), options.PropagationPolicy, "orphanDependents and deletionPropagation cannot both be set"))
+	}
+	if options.PropagationPolicy != nil &&
+		*options.PropagationPolicy != metav1.DeletePropagationForeground &&
+		*options.PropagationPolicy != metav1.DeletePropagationBackground &&
+		*options.PropagationPolicy != metav1.DeletePropagationOrphan {
+		allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"}))
+	}
+	allErrs = append(allErrs, validateDryRun(field.NewPath("dryRun"), options.DryRun)...)
+	return allErrs
+}
+
+func ValidateCreateOptions(options *metav1.CreateOptions) field.ErrorList {
+	return validateDryRun(field.NewPath("dryRun"), options.DryRun)
+}
+
+func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList {
+	return validateDryRun(field.NewPath("dryRun"), options.DryRun)
+}
+
+var allowedDryRunValues = sets.NewString(metav1.DryRunAll)
+
+func validateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if !allowedDryRunValues.HasAll(dryRun...)
{ + allErrs = append(allErrs, field.NotSupported(fldPath, dryRun, allowedDryRunValues.List())) + } + return allErrs +} + +const UninitializedStatusUpdateErrorMsg string = `must not update status when the object is uninitialized` diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go new file mode 100644 index 000000000..bcf7eb4bc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticator + +import ( + "context" + "fmt" + "net/http" +) + +func authenticate(ctx context.Context, implicitAuds Audiences, authenticate func() (*Response, bool, error)) (*Response, bool, error) { + targetAuds, ok := AudiencesFrom(ctx) + // We can remove this once api audiences is never empty. That will probably + // be N releases after TokenRequest is GA. + if !ok { + return authenticate() + } + auds := implicitAuds.Intersect(targetAuds) + if len(auds) == 0 { + return nil, false, nil + } + resp, ok, err := authenticate() + if err != nil || !ok { + return nil, false, err + } + if len(resp.Audiences) > 0 { + // maybe the authenticator was audience aware after all. + return nil, false, fmt.Errorf("audience agnostic authenticator wrapped an authenticator that returned audiences: %q", resp.Audiences) + } + resp.Audiences = auds + return resp, true, nil +} + +type audAgnosticRequestAuthenticator struct { + implicit Audiences + delegate Request +} + +var _ = Request(&audAgnosticRequestAuthenticator{}) + +func (a *audAgnosticRequestAuthenticator) AuthenticateRequest(req *http.Request) (*Response, bool, error) { + return authenticate(req.Context(), a.implicit, func() (*Response, bool, error) { + return a.delegate.AuthenticateRequest(req) + }) +} + +// WrapAudienceAgnosticRequest wraps an audience agnostic request authenticator +// to restrict its accepted audiences to a set of implicit audiences. +func WrapAudienceAgnosticRequest(implicit Audiences, delegate Request) Request { + return &audAgnosticRequestAuthenticator{ + implicit: implicit, + delegate: delegate, + } +} + +type audAgnosticTokenAuthenticator struct { + implicit Audiences + delegate Token +} + +var _ = Token(&audAgnosticTokenAuthenticator{}) + +func (a *audAgnosticTokenAuthenticator) AuthenticateToken(ctx context.Context, tok string) (*Response, bool, error) { + return authenticate(ctx, a.implicit, func() (*Response, bool, error) { + return a.delegate.AuthenticateToken(ctx, tok) + }) +} + +// WrapAudienceAgnosticToken wraps an audience agnostic token authenticator to +// restrict its accepted audiences to a set of implicit audiences. 
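+//
+// A sketch of typical wiring (the delegate variable and audience value are
+// illustrative):
+//
+//	tok := WrapAudienceAgnosticToken(Audiences{"https://kubernetes.default.svc"}, legacyTokenAuthenticator)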
+func WrapAudienceAgnosticToken(implicit Audiences, delegate Token) Token {
+	return &audAgnosticTokenAuthenticator{
+		implicit: implicit,
+		delegate: delegate,
+	}
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go
new file mode 100644
index 000000000..2a3a91889
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authenticator
+
+import "context"
+
+// Audiences is a container for the Audiences of a token.
+type Audiences []string
+
+// The key type is unexported to prevent collisions
+type key int
+
+const (
+	// audiencesKey is the context key for request audiences.
+	audiencesKey key = iota
+)
+
+// WithAudiences returns a context that stores a request's expected audiences.
+func WithAudiences(ctx context.Context, auds Audiences) context.Context {
+	return context.WithValue(ctx, audiencesKey, auds)
+}
+
+// AudiencesFrom returns a request's expected audiences stored in the request context.
+func AudiencesFrom(ctx context.Context) (Audiences, bool) {
+	auds, ok := ctx.Value(audiencesKey).(Audiences)
+	return auds, ok
+}
+
+// Has reports whether Audiences contains a specific audience.
+func (a Audiences) Has(taud string) bool {
+	for _, aud := range a {
+		if aud == taud {
+			return true
+		}
+	}
+	return false
+}
+
+// Intersect intersects Audiences with a target Audiences and returns all
+// elements in both.
+func (a Audiences) Intersect(tauds Audiences) Audiences {
+	selected := Audiences{}
+	for _, taud := range tauds {
+		if a.Has(taud) {
+			selected = append(selected, taud)
+		}
+	}
+	return selected
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go
new file mode 100644
index 000000000..e3b1b622c
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authenticator
+
+import (
+	"context"
+	"net/http"
+
+	"k8s.io/apiserver/pkg/authentication/user"
+)
+
+// Token checks a string value against a backing authentication store and
+// returns a Response or an error if the token could not be checked.
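+// By convention a (nil, false, nil) return means the authenticator did not
+// recognize the token at all, which lets union authenticators fall through
+// to the next delegate in the chain.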
+type Token interface { + AuthenticateToken(ctx context.Context, token string) (*Response, bool, error) +} + +// Request attempts to extract authentication information from a request and +// returns a Response or an error if the request could not be checked. +type Request interface { + AuthenticateRequest(req *http.Request) (*Response, bool, error) +} + +// Password checks a username and password against a backing authentication +// store and returns a Response or an error if the password could not be +// checked. +type Password interface { + AuthenticatePassword(ctx context.Context, user, password string) (*Response, bool, error) +} + +// TokenFunc is a function that implements the Token interface. +type TokenFunc func(ctx context.Context, token string) (*Response, bool, error) + +// AuthenticateToken implements authenticator.Token. +func (f TokenFunc) AuthenticateToken(ctx context.Context, token string) (*Response, bool, error) { + return f(ctx, token) +} + +// RequestFunc is a function that implements the Request interface. +type RequestFunc func(req *http.Request) (*Response, bool, error) + +// AuthenticateRequest implements authenticator.Request. +func (f RequestFunc) AuthenticateRequest(req *http.Request) (*Response, bool, error) { + return f(req) +} + +// PasswordFunc is a function that implements the Password interface. +type PasswordFunc func(ctx context.Context, user, password string) (*Response, bool, error) + +// AuthenticatePassword implements authenticator.Password. +func (f PasswordFunc) AuthenticatePassword(ctx context.Context, user, password string) (*Response, bool, error) { + return f(ctx, user, password) +} + +// Response is the struct returned by authenticator interfaces upon successful +// authentication. It contains information about whether the authenticator +// authenticated the request, information about the context of the +// authentication, and information about the authenticated user. +type Response struct { + // Audiences is the set of audiences the authenticator was able to validate + // the token against. If the authenticator is not audience aware, this field + // will be empty. + Audiences Audiences + // User is the UserInfo associated with the authentication context. + User user.Info +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go new file mode 100644 index 000000000..1b7bbc139 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go @@ -0,0 +1,73 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serviceaccount + +import ( + "fmt" + "strings" + + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" +) + +const ( + ServiceAccountUsernamePrefix = "system:serviceaccount:" + ServiceAccountUsernameSeparator = ":" + ServiceAccountGroupPrefix = "system:serviceaccounts:" + AllServiceAccountsGroup = "system:serviceaccounts" +) + +// MakeUsername generates a username from the given namespace and ServiceAccount name. +// The resulting username can be passed to SplitUsername to extract the original namespace and ServiceAccount name. +func MakeUsername(namespace, name string) string { + return ServiceAccountUsernamePrefix + namespace + ServiceAccountUsernameSeparator + name +} + +var invalidUsernameErr = fmt.Errorf("Username must be in the form %s", MakeUsername("namespace", "name")) + +// SplitUsername returns the namespace and ServiceAccount name embedded in the given username, +// or an error if the username is not a valid name produced by MakeUsername +func SplitUsername(username string) (string, string, error) { + if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) { + return "", "", invalidUsernameErr + } + trimmed := strings.TrimPrefix(username, ServiceAccountUsernamePrefix) + parts := strings.Split(trimmed, ServiceAccountUsernameSeparator) + if len(parts) != 2 { + return "", "", invalidUsernameErr + } + namespace, name := parts[0], parts[1] + if len(apimachineryvalidation.ValidateNamespaceName(namespace, false)) != 0 { + return "", "", invalidUsernameErr + } + if len(apimachineryvalidation.ValidateServiceAccountName(name, false)) != 0 { + return "", "", invalidUsernameErr + } + return namespace, name, nil +} + +// MakeGroupNames generates service account group names for the given namespace +func MakeGroupNames(namespace string) []string { + return []string{ + AllServiceAccountsGroup, + MakeNamespaceGroupName(namespace), + } +} + +// MakeNamespaceGroupName returns the name of the group all service accounts in the namespace are included in +func MakeNamespaceGroupName(namespace string) string { + return ServiceAccountGroupPrefix + namespace +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go b/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go index 3d87fd72c..570c51ae9 100644 --- a/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go +++ b/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package user contains utilities for dealing with simple user exchange in the auth // packages. The user.Info interface defines an interface for exchanging that info. 
-package user // import "k8s.io/apiserver/pkg/authentication/user" +package user diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go index 5f212ca04..95ade009e 100644 --- a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go @@ -56,7 +56,7 @@ type Attributes interface { GetAPIVersion() string // IsResourceRequest returns true for requests to API resources, like /api/v1/nodes, - // and false for non-resource endpoints like /api, /healthz + // and false for non-resource endpoints like /api, /healthz, and /swaggerapi IsResourceRequest() bool // GetPath returns the path of the request diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go new file mode 100644 index 000000000..924182568 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -0,0 +1,109 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package features + +import ( + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +const ( + // Every feature gate should add method here following this template: + // + // // owner: @username + // // alpha: v1.4 + // MyFeature() bool + + // owner: @tallclair + // alpha: v1.5 + // beta: v1.6 + // + // StreamingProxyRedirects controls whether the apiserver should intercept (and follow) + // redirects from the backend (Kubelet) for streaming requests (exec/attach/port-forward). + StreamingProxyRedirects utilfeature.Feature = "StreamingProxyRedirects" + + // owner: @tallclair + // alpha: v1.10 + // + // ValidateProxyRedirects controls whether the apiserver should validate that redirects are only + // followed to the same host. Only used if StreamingProxyRedirects is enabled. + ValidateProxyRedirects utilfeature.Feature = "ValidateProxyRedirects" + + // owner: @tallclair + // alpha: v1.7 + // beta: v1.8 + // GA: v1.12 + // + // AdvancedAuditing enables a much more general API auditing pipeline, which includes support for + // pluggable output backends and an audit policy specifying how different requests should be + // audited. + AdvancedAuditing utilfeature.Feature = "AdvancedAuditing" + + // owner: @pbarker + // alpha: v1.13 + // + // DynamicAuditing enables configuration of audit policy and webhook backends through an + // AuditSink API object. + DynamicAuditing utilfeature.Feature = "DynamicAuditing" + + // owner: @ilackams + // alpha: v1.7 + // + // Enables compression of REST responses (GET and LIST only) + APIResponseCompression utilfeature.Feature = "APIResponseCompression" + + // owner: @smarterclayton + // alpha: v1.7 + // + // Allow asynchronous coordination of object creation. + // Auto-enabled by the Initializers admission plugin. 
+ Initializers utilfeature.Feature = "Initializers" + + // owner: @smarterclayton + // alpha: v1.8 + // beta: v1.9 + // + // Allow API clients to retrieve resource lists in chunks rather than + // all at once. + APIListChunking utilfeature.Feature = "APIListChunking" + + // owner: @apelisse + // alpha: v1.12 + // beta: v1.13 + // + // Allow requests to be processed but not stored, so that + // validation, merging, mutation can be tested without + // committing. + DryRun utilfeature.Feature = "DryRun" +) + +func init() { + utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates) +} + +// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. +// To add a new feature, define a key for it above and add it here. The features will be +// available throughout Kubernetes binaries. +var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ + StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta}, + ValidateProxyRedirects: {Default: false, PreRelease: utilfeature.Alpha}, + AdvancedAuditing: {Default: true, PreRelease: utilfeature.GA}, + DynamicAuditing: {Default: false, PreRelease: utilfeature.Alpha}, + APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha}, + Initializers: {Default: false, PreRelease: utilfeature.Alpha}, + APIListChunking: {Default: true, PreRelease: utilfeature.Beta}, + DryRun: {Default: true, PreRelease: utilfeature.Beta}, +} diff --git a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go new file mode 100644 index 000000000..a83dafd56 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go @@ -0,0 +1,323 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package feature + +import ( + "fmt" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/spf13/pflag" + "k8s.io/klog" +) + +type Feature string + +const ( + flagName = "feature-gates" + + // allAlphaGate is a global toggle for alpha features. Per-feature key + // values override the default set by allAlphaGate. Examples: + // AllAlpha=false,NewFeature=true will result in newFeature=true + // AllAlpha=true,NewFeature=false will result in newFeature=false + allAlphaGate Feature = "AllAlpha" +) + +var ( + // The generic features. + defaultFeatures = map[Feature]FeatureSpec{ + allAlphaGate: {Default: false, PreRelease: Alpha}, + } + + // Special handling for a few gates. + specialFeatures = map[Feature]func(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool){ + allAlphaGate: setUnsetAlphaGates, + } + + // DefaultFeatureGate is a shared global FeatureGate. + DefaultFeatureGate FeatureGate = NewFeatureGate() +) + +type FeatureSpec struct { + Default bool + PreRelease prerelease +} + +type prerelease string + +const ( + // Values for PreRelease. 
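+	// GA is deliberately the empty string: stable gates carry no pre-release
+	// tag and are omitted from the KnownFeatures listing below.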
+ Alpha = prerelease("ALPHA") + Beta = prerelease("BETA") + GA = prerelease("") + + // Deprecated + Deprecated = prerelease("DEPRECATED") +) + +// FeatureGate parses and stores flag gates for known features from +// a string like feature1=true,feature2=false,... +type FeatureGate interface { + // AddFlag adds a flag for setting global feature gates to the specified FlagSet. + AddFlag(fs *pflag.FlagSet) + // Set parses and stores flag gates for known features + // from a string like feature1=true,feature2=false,... + Set(value string) error + // SetFromMap stores flag gates for known features from a map[string]bool or returns an error + SetFromMap(m map[string]bool) error + // Enabled returns true if the key is enabled. + Enabled(key Feature) bool + // Add adds features to the featureGate. + Add(features map[Feature]FeatureSpec) error + // KnownFeatures returns a slice of strings describing the FeatureGate's known features. + KnownFeatures() []string + // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be + // set on the copy without mutating the original. This is useful for validating + // config against potential feature gate changes before committing those changes. + DeepCopy() FeatureGate +} + +// featureGate implements FeatureGate as well as pflag.Value for flag parsing. +type featureGate struct { + special map[Feature]func(map[Feature]FeatureSpec, map[Feature]bool, bool) + + // lock guards writes to known, enabled, and reads/writes of closed + lock sync.Mutex + // known holds a map[Feature]FeatureSpec + known *atomic.Value + // enabled holds a map[Feature]bool + enabled *atomic.Value + // closed is set to true when AddFlag is called, and prevents subsequent calls to Add + closed bool +} + +func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) { + for k, v := range known { + if v.PreRelease == Alpha { + if _, found := enabled[k]; !found { + enabled[k] = val + } + } + } +} + +// Set, String, and Type implement pflag.Value +var _ pflag.Value = &featureGate{} + +func NewFeatureGate() *featureGate { + known := map[Feature]FeatureSpec{} + for k, v := range defaultFeatures { + known[k] = v + } + + knownValue := &atomic.Value{} + knownValue.Store(known) + + enabled := map[Feature]bool{} + enabledValue := &atomic.Value{} + enabledValue.Store(enabled) + + f := &featureGate{ + known: knownValue, + special: specialFeatures, + enabled: enabledValue, + } + return f +} + +// Set parses a string of the form "key1=value1,key2=value2,..." into a +// map[string]bool of known keys or returns an error. 
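+//
+// For example, using the built-in AllAlpha gate:
+//
+//	err := DefaultFeatureGate.Set("AllAlpha=true")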
+func (f *featureGate) Set(value string) error { + m := make(map[string]bool) + for _, s := range strings.Split(value, ",") { + if len(s) == 0 { + continue + } + arr := strings.SplitN(s, "=", 2) + k := strings.TrimSpace(arr[0]) + if len(arr) != 2 { + return fmt.Errorf("missing bool value for %s", k) + } + v := strings.TrimSpace(arr[1]) + boolValue, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("invalid value of %s=%s, err: %v", k, v, err) + } + m[k] = boolValue + } + return f.SetFromMap(m) +} + +// SetFromMap stores flag gates for known features from a map[string]bool or returns an error +func (f *featureGate) SetFromMap(m map[string]bool) error { + f.lock.Lock() + defer f.lock.Unlock() + + // Copy existing state + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + + for k, v := range m { + k := Feature(k) + featureSpec, ok := known[k] + if !ok { + return fmt.Errorf("unrecognized feature gate: %s", k) + } + enabled[k] = v + // Handle "special" features like "all alpha gates" + if fn, found := f.special[k]; found { + fn(known, enabled, v) + } + + if featureSpec.PreRelease == Deprecated { + klog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v) + } else if featureSpec.PreRelease == GA { + klog.Warningf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v) + } + } + + // Persist changes + f.known.Store(known) + f.enabled.Store(enabled) + + klog.V(1).Infof("feature gates: %v", f.enabled) + return nil +} + +// String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...". +func (f *featureGate) String() string { + pairs := []string{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + pairs = append(pairs, fmt.Sprintf("%s=%t", k, v)) + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +func (f *featureGate) Type() string { + return "mapStringBool" +} + +// Add adds features to the featureGate. +func (f *featureGate) Add(features map[Feature]FeatureSpec) error { + f.lock.Lock() + defer f.lock.Unlock() + + if f.closed { + return fmt.Errorf("cannot add a feature gate after adding it to the flag set") + } + + // Copy existing state + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + + for name, spec := range features { + if existingSpec, found := known[name]; found { + if existingSpec == spec { + continue + } + return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec) + } + + known[name] = spec + } + + // Persist updated state + f.known.Store(known) + + return nil +} + +// Enabled returns true if the key is enabled. +func (f *featureGate) Enabled(key Feature) bool { + if v, ok := f.enabled.Load().(map[Feature]bool)[key]; ok { + return v + } + return f.known.Load().(map[Feature]FeatureSpec)[key].Default +} + +// AddFlag adds a flag for setting global feature gates to the specified FlagSet. +func (f *featureGate) AddFlag(fs *pflag.FlagSet) { + f.lock.Lock() + // TODO(mtaufen): Shouldn't we just close it on the first Set/SetFromMap instead? + // Not all components expose a feature gates flag using this AddFlag method, and + // in the future, all components will completely stop exposing a feature gates flag, + // in favor of componentconfig. 
+ f.closed = true + f.lock.Unlock() + + known := f.KnownFeatures() + fs.Var(f, flagName, ""+ + "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ + "Options are:\n"+strings.Join(known, "\n")) +} + +// KnownFeatures returns a slice of strings describing the FeatureGate's known features. +// Deprecated and GA features are hidden from the list. +func (f *featureGate) KnownFeatures() []string { + var known []string + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + if v.PreRelease == GA || v.PreRelease == Deprecated { + continue + } + known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, v.PreRelease, v.Default)) + } + sort.Strings(known) + return known +} + +// DeepCopy returns a deep copy of the FeatureGate object, such that gates can be +// set on the copy without mutating the original. This is useful for validating +// config against potential feature gate changes before committing those changes. +func (f *featureGate) DeepCopy() FeatureGate { + // Copy existing state. + known := map[Feature]FeatureSpec{} + for k, v := range f.known.Load().(map[Feature]FeatureSpec) { + known[k] = v + } + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + + // Store copied state in new atomics. + knownValue := &atomic.Value{} + knownValue.Store(known) + enabledValue := &atomic.Value{} + enabledValue.Store(enabled) + + // Construct a new featureGate around the copied state. + // Note that specialFeatures is treated as immutable by convention, + // and we maintain the value of f.closed across the copy. + return &featureGate{ + special: specialFeatures, + known: knownValue, + enabled: enabledValue, + closed: f.closed, + } +} diff --git a/vendor/k8s.io/client-go/discovery/fake/discovery.go b/vendor/k8s.io/client-go/discovery/fake/discovery.go new file mode 100644 index 000000000..9565fa46c --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/fake/discovery.go @@ -0,0 +1,144 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fake + +import ( + "fmt" + + "github.com/googleapis/gnostic/OpenAPIv2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/version" + kubeversion "k8s.io/client-go/pkg/version" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/testing" +) + +// FakeDiscovery implements discovery.DiscoveryInterface and sometimes calls testing.Fake.Invoke with an action, +// but doesn't respect the return value if any. There is a way to fake static values like ServerVersion by using the Faked... fields on the struct. +type FakeDiscovery struct { + *testing.Fake + FakedServerVersion *version.Info +} + +// ServerResourcesForGroupVersion returns the supported resources for a group +// and version. 
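+//
+// The answer is served from the Resources field that a test pre-populates;
+// for example (fakeDisc is a hypothetical test variable):
+//
+//	fakeDisc.Resources = []*metav1.APIResourceList{{GroupVersion: "apps/v1"}}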
+func (c *FakeDiscovery) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "resource"}, + } + c.Invokes(action, nil) + for _, resourceList := range c.Resources { + if resourceList.GroupVersion == groupVersion { + return resourceList, nil + } + } + return nil, fmt.Errorf("GroupVersion %q not found", groupVersion) +} + +// ServerResources returns the supported resources for all groups and versions. +func (c *FakeDiscovery) ServerResources() ([]*metav1.APIResourceList, error) { + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "resource"}, + } + c.Invokes(action, nil) + return c.Resources, nil +} + +// ServerPreferredResources returns the supported resources with the version +// preferred by the server. +func (c *FakeDiscovery) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return nil, nil +} + +// ServerPreferredNamespacedResources returns the supported namespaced resources +// with the version preferred by the server. +func (c *FakeDiscovery) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return nil, nil +} + +// ServerGroups returns the supported groups, with information like supported +// versions and the preferred version. +func (c *FakeDiscovery) ServerGroups() (*metav1.APIGroupList, error) { + action := testing.ActionImpl{ + Verb: "get", + Resource: schema.GroupVersionResource{Resource: "group"}, + } + c.Invokes(action, nil) + + groups := map[string]*metav1.APIGroup{} + + for _, res := range c.Resources { + gv, err := schema.ParseGroupVersion(res.GroupVersion) + if err != nil { + return nil, err + } + group := groups[gv.Group] + if group == nil { + group = &metav1.APIGroup{ + Name: gv.Group, + PreferredVersion: metav1.GroupVersionForDiscovery{ + GroupVersion: res.GroupVersion, + Version: gv.Version, + }, + } + groups[gv.Group] = group + } + + group.Versions = append(group.Versions, metav1.GroupVersionForDiscovery{ + GroupVersion: res.GroupVersion, + Version: gv.Version, + }) + } + + list := &metav1.APIGroupList{} + for _, apiGroup := range groups { + list.Groups = append(list.Groups, *apiGroup) + } + + return list, nil + +} + +// ServerVersion retrieves and parses the server's version. +func (c *FakeDiscovery) ServerVersion() (*version.Info, error) { + action := testing.ActionImpl{} + action.Verb = "get" + action.Resource = schema.GroupVersionResource{Resource: "version"} + c.Invokes(action, nil) + + if c.FakedServerVersion != nil { + return c.FakedServerVersion, nil + } + + versionInfo := kubeversion.Get() + return &versionInfo, nil +} + +// OpenAPISchema retrieves and parses the swagger API schema the server supports. +func (c *FakeDiscovery) OpenAPISchema() (*openapi_v2.Document, error) { + return &openapi_v2.Document{}, nil +} + +// RESTClient returns a RESTClient that is used to communicate with API server +// by this client implementation. +func (c *FakeDiscovery) RESTClient() restclient.Interface { + return nil +} diff --git a/vendor/k8s.io/client-go/testing/actions.go b/vendor/k8s.io/client-go/testing/actions.go new file mode 100644 index 000000000..e6db578ed --- /dev/null +++ b/vendor/k8s.io/client-go/testing/actions.go @@ -0,0 +1,671 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "path" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +func NewRootGetAction(resource schema.GroupVersionResource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Name = name + + return action +} + +func NewGetAction(resource schema.GroupVersionResource, namespace, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewGetSubresourceAction(resource schema.GroupVersionResource, namespace, subresource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootGetSubresourceAction(resource schema.GroupVersionResource, subresource, name string) GetActionImpl { + action := GetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Subresource = subresource + action.Name = name + + return action +} + +func NewRootListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, opts interface{}) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewListAction(resource schema.GroupVersionResource, kind schema.GroupVersionKind, namespace string, opts interface{}) ListActionImpl { + action := ListActionImpl{} + action.Verb = "list" + action.Resource = resource + action.Kind = kind + action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootCreateAction(resource schema.GroupVersionResource, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Object = object + + return action +} + +func NewCreateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource string, object runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Subresource = subresource + action.Name = name + action.Object = object + + return action +} + +func NewCreateSubresourceAction(resource schema.GroupVersionResource, name, subresource, namespace string, object 
runtime.Object) CreateActionImpl { + action := CreateActionImpl{} + action.Verb = "create" + action.Resource = resource + action.Namespace = namespace + action.Subresource = subresource + action.Name = name + action.Object = object + + return action +} + +func NewRootUpdateAction(resource schema.GroupVersionResource, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Object = object + + return action +} + +func NewUpdateAction(resource schema.GroupVersionResource, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootPatchAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewPatchAction(resource schema.GroupVersionResource, namespace string, name string, pt types.PatchType, patch []byte) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Namespace = namespace + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewRootPatchSubresourceAction(resource schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Subresource = path.Join(subresources...) + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewPatchSubresourceAction(resource schema.GroupVersionResource, namespace, name string, pt types.PatchType, patch []byte, subresources ...string) PatchActionImpl { + action := PatchActionImpl{} + action.Verb = "patch" + action.Resource = resource + action.Subresource = path.Join(subresources...) 
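+	// path.Join collapses multiple subresource segments into a single path
+	// element, e.g. []string{"status", "scale"} becomes "status/scale".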
+ action.Namespace = namespace + action.Name = name + action.PatchType = pt + action.Patch = patch + + return action +} + +func NewRootUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Object = object + + return action +} +func NewUpdateSubresourceAction(resource schema.GroupVersionResource, subresource string, namespace string, object runtime.Object) UpdateActionImpl { + action := UpdateActionImpl{} + action.Verb = "update" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Object = object + + return action +} + +func NewRootDeleteAction(resource schema.GroupVersionResource, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Name = name + + return action +} + +func NewRootDeleteSubresourceAction(resource schema.GroupVersionResource, subresource string, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Subresource = subresource + action.Name = name + + return action +} + +func NewDeleteAction(resource schema.GroupVersionResource, namespace, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewDeleteSubresourceAction(resource schema.GroupVersionResource, subresource, namespace, name string) DeleteActionImpl { + action := DeleteActionImpl{} + action.Verb = "delete" + action.Resource = resource + action.Subresource = subresource + action.Namespace = namespace + action.Name = name + + return action +} + +func NewRootDeleteCollectionAction(resource schema.GroupVersionResource, opts interface{}) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewDeleteCollectionAction(resource schema.GroupVersionResource, namespace string, opts interface{}) DeleteCollectionActionImpl { + action := DeleteCollectionActionImpl{} + action.Verb = "delete-collection" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, _ := ExtractFromListOptions(opts) + action.ListRestrictions = ListRestrictions{labelSelector, fieldSelector} + + return action +} + +func NewRootWatchAction(resource schema.GroupVersionResource, opts interface{}) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} + + return action +} + +func ExtractFromListOptions(opts interface{}) (labelSelector labels.Selector, fieldSelector fields.Selector, resourceVersion string) { + var err error + switch t := opts.(type) { + case metav1.ListOptions: + labelSelector, err = labels.Parse(t.LabelSelector) + if err != nil { + panic(fmt.Errorf("invalid selector %q: %v", t.LabelSelector, err)) + } + fieldSelector, err = fields.ParseSelector(t.FieldSelector) + if err != nil { + panic(fmt.Errorf("invalid selector %q: 
%v", t.FieldSelector, err)) + } + resourceVersion = t.ResourceVersion + default: + panic(fmt.Errorf("expect a ListOptions %T", opts)) + } + if labelSelector == nil { + labelSelector = labels.Everything() + } + if fieldSelector == nil { + fieldSelector = fields.Everything() + } + return labelSelector, fieldSelector, resourceVersion +} + +func NewWatchAction(resource schema.GroupVersionResource, namespace string, opts interface{}) WatchActionImpl { + action := WatchActionImpl{} + action.Verb = "watch" + action.Resource = resource + action.Namespace = namespace + labelSelector, fieldSelector, resourceVersion := ExtractFromListOptions(opts) + action.WatchRestrictions = WatchRestrictions{labelSelector, fieldSelector, resourceVersion} + + return action +} + +func NewProxyGetAction(resource schema.GroupVersionResource, namespace, scheme, name, port, path string, params map[string]string) ProxyGetActionImpl { + action := ProxyGetActionImpl{} + action.Verb = "get" + action.Resource = resource + action.Namespace = namespace + action.Scheme = scheme + action.Name = name + action.Port = port + action.Path = path + action.Params = params + return action +} + +type ListRestrictions struct { + Labels labels.Selector + Fields fields.Selector +} +type WatchRestrictions struct { + Labels labels.Selector + Fields fields.Selector + ResourceVersion string +} + +type Action interface { + GetNamespace() string + GetVerb() string + GetResource() schema.GroupVersionResource + GetSubresource() string + Matches(verb, resource string) bool + + // DeepCopy is used to copy an action to avoid any risk of accidental mutation. Most people never need to call this + // because the invocation logic deep copies before calls to storage and reactors. + DeepCopy() Action +} + +type GenericAction interface { + Action + GetValue() interface{} +} + +type GetAction interface { + Action + GetName() string +} + +type ListAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type CreateAction interface { + Action + GetObject() runtime.Object +} + +type UpdateAction interface { + Action + GetObject() runtime.Object +} + +type DeleteAction interface { + Action + GetName() string +} + +type DeleteCollectionAction interface { + Action + GetListRestrictions() ListRestrictions +} + +type PatchAction interface { + Action + GetName() string + GetPatchType() types.PatchType + GetPatch() []byte +} + +type WatchAction interface { + Action + GetWatchRestrictions() WatchRestrictions +} + +type ProxyGetAction interface { + Action + GetScheme() string + GetName() string + GetPort() string + GetPath() string + GetParams() map[string]string +} + +type ActionImpl struct { + Namespace string + Verb string + Resource schema.GroupVersionResource + Subresource string +} + +func (a ActionImpl) GetNamespace() string { + return a.Namespace +} +func (a ActionImpl) GetVerb() string { + return a.Verb +} +func (a ActionImpl) GetResource() schema.GroupVersionResource { + return a.Resource +} +func (a ActionImpl) GetSubresource() string { + return a.Subresource +} +func (a ActionImpl) Matches(verb, resource string) bool { + return strings.ToLower(verb) == strings.ToLower(a.Verb) && + strings.ToLower(resource) == strings.ToLower(a.Resource.Resource) +} +func (a ActionImpl) DeepCopy() Action { + ret := a + return ret +} + +type GenericActionImpl struct { + ActionImpl + Value interface{} +} + +func (a GenericActionImpl) GetValue() interface{} { + return a.Value +} + +func (a GenericActionImpl) DeepCopy() Action { + return GenericActionImpl{ + 
ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + // TODO this is wrong, but no worse than before + Value: a.Value, + } +} + +type GetActionImpl struct { + ActionImpl + Name string +} + +func (a GetActionImpl) GetName() string { + return a.Name +} + +func (a GetActionImpl) DeepCopy() Action { + return GetActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + } +} + +type ListActionImpl struct { + ActionImpl + Kind schema.GroupVersionKind + Name string + ListRestrictions ListRestrictions +} + +func (a ListActionImpl) GetKind() schema.GroupVersionKind { + return a.Kind +} + +func (a ListActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +func (a ListActionImpl) DeepCopy() Action { + return ListActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Kind: a.Kind, + Name: a.Name, + ListRestrictions: ListRestrictions{ + Labels: a.ListRestrictions.Labels.DeepCopySelector(), + Fields: a.ListRestrictions.Fields.DeepCopySelector(), + }, + } +} + +type CreateActionImpl struct { + ActionImpl + Name string + Object runtime.Object +} + +func (a CreateActionImpl) GetObject() runtime.Object { + return a.Object +} + +func (a CreateActionImpl) DeepCopy() Action { + return CreateActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + Object: a.Object.DeepCopyObject(), + } +} + +type UpdateActionImpl struct { + ActionImpl + Object runtime.Object +} + +func (a UpdateActionImpl) GetObject() runtime.Object { + return a.Object +} + +func (a UpdateActionImpl) DeepCopy() Action { + return UpdateActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Object: a.Object.DeepCopyObject(), + } +} + +type PatchActionImpl struct { + ActionImpl + Name string + PatchType types.PatchType + Patch []byte +} + +func (a PatchActionImpl) GetName() string { + return a.Name +} + +func (a PatchActionImpl) GetPatch() []byte { + return a.Patch +} + +func (a PatchActionImpl) GetPatchType() types.PatchType { + return a.PatchType +} + +func (a PatchActionImpl) DeepCopy() Action { + patch := make([]byte, len(a.Patch)) + copy(patch, a.Patch) + return PatchActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + PatchType: a.PatchType, + Patch: patch, + } +} + +type DeleteActionImpl struct { + ActionImpl + Name string +} + +func (a DeleteActionImpl) GetName() string { + return a.Name +} + +func (a DeleteActionImpl) DeepCopy() Action { + return DeleteActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + Name: a.Name, + } +} + +type DeleteCollectionActionImpl struct { + ActionImpl + ListRestrictions ListRestrictions +} + +func (a DeleteCollectionActionImpl) GetListRestrictions() ListRestrictions { + return a.ListRestrictions +} + +func (a DeleteCollectionActionImpl) DeepCopy() Action { + return DeleteCollectionActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + ListRestrictions: ListRestrictions{ + Labels: a.ListRestrictions.Labels.DeepCopySelector(), + Fields: a.ListRestrictions.Fields.DeepCopySelector(), + }, + } +} + +type WatchActionImpl struct { + ActionImpl + WatchRestrictions WatchRestrictions +} + +func (a WatchActionImpl) GetWatchRestrictions() WatchRestrictions { + return a.WatchRestrictions +} + +func (a WatchActionImpl) DeepCopy() Action { + return WatchActionImpl{ + ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl), + WatchRestrictions: WatchRestrictions{ + Labels: a.WatchRestrictions.Labels.DeepCopySelector(), + Fields: a.WatchRestrictions.Fields.DeepCopySelector(), + 
		ResourceVersion: a.WatchRestrictions.ResourceVersion,
+		},
+	}
+}
+
+type ProxyGetActionImpl struct {
+	ActionImpl
+	Scheme string
+	Name   string
+	Port   string
+	Path   string
+	Params map[string]string
+}
+
+func (a ProxyGetActionImpl) GetScheme() string {
+	return a.Scheme
+}
+
+func (a ProxyGetActionImpl) GetName() string {
+	return a.Name
+}
+
+func (a ProxyGetActionImpl) GetPort() string {
+	return a.Port
+}
+
+func (a ProxyGetActionImpl) GetPath() string {
+	return a.Path
+}
+
+func (a ProxyGetActionImpl) GetParams() map[string]string {
+	return a.Params
+}
+
+func (a ProxyGetActionImpl) DeepCopy() Action {
+	params := map[string]string{}
+	for k, v := range a.Params {
+		params[k] = v
+	}
+	return ProxyGetActionImpl{
+		ActionImpl: a.ActionImpl.DeepCopy().(ActionImpl),
+		Scheme:     a.Scheme,
+		Name:       a.Name,
+		Port:       a.Port,
+		Path:       a.Path,
+		Params:     params,
+	}
+}
diff --git a/vendor/k8s.io/client-go/testing/fake.go b/vendor/k8s.io/client-go/testing/fake.go
new file mode 100644
index 000000000..8b3f31eaf
--- /dev/null
+++ b/vendor/k8s.io/client-go/testing/fake.go
@@ -0,0 +1,213 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+	"fmt"
+	"sync"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/watch"
+	restclient "k8s.io/client-go/rest"
+)
+
+// Fake implements client.Interface. Meant to be embedded into a struct to get
+// a default implementation. This makes faking out just the method you want to
+// test easier.
+type Fake struct {
+	sync.RWMutex
+	actions []Action // these may be castable to other types, but "Action" is the minimum
+
+	// ReactionChain is the list of reactors that will be attempted for every
+	// request in the order they are tried.
+	ReactionChain []Reactor
+	// WatchReactionChain is the list of watch reactors that will be attempted
+	// for every request in the order they are tried.
+	WatchReactionChain []WatchReactor
+	// ProxyReactionChain is the list of proxy reactors that will be attempted
+	// for every request in the order they are tried.
+	ProxyReactionChain []ProxyReactor
+
+	Resources []*metav1.APIResourceList
+}
+
+// Reactor is an interface to allow the composition of reaction functions.
+type Reactor interface {
+	// Handles indicates whether or not this Reactor deals with a given
+	// action.
+	Handles(action Action) bool
+	// React handles the action and returns results. It may choose to
+	// delegate by indicating handled=false.
+	React(action Action) (handled bool, ret runtime.Object, err error)
+}
+
+// WatchReactor is an interface to allow the composition of watch functions.
+type WatchReactor interface {
+	// Handles indicates whether or not this Reactor deals with a given
+	// action.
+	Handles(action Action) bool
+	// React handles a watch action and returns results. It may choose to
+	// delegate by indicating handled=false.
+	React(action Action) (handled bool, ret watch.Interface, err error)
+}
+
+// ProxyReactor is an interface to allow the composition of proxy get
+// functions.
+type ProxyReactor interface {
+	// Handles indicates whether or not this Reactor deals with a given
+	// action.
+	Handles(action Action) bool
+	// React handles a proxy action and returns results. It may choose to
+	// delegate by indicating handled=false.
+	React(action Action) (handled bool, ret restclient.ResponseWrapper, err error)
+}
+
+// ReactionFunc is a function that returns an object or error for a given
+// Action. If "handled" is false, then the test client will ignore the
+// results and continue to the next ReactionFunc. A ReactionFunc can describe
+// reactions on subresources by testing the result of the action's
+// GetSubresource() method.
+type ReactionFunc func(action Action) (handled bool, ret runtime.Object, err error)
+
+// WatchReactionFunc is a function that returns a watch interface. If
+// "handled" is false, then the test client will ignore the results and
+// continue to the next WatchReactionFunc.
+type WatchReactionFunc func(action Action) (handled bool, ret watch.Interface, err error)
+
+// ProxyReactionFunc is a function that returns a ResponseWrapper interface
+// for a given Action. If "handled" is false, then the test client will
+// ignore the results and continue to the next ProxyReactionFunc.
+type ProxyReactionFunc func(action Action) (handled bool, ret restclient.ResponseWrapper, err error)
+
+// AddReactor appends a reactor to the end of the chain.
+func (c *Fake) AddReactor(verb, resource string, reaction ReactionFunc) {
+	c.ReactionChain = append(c.ReactionChain, &SimpleReactor{verb, resource, reaction})
+}
+
+// PrependReactor adds a reactor to the beginning of the chain.
+func (c *Fake) PrependReactor(verb, resource string, reaction ReactionFunc) {
+	c.ReactionChain = append([]Reactor{&SimpleReactor{verb, resource, reaction}}, c.ReactionChain...)
+}
+
+// AddWatchReactor appends a reactor to the end of the chain.
+func (c *Fake) AddWatchReactor(resource string, reaction WatchReactionFunc) {
+	c.WatchReactionChain = append(c.WatchReactionChain, &SimpleWatchReactor{resource, reaction})
+}
+
+// PrependWatchReactor adds a reactor to the beginning of the chain.
+func (c *Fake) PrependWatchReactor(resource string, reaction WatchReactionFunc) {
+	c.WatchReactionChain = append([]WatchReactor{&SimpleWatchReactor{resource, reaction}}, c.WatchReactionChain...)
+}
+
+// AddProxyReactor appends a reactor to the end of the chain.
+func (c *Fake) AddProxyReactor(resource string, reaction ProxyReactionFunc) {
+	c.ProxyReactionChain = append(c.ProxyReactionChain, &SimpleProxyReactor{resource, reaction})
+}
+
+// PrependProxyReactor adds a reactor to the beginning of the chain.
+func (c *Fake) PrependProxyReactor(resource string, reaction ProxyReactionFunc) {
+	c.ProxyReactionChain = append([]ProxyReactor{&SimpleProxyReactor{resource, reaction}}, c.ProxyReactionChain...)
+}
+
+// Invokes records the provided Action and then invokes the ReactionFunc that
+// handles the action if one exists. defaultReturnObj is expected to be of the
+// same type a normal call would return.
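+//
+// A minimal usage sketch (hypothetical test code, not part of this file;
+// the verb/resource values are illustrative):
+//
+//	f := &Fake{}
+//	f.AddReactor("get", "pods", func(action Action) (bool, runtime.Object, error) {
+//		return true, nil, fmt.Errorf("injected failure")
+//	})
+//	// The Get action below is recorded, then handled by the reactor above.
+//	_, err := f.Invokes(NewRootGetAction(schema.GroupVersionResource{Resource: "pods"}, "my-pod"), nil)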
+func (c *Fake) Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error) {
+	c.Lock()
+	defer c.Unlock()
+
+	c.actions = append(c.actions, action.DeepCopy())
+	for _, reactor := range c.ReactionChain {
+		if !reactor.Handles(action) {
+			continue
+		}
+
+		handled, ret, err := reactor.React(action.DeepCopy())
+		if !handled {
+			continue
+		}
+
+		return ret, err
+	}
+
+	return defaultReturnObj, nil
+}
+
+// InvokesWatch records the provided Action and then invokes the ReactionFunc
+// that handles the action if one exists.
+func (c *Fake) InvokesWatch(action Action) (watch.Interface, error) {
+	c.Lock()
+	defer c.Unlock()
+
+	c.actions = append(c.actions, action.DeepCopy())
+	for _, reactor := range c.WatchReactionChain {
+		if !reactor.Handles(action) {
+			continue
+		}
+
+		handled, ret, err := reactor.React(action.DeepCopy())
+		if !handled {
+			continue
+		}
+
+		return ret, err
+	}
+
+	return nil, fmt.Errorf("unhandled watch: %#v", action)
+}
+
+// InvokesProxy records the provided Action and then invokes the ReactionFunc
+// that handles the action if one exists.
+func (c *Fake) InvokesProxy(action Action) restclient.ResponseWrapper {
+	c.Lock()
+	defer c.Unlock()
+
+	c.actions = append(c.actions, action.DeepCopy())
+	for _, reactor := range c.ProxyReactionChain {
+		if !reactor.Handles(action) {
+			continue
+		}
+
+		handled, ret, err := reactor.React(action.DeepCopy())
+		if !handled || err != nil {
+			continue
+		}
+
+		return ret
+	}
+
+	return nil
+}
+
+// ClearActions clears the history of actions called on the fake client.
+func (c *Fake) ClearActions() {
+	c.Lock()
+	defer c.Unlock()
+
+	c.actions = make([]Action, 0)
+}
+
+// Actions returns a chronologically ordered slice of fake actions called on the
+// fake client.
+func (c *Fake) Actions() []Action {
+	c.RLock()
+	defer c.RUnlock()
+	fa := make([]Action, len(c.actions))
+	copy(fa, c.actions)
+	return fa
+}
diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go
new file mode 100644
index 000000000..90f16f560
--- /dev/null
+++ b/vendor/k8s.io/client-go/testing/fixture.go
@@ -0,0 +1,547 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/evanphx/json-patch"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/json"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
+	"k8s.io/apimachinery/pkg/watch"
+	restclient "k8s.io/client-go/rest"
+)
+
+// ObjectTracker keeps track of objects. It is intended to be used to
+// fake calls to a server by returning objects based on their kind,
+// namespace and name.
+type ObjectTracker interface {
+	// Add adds an object to the tracker. If the object being added
+	// is a list, its items are added separately.
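+	// For example (illustrative), the generated fake clientsets preload a
+	// tracker by calling Add for every object handed to NewSimpleClientset
+	// before any reactors run.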
+	Add(obj runtime.Object) error
+
+	// Get retrieves the object by its kind, namespace and name.
+	Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error)
+
+	// Create adds an object to the tracker in the specified namespace.
+	Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error
+
+	// Update updates an existing object in the tracker in the specified namespace.
+	Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error
+
+	// List retrieves all objects of a given kind in the given
+	// namespace. Only non-List kinds are accepted.
+	List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error)
+
+	// Delete deletes an existing object from the tracker. If the object
+	// didn't exist in the tracker prior to deletion, Delete returns
+	// no error.
+	Delete(gvr schema.GroupVersionResource, ns, name string) error
+
+	// Watch watches objects from the tracker. Watch returns a channel
+	// which will push added / modified / deleted objects.
+	Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error)
+}
+
+// ObjectScheme abstracts the implementation of common operations on objects.
+type ObjectScheme interface {
+	runtime.ObjectCreater
+	runtime.ObjectTyper
+}
+
+// ObjectReaction returns a ReactionFunc that applies core.Action to
+// the given tracker.
+func ObjectReaction(tracker ObjectTracker) ReactionFunc {
+	return func(action Action) (bool, runtime.Object, error) {
+		ns := action.GetNamespace()
+		gvr := action.GetResource()
+		// Here and below we need to switch on implementation types,
+		// not on interfaces, as some interfaces are identical
+		// (e.g. UpdateAction and CreateAction), so if we use them,
+		// updates and creates end up matching the same case branch.
+		switch action := action.(type) {
+
+		case ListActionImpl:
+			obj, err := tracker.List(gvr, action.GetKind(), ns)
+			return true, obj, err
+
+		case GetActionImpl:
+			obj, err := tracker.Get(gvr, ns, action.GetName())
+			return true, obj, err
+
+		case CreateActionImpl:
+			objMeta, err := meta.Accessor(action.GetObject())
+			if err != nil {
+				return true, nil, err
+			}
+			if action.GetSubresource() == "" {
+				err = tracker.Create(gvr, action.GetObject(), ns)
+			} else {
+				// TODO: Currently we're handling subresource creation as an update
+				// on the enclosing resource. This works for some subresources but
+				// might not be generic enough.
+				err = tracker.Update(gvr, action.GetObject(), ns)
+			}
+			if err != nil {
+				return true, nil, err
+			}
+			obj, err := tracker.Get(gvr, ns, objMeta.GetName())
+			return true, obj, err
+
+		case UpdateActionImpl:
+			objMeta, err := meta.Accessor(action.GetObject())
+			if err != nil {
+				return true, nil, err
+			}
+			err = tracker.Update(gvr, action.GetObject(), ns)
+			if err != nil {
+				return true, nil, err
+			}
+			obj, err := tracker.Get(gvr, ns, objMeta.GetName())
+			return true, obj, err
+
+		case DeleteActionImpl:
+			err := tracker.Delete(gvr, ns, action.GetName())
+			if err != nil {
+				return true, nil, err
+			}
+			return true, nil, nil
+
+		case PatchActionImpl:
+			obj, err := tracker.Get(gvr, ns, action.GetName())
+			if err != nil {
+				// object is not registered
+				return false, nil, err
+			}
+
+			old, err := json.Marshal(obj)
+			if err != nil {
+				return true, nil, err
+			}
+			// Only supports strategic merge patch and JSONPatch as coded.
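+			// For example (illustrative only), a JSON patch supplied by a test could be:
+			//   []byte(`[{"op":"replace","path":"/spec/replicas","value":3}]`)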
+ switch action.GetPatchType() { + case types.JSONPatchType: + patch, err := jsonpatch.DecodePatch(action.GetPatch()) + if err != nil { + return true, nil, err + } + modified, err := patch.Apply(old) + if err != nil { + return true, nil, err + } + if err = json.Unmarshal(modified, obj); err != nil { + return true, nil, err + } + case types.StrategicMergePatchType: + mergedByte, err := strategicpatch.StrategicMergePatch(old, action.GetPatch(), obj) + if err != nil { + return true, nil, err + } + if err = json.Unmarshal(mergedByte, obj); err != nil { + return true, nil, err + } + default: + return true, nil, fmt.Errorf("PatchType is not supported") + } + + if err = tracker.Update(gvr, obj, ns); err != nil { + return true, nil, err + } + + return true, obj, nil + + default: + return false, nil, fmt.Errorf("no reaction implemented for %s", action) + } + } +} + +type tracker struct { + scheme ObjectScheme + decoder runtime.Decoder + lock sync.RWMutex + objects map[schema.GroupVersionResource][]runtime.Object + // The value type of watchers is a map of which the key is either a namespace or + // all/non namespace aka "" and its value is list of fake watchers. + // Manipulations on resources will broadcast the notification events into the + // watchers' channel. Note that too many unhandled events (currently 100, + // see apimachinery/pkg/watch.DefaultChanSize) will cause a panic. + watchers map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher +} + +var _ ObjectTracker = &tracker{} + +// NewObjectTracker returns an ObjectTracker that can be used to keep track +// of objects for the fake clientset. Mostly useful for unit tests. +func NewObjectTracker(scheme ObjectScheme, decoder runtime.Decoder) ObjectTracker { + return &tracker{ + scheme: scheme, + decoder: decoder, + objects: make(map[schema.GroupVersionResource][]runtime.Object), + watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), + } +} + +func (t *tracker) List(gvr schema.GroupVersionResource, gvk schema.GroupVersionKind, ns string) (runtime.Object, error) { + // Heuristic for list kind: original kind + List suffix. Might + // not always be true but this tracker has a pretty limited + // understanding of the actual API model. + listGVK := gvk + listGVK.Kind = listGVK.Kind + "List" + // GVK does have the concept of "internal version". The scheme recognizes + // the runtime.APIVersionInternal, but not the empty string. 
+ if listGVK.Version == "" { + listGVK.Version = runtime.APIVersionInternal + } + + list, err := t.scheme.New(listGVK) + if err != nil { + return nil, err + } + + if !meta.IsListType(list) { + return nil, fmt.Errorf("%q is not a list type", listGVK.Kind) + } + + t.lock.RLock() + defer t.lock.RUnlock() + + objs, ok := t.objects[gvr] + if !ok { + return list, nil + } + + matchingObjs, err := filterByNamespaceAndName(objs, ns, "") + if err != nil { + return nil, err + } + if err := meta.SetList(list, matchingObjs); err != nil { + return nil, err + } + return list.DeepCopyObject(), nil +} + +func (t *tracker) Watch(gvr schema.GroupVersionResource, ns string) (watch.Interface, error) { + t.lock.Lock() + defer t.lock.Unlock() + + fakewatcher := watch.NewRaceFreeFake() + + if _, exists := t.watchers[gvr]; !exists { + t.watchers[gvr] = make(map[string][]*watch.RaceFreeFakeWatcher) + } + t.watchers[gvr][ns] = append(t.watchers[gvr][ns], fakewatcher) + return fakewatcher, nil +} + +func (t *tracker) Get(gvr schema.GroupVersionResource, ns, name string) (runtime.Object, error) { + errNotFound := errors.NewNotFound(gvr.GroupResource(), name) + + t.lock.RLock() + defer t.lock.RUnlock() + + objs, ok := t.objects[gvr] + if !ok { + return nil, errNotFound + } + + matchingObjs, err := filterByNamespaceAndName(objs, ns, name) + if err != nil { + return nil, err + } + if len(matchingObjs) == 0 { + return nil, errNotFound + } + if len(matchingObjs) > 1 { + return nil, fmt.Errorf("more than one object matched gvr %s, ns: %q name: %q", gvr, ns, name) + } + + // Only one object should match in the tracker if it works + // correctly, as Add/Update methods enforce kind/namespace/name + // uniqueness. + obj := matchingObjs[0].DeepCopyObject() + if status, ok := obj.(*metav1.Status); ok { + if status.Status != metav1.StatusSuccess { + return nil, &errors.StatusError{ErrStatus: *status} + } + } + + return obj, nil +} + +func (t *tracker) Add(obj runtime.Object) error { + if meta.IsListType(obj) { + return t.addList(obj, false) + } + objMeta, err := meta.Accessor(obj) + if err != nil { + return err + } + gvks, _, err := t.scheme.ObjectKinds(obj) + if err != nil { + return err + } + if len(gvks) == 0 { + return fmt.Errorf("no registered kinds for %v", obj) + } + for _, gvk := range gvks { + // NOTE: UnsafeGuessKindToResource is a heuristic and default match. The + // actual registration in apiserver can specify arbitrary route for a + // gvk. If a test uses such objects, it cannot preset the tracker with + // objects via Add(). Instead, it should trigger the Create() function + // of the tracker, where an arbitrary gvr can be specified. + gvr, _ := meta.UnsafeGuessKindToResource(gvk) + // Resource doesn't have the concept of "__internal" version, just set it to "". + if gvr.Version == runtime.APIVersionInternal { + gvr.Version = "" + } + + err := t.add(gvr, obj, objMeta.GetNamespace(), false) + if err != nil { + return err + } + } + return nil +} + +func (t *tracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + return t.add(gvr, obj, ns, false) +} + +func (t *tracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { + return t.add(gvr, obj, ns, true) +} + +func (t *tracker) getWatches(gvr schema.GroupVersionResource, ns string) []*watch.RaceFreeFakeWatcher { + watches := []*watch.RaceFreeFakeWatcher{} + if t.watchers[gvr] != nil { + if w := t.watchers[gvr][ns]; w != nil { + watches = append(watches, w...) 
+		}
+		if w := t.watchers[gvr][""]; w != nil {
+			watches = append(watches, w...)
+		}
+	}
+	return watches
+}
+
+func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns string, replaceExisting bool) error {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	gr := gvr.GroupResource()
+
+	// To prevent the object from being accidentally modified by the caller
+	// after it's been added to the tracker, we always store the deep
+	// copy.
+	obj = obj.DeepCopyObject()
+
+	newMeta, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+
+	// Propagate namespace to the new object if it hasn't already been set.
+	if len(newMeta.GetNamespace()) == 0 {
+		newMeta.SetNamespace(ns)
+	}
+
+	if ns != newMeta.GetNamespace() {
+		msg := fmt.Sprintf("request namespace does not match object namespace, request: %q object: %q", ns, newMeta.GetNamespace())
+		return errors.NewBadRequest(msg)
+	}
+
+	for i, existingObj := range t.objects[gvr] {
+		oldMeta, err := meta.Accessor(existingObj)
+		if err != nil {
+			return err
+		}
+		if oldMeta.GetNamespace() == newMeta.GetNamespace() && oldMeta.GetName() == newMeta.GetName() {
+			if replaceExisting {
+				for _, w := range t.getWatches(gvr, ns) {
+					w.Modify(obj)
+				}
+				t.objects[gvr][i] = obj
+				return nil
+			}
+			return errors.NewAlreadyExists(gr, newMeta.GetName())
+		}
+	}
+
+	if replaceExisting {
+		// Tried to update but no matching object was found.
+		return errors.NewNotFound(gr, newMeta.GetName())
+	}
+
+	t.objects[gvr] = append(t.objects[gvr], obj)
+
+	for _, w := range t.getWatches(gvr, ns) {
+		w.Add(obj)
+	}
+
+	return nil
+}
+
+func (t *tracker) addList(obj runtime.Object, replaceExisting bool) error {
+	list, err := meta.ExtractList(obj)
+	if err != nil {
+		return err
+	}
+	errs := runtime.DecodeList(list, t.decoder)
+	if len(errs) > 0 {
+		return errs[0]
+	}
+	for _, obj := range list {
+		if err := t.Add(obj); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	found := false
+
+	for i, existingObj := range t.objects[gvr] {
+		objMeta, err := meta.Accessor(existingObj)
+		if err != nil {
+			return err
+		}
+		if objMeta.GetNamespace() == ns && objMeta.GetName() == name {
+			obj := t.objects[gvr][i]
+			t.objects[gvr] = append(t.objects[gvr][:i], t.objects[gvr][i+1:]...)
+			for _, w := range t.getWatches(gvr, ns) {
+				w.Delete(obj)
+			}
+			found = true
+			break
+		}
+	}
+
+	if found {
+		return nil
+	}
+
+	return errors.NewNotFound(gvr.GroupResource(), name)
+}
+
+// filterByNamespaceAndName returns all objects in the collection that
+// match provided namespace and name. Empty namespace matches
+// non-namespaced objects.
+func filterByNamespaceAndName(objs []runtime.Object, ns, name string) ([]runtime.Object, error) {
+	var res []runtime.Object
+
+	for _, obj := range objs {
+		acc, err := meta.Accessor(obj)
+		if err != nil {
+			return nil, err
+		}
+		if ns != "" && acc.GetNamespace() != ns {
+			continue
+		}
+		if name != "" && acc.GetName() != name {
+			continue
+		}
+		res = append(res, obj)
+	}
+
+	return res, nil
+}
+
+func DefaultWatchReactor(watchInterface watch.Interface, err error) WatchReactionFunc {
+	return func(action Action) (bool, watch.Interface, error) {
+		return true, watchInterface, err
+	}
+}
+
+// SimpleReactor is a Reactor. Each reaction function is attached to a given verb,resource tuple. "*" in either field matches everything for that value.
+// For instance, *,pods matches all verbs on pods.
This allows for easier composition of reaction functions +type SimpleReactor struct { + Verb string + Resource string + + Reaction ReactionFunc +} + +func (r *SimpleReactor) Handles(action Action) bool { + verbCovers := r.Verb == "*" || r.Verb == action.GetVerb() + if !verbCovers { + return false + } + resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource + if !resourceCovers { + return false + } + + return true +} + +func (r *SimpleReactor) React(action Action) (bool, runtime.Object, error) { + return r.Reaction(action) +} + +// SimpleWatchReactor is a WatchReactor. Each reaction function is attached to a given resource. "*" matches everything for that value. +// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions +type SimpleWatchReactor struct { + Resource string + + Reaction WatchReactionFunc +} + +func (r *SimpleWatchReactor) Handles(action Action) bool { + resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource + if !resourceCovers { + return false + } + + return true +} + +func (r *SimpleWatchReactor) React(action Action) (bool, watch.Interface, error) { + return r.Reaction(action) +} + +// SimpleProxyReactor is a ProxyReactor. Each reaction function is attached to a given resource. "*" matches everything for that value. +// For instance, *,pods matches all verbs on pods. This allows for easier composition of reaction functions. +type SimpleProxyReactor struct { + Resource string + + Reaction ProxyReactionFunc +} + +func (r *SimpleProxyReactor) Handles(action Action) bool { + resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource + if !resourceCovers { + return false + } + + return true +} + +func (r *SimpleProxyReactor) React(action Action) (bool, restclient.ResponseWrapper, error) { + return r.Reaction(action) +} diff --git a/vendor/k8s.io/client-go/tools/watch/informerwatcher.go b/vendor/k8s.io/client-go/tools/watch/informerwatcher.go new file mode 100644 index 000000000..35a346949 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/watch/informerwatcher.go @@ -0,0 +1,114 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package watch
+
+import (
+	"sync"
+	"sync/atomic"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/tools/cache"
+)
+
+func newTicketer() *ticketer {
+	return &ticketer{
+		cond: sync.NewCond(&sync.Mutex{}),
+	}
+}
+
+type ticketer struct {
+	counter uint64
+
+	cond    *sync.Cond
+	current uint64
+}
+
+func (t *ticketer) GetTicket() uint64 {
+	// -1 to start from 0
+	return atomic.AddUint64(&t.counter, 1) - 1
+}
+
+func (t *ticketer) WaitForTicket(ticket uint64, f func()) {
+	t.cond.L.Lock()
+	defer t.cond.L.Unlock()
+	for ticket != t.current {
+		t.cond.Wait()
+	}
+
+	f()
+
+	t.current++
+	t.cond.Broadcast()
+}
+
+// NewIndexerInformerWatcher will create an IndexerInformer and wrap it into watch.Interface
+// so you can use it anywhere you'd have used a regular Watcher returned from a Watch method.
+func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (cache.Indexer, cache.Controller, watch.Interface) {
+	ch := make(chan watch.Event)
+	w := watch.NewProxyWatcher(ch)
+	t := newTicketer()
+
+	indexer, informer := cache.NewIndexerInformer(lw, objType, 0, cache.ResourceEventHandlerFuncs{
+		AddFunc: func(obj interface{}) {
+			go t.WaitForTicket(t.GetTicket(), func() {
+				select {
+				case ch <- watch.Event{
+					Type:   watch.Added,
+					Object: obj.(runtime.Object),
+				}:
+				case <-w.StopChan():
+				}
+			})
+		},
+		UpdateFunc: func(old, new interface{}) {
+			go t.WaitForTicket(t.GetTicket(), func() {
+				select {
+				case ch <- watch.Event{
+					Type:   watch.Modified,
+					Object: new.(runtime.Object),
+				}:
+				case <-w.StopChan():
+				}
+			})
+		},
+		DeleteFunc: func(obj interface{}) {
+			go t.WaitForTicket(t.GetTicket(), func() {
+				staleObj, stale := obj.(cache.DeletedFinalStateUnknown)
+				if stale {
+					// We have no means of passing the additional information down using
+					// watch API based on watch.Event, but the caller can filter such
+					// objects by checking if metadata.deletionTimestamp is set.
+					obj = staleObj
+				}
+
+				select {
+				case ch <- watch.Event{
+					Type:   watch.Deleted,
+					Object: obj.(runtime.Object),
+				}:
+				case <-w.StopChan():
+				}
+			})
+		},
+	}, cache.Indexers{})
+
+	go func() {
+		informer.Run(w.StopChan())
+	}()
+
+	return indexer, informer, w
+}
diff --git a/vendor/k8s.io/client-go/tools/watch/until.go b/vendor/k8s.io/client-go/tools/watch/until.go
new file mode 100644
index 000000000..aa4bbc211
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/watch/until.go
@@ -0,0 +1,225 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog"
+)
+
+// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet,
+// or an error if the condition failed or detected an error state.
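+//
+// A minimal sketch of a precondition (hypothetical object key, illustrative only):
+//
+//	precondition := func(store cache.Store) (bool, error) {
+//		_, exists, err := store.GetByKey("default/my-pod")
+//		return exists, err
+//	}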
+type PreconditionFunc func(store cache.Store) (bool, error)
+
+// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet,
+// or an error if the condition cannot be checked and should terminate. In general, it is better to define
+// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed
+// from false to true).
+type ConditionFunc func(event watch.Event) (bool, error)
+
+// ErrWatchClosed is returned when the watch channel is closed before timeout in UntilWithoutRetry.
+var ErrWatchClosed = errors.New("watch closed before UntilWithoutRetry timeout")
+
+// UntilWithoutRetry reads items from the watch until each provided condition succeeds, and then returns the last watch
+// encountered. The first condition that returns an error terminates the watch (and the event is also returned).
+// If no event has been received, the returned event will be nil.
+// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition.
+// Waits until context deadline or until context is canceled.
+//
+// Warning: Unless you have a very specific use case (probably a special Watcher) don't use this function!!!
+// Warning: This will fail e.g. on API timeouts and/or 'too old resource version' error.
+// Warning: You are most probably looking for a function *Until* or *UntilWithSync* below,
+// Warning: solving such issues.
+// TODO: Consider making this function private to prevent misuse when the other occurrences in our codebase are gone.
+func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions ...ConditionFunc) (*watch.Event, error) {
+	ch := watcher.ResultChan()
+	defer watcher.Stop()
+	var lastEvent *watch.Event
+	for _, condition := range conditions {
+		// check the next condition against the previous event and short circuit waiting for the next watch
+		if lastEvent != nil {
+			done, err := condition(*lastEvent)
+			if err != nil {
+				return lastEvent, err
+			}
+			if done {
+				continue
+			}
+		}
+	ConditionSucceeded:
+		for {
+			select {
+			case event, ok := <-ch:
+				if !ok {
+					return lastEvent, ErrWatchClosed
+				}
+				lastEvent = &event
+
+				done, err := condition(event)
+				if err != nil {
+					return lastEvent, err
+				}
+				if done {
+					break ConditionSucceeded
+				}
+
+			case <-ctx.Done():
+				return lastEvent, wait.ErrWaitTimeout
+			}
+		}
+	}
+	return lastEvent, nil
+}
+
+// UntilWithSync creates an informer from lw, optionally checks precondition when the store is synced,
+// and watches the output until each provided condition succeeds, in a way that is identical
+// to function UntilWithoutRetry. (See above.)
+// UntilWithSync can deal with all errors like API timeout, lost connections and 'Resource version too old'.
+// It is the only function that can recover from 'Resource version too old', Until and UntilWithoutRetry will
+// just fail in that case. On the other hand it can't provide you with guarantees as strong as using simple
+// Watch method with Until. It can skip some intermediate events in case of watch function failing but it will
+// re-list to recover and you always get an event, if there has been a change, after recovery.
+// Also with the current implementation based on DeltaFIFO, order of the events you receive is guaranteed only for
+// a particular object, not between more of them even if it's the same resource.
+// The most frequent usage would be a command that needs to watch the "state of the world" and shouldn't fail, like:
+// waiting for object reaching a state, "small" controllers, ...
+func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition PreconditionFunc, conditions ...ConditionFunc) (*watch.Event, error) {
+	indexer, informer, watcher := NewIndexerInformerWatcher(lw, objType)
+	// Proxy watcher can be stopped multiple times so it's fine to use defer here to cover alternative branches and
+	// let UntilWithoutRetry stop it
+	defer watcher.Stop()
+
+	if precondition != nil {
+		if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
+			return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %v", ctx.Err())
+		}
+
+		done, err := precondition(indexer)
+		if err != nil {
+			return nil, err
+		}
+
+		if done {
+			return nil, nil
+		}
+	}
+
+	return UntilWithoutRetry(ctx, watcher, conditions...)
+}
+
+// ContextWithOptionalTimeout wraps context.WithTimeout and handles infinite timeouts expressed as 0 duration.
+func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
+	if timeout < 0 {
+		// This should be handled in validation
+		klog.Errorf("Timeout for context shall not be negative!")
+		timeout = 0
+	}
+
+	if timeout == 0 {
+		return context.WithCancel(parent)
+	}
+
+	return context.WithTimeout(parent, timeout)
+}
+
+// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout
+// if timeout is exceeded without all conditions returning true, or an error if an error occurs.
+// TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until.
+// TODO: remove when no longer used
+//
+// Deprecated: Use UntilWithSync instead.
+func ListWatchUntil(timeout time.Duration, lw cache.ListerWatcher, conditions ...ConditionFunc) (*watch.Event, error) {
+	if len(conditions) == 0 {
+		return nil, nil
+	}
+
+	list, err := lw.List(metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+	initialItems, err := meta.ExtractList(list)
+	if err != nil {
+		return nil, err
+	}
+
+	// use the initial items as simulated "adds"
+	var lastEvent *watch.Event
+	currIndex := 0
+	passedConditions := 0
+	for _, condition := range conditions {
+		// check the next condition against the previous event and short circuit waiting for the next watch
+		if lastEvent != nil {
+			done, err := condition(*lastEvent)
+			if err != nil {
+				return lastEvent, err
+			}
+			if done {
+				passedConditions = passedConditions + 1
+				continue
+			}
+		}
+
+	ConditionSucceeded:
+		for currIndex < len(initialItems) {
+			lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]}
+			currIndex++
+
+			done, err := condition(*lastEvent)
+			if err != nil {
+				return lastEvent, err
+			}
+			if done {
+				passedConditions = passedConditions + 1
+				break ConditionSucceeded
+			}
+		}
+	}
+	if passedConditions == len(conditions) {
+		return lastEvent, nil
+	}
+	remainingConditions := conditions[passedConditions:]
+
+	metaObj, err := meta.ListAccessor(list)
+	if err != nil {
+		return nil, err
+	}
+	currResourceVersion := metaObj.GetResourceVersion()
+
+	watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion})
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, cancel := ContextWithOptionalTimeout(context.Background(), timeout)
+	defer cancel()
+	evt, err := UntilWithoutRetry(ctx, watchInterface, remainingConditions...)
+	if err == ErrWatchClosed {
+		// present a consistent error interface to callers
+		err = wait.ErrWaitTimeout
+	}
+	return evt, err
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/api/legacyscheme/scheme.go b/vendor/k8s.io/kubernetes/pkg/api/legacyscheme/scheme.go
new file mode 100644
index 000000000..acfcc8f8e
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/api/legacyscheme/scheme.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package legacyscheme
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+)
+
+// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
+// NOTE: If you are copying this file to start a new api group, STOP! Copy the
+// extensions group instead. This Scheme is special and should appear ONLY in
+// the api group, unless you really know what you're doing.
+// TODO(lavalamp): make the above error impossible.
+var Scheme = runtime.NewScheme()
+
+// Codecs provides access to encoding and decoding for the scheme
+var Codecs = serializer.NewCodecFactory(Scheme)
+
+// ParameterCodec handles versioning of objects that are converted to query parameters.
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/util.go b/vendor/k8s.io/kubernetes/pkg/api/service/util.go
new file mode 100644
index 000000000..5de5f2765
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/api/service/util.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package service
+
+import (
+	"fmt"
+	api "k8s.io/kubernetes/pkg/apis/core"
+	netsets "k8s.io/kubernetes/pkg/util/net/sets"
+	"strings"
+)
+
+const (
+	defaultLoadBalancerSourceRanges = "0.0.0.0/0"
+)
+
+// IsAllowAll checks whether the netsets.IPNet allows traffic from 0.0.0.0/0
+func IsAllowAll(ipnets netsets.IPNet) bool {
+	for _, s := range ipnets.StringSlice() {
+		if s == "0.0.0.0/0" {
+			return true
+		}
+	}
+	return false
+}
+
+// GetLoadBalancerSourceRanges first tries to parse and verify the LoadBalancerSourceRanges field from a service.
+// If the field is not specified, it falls back to parsing and verifying the AnnotationLoadBalancerSourceRangesKey
+// annotation on the service, extracting the source ranges to allow, and if that is not present either, it returns
+// a default (allow-all) value.
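+//
+// For example (illustrative values only):
+//
+//	svc.Annotations = map[string]string{
+//		api.AnnotationLoadBalancerSourceRangesKey: "10.0.0.0/24,192.168.2.0/24",
+//	}
+//	ipnets, _ := GetLoadBalancerSourceRanges(svc) // parses to the two CIDRs above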
+func GetLoadBalancerSourceRanges(service *api.Service) (netsets.IPNet, error) { + var ipnets netsets.IPNet + var err error + // if SourceRange field is specified, ignore sourceRange annotation + if len(service.Spec.LoadBalancerSourceRanges) > 0 { + specs := service.Spec.LoadBalancerSourceRanges + ipnets, err = netsets.ParseIPNets(specs...) + + if err != nil { + return nil, fmt.Errorf("service.Spec.LoadBalancerSourceRanges: %v is not valid. Expecting a list of IP ranges. For example, 10.0.0.0/24. Error msg: %v", specs, err) + } + } else { + val := service.Annotations[api.AnnotationLoadBalancerSourceRangesKey] + val = strings.TrimSpace(val) + if val == "" { + val = defaultLoadBalancerSourceRanges + } + specs := strings.Split(val, ",") + ipnets, err = netsets.ParseIPNets(specs...) + if err != nil { + return nil, fmt.Errorf("%s: %s is not valid. Expecting a comma-separated list of source IP ranges. For example, 10.0.0.0/24,192.168.2.0/24", api.AnnotationLoadBalancerSourceRangesKey, val) + } + } + return ipnets, nil +} + +// RequestsOnlyLocalTraffic checks if service requests OnlyLocal traffic. +func RequestsOnlyLocalTraffic(service *api.Service) bool { + if service.Spec.Type != api.ServiceTypeLoadBalancer && + service.Spec.Type != api.ServiceTypeNodePort { + return false + } + + return service.Spec.ExternalTrafficPolicy == api.ServiceExternalTrafficPolicyTypeLocal +} + +// NeedsHealthCheck checks if service needs health check. +func NeedsHealthCheck(service *api.Service) bool { + if service.Spec.Type != api.ServiceTypeLoadBalancer { + return false + } + return RequestsOnlyLocalTraffic(service) +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go b/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go new file mode 100644 index 000000000..558e8a48c --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go @@ -0,0 +1,304 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pod + +import ( + "fmt" + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// FindPort locates the container port for the given pod and portName. If the +// targetPort is a number, use that. If the targetPort is a string, look that +// string up in all named ports in all containers in the target pod. If no +// match is found, fail. 
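+//
+// For example (hypothetical values):
+//
+//	svcPort := &v1.ServicePort{Protocol: v1.ProtocolTCP, TargetPort: intstr.FromString("http")}
+//	port, err := FindPort(pod, svcPort) // resolves to the pod's container port named "http"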
+func FindPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) { + portName := svcPort.TargetPort + switch portName.Type { + case intstr.String: + name := portName.StrVal + for _, container := range pod.Spec.Containers { + for _, port := range container.Ports { + if port.Name == name && port.Protocol == svcPort.Protocol { + return int(port.ContainerPort), nil + } + } + } + case intstr.Int: + return portName.IntValue(), nil + } + + return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID) +} + +// Visitor is called with each object name, and returns true if visiting should continue +type Visitor func(name string) (shouldContinue bool) + +// VisitPodSecretNames invokes the visitor function with the name of every secret +// referenced by the pod spec. If visitor returns false, visiting is short-circuited. +// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited. +// Returns true if visiting completed, false if visiting was short-circuited. +func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool { + for _, reference := range pod.Spec.ImagePullSecrets { + if !visitor(reference.Name) { + return false + } + } + for i := range pod.Spec.InitContainers { + if !visitContainerSecretNames(&pod.Spec.InitContainers[i], visitor) { + return false + } + } + for i := range pod.Spec.Containers { + if !visitContainerSecretNames(&pod.Spec.Containers[i], visitor) { + return false + } + } + var source *v1.VolumeSource + + for i := range pod.Spec.Volumes { + source = &pod.Spec.Volumes[i].VolumeSource + switch { + case source.AzureFile != nil: + if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) { + return false + } + case source.CephFS != nil: + if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) { + return false + } + case source.Cinder != nil: + if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) { + return false + } + case source.FlexVolume != nil: + if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) { + return false + } + case source.Projected != nil: + for j := range source.Projected.Sources { + if source.Projected.Sources[j].Secret != nil { + if !visitor(source.Projected.Sources[j].Secret.Name) { + return false + } + } + } + case source.RBD != nil: + if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) { + return false + } + case source.Secret != nil: + if !visitor(source.Secret.SecretName) { + return false + } + case source.ScaleIO != nil: + if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) { + return false + } + case source.ISCSI != nil: + if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) { + return false + } + case source.StorageOS != nil: + if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) { + return false + } + } + } + return true +} + +func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool { + for _, env := range container.EnvFrom { + if env.SecretRef != nil { + if !visitor(env.SecretRef.Name) { + return false + } + } + } + for _, envVar := range container.Env { + if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil { + if !visitor(envVar.ValueFrom.SecretKeyRef.Name) { + return false + } + } + } + return true +} + +// VisitPodConfigmapNames invokes the visitor function with the name of every configmap +// referenced by the pod spec. If visitor returns false, visiting is short-circuited. +// Transitive references (e.g. 
pod -> pvc -> pv -> secret) are not visited. +// Returns true if visiting completed, false if visiting was short-circuited. +func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool { + for i := range pod.Spec.InitContainers { + if !visitContainerConfigmapNames(&pod.Spec.InitContainers[i], visitor) { + return false + } + } + for i := range pod.Spec.Containers { + if !visitContainerConfigmapNames(&pod.Spec.Containers[i], visitor) { + return false + } + } + var source *v1.VolumeSource + for i := range pod.Spec.Volumes { + source = &pod.Spec.Volumes[i].VolumeSource + switch { + case source.Projected != nil: + for j := range source.Projected.Sources { + if source.Projected.Sources[j].ConfigMap != nil { + if !visitor(source.Projected.Sources[j].ConfigMap.Name) { + return false + } + } + } + case source.ConfigMap != nil: + if !visitor(source.ConfigMap.Name) { + return false + } + } + } + return true +} + +func visitContainerConfigmapNames(container *v1.Container, visitor Visitor) bool { + for _, env := range container.EnvFrom { + if env.ConfigMapRef != nil { + if !visitor(env.ConfigMapRef.Name) { + return false + } + } + } + for _, envVar := range container.Env { + if envVar.ValueFrom != nil && envVar.ValueFrom.ConfigMapKeyRef != nil { + if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) { + return false + } + } + } + return true +} + +// GetContainerStatus extracts the status of container "name" from "statuses". +// It also returns if "name" exists. +func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) { + for i := range statuses { + if statuses[i].Name == name { + return statuses[i], true + } + } + return v1.ContainerStatus{}, false +} + +// GetExistingContainerStatus extracts the status of container "name" from "statuses", +// It also returns if "name" exists. +func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.ContainerStatus { + status, _ := GetContainerStatus(statuses, name) + return status +} + +// IsPodAvailable returns true if a pod is available; false otherwise. +// Precondition for an available pod is that it must be ready. On top +// of that, there are two cases when a pod can be considered available: +// 1. minReadySeconds == 0, or +// 2. LastTransitionTime (is set) + minReadySeconds < current time +func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool { + if !IsPodReady(pod) { + return false + } + + c := GetPodReadyCondition(pod.Status) + minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second + if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) { + return true + } + return false +} + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *v1.Pod) bool { + return IsPodReadyConditionTrue(pod.Status) +} + +// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise. +func IsPodReadyConditionTrue(status v1.PodStatus) bool { + condition := GetPodReadyCondition(status) + return condition != nil && condition.Status == v1.ConditionTrue +} + +// GetPodReadyCondition extracts the pod ready condition from the given status and returns that. +// Returns nil if the condition is not present. +func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition { + _, condition := GetPodCondition(&status, v1.PodReady) + return condition +} + +// GetPodCondition extracts the provided condition from the given status and returns that. 
+// Returns -1 and nil if the condition is not present; otherwise it returns the index and the located condition.
+func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
+	if status == nil {
+		return -1, nil
+	}
+	return GetPodConditionFromList(status.Conditions, conditionType)
+}
+
+// GetPodConditionFromList extracts the provided condition from the given list of conditions and
+// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present.
+func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
+	if conditions == nil {
+		return -1, nil
+	}
+	for i := range conditions {
+		if conditions[i].Type == conditionType {
+			return i, &conditions[i]
+		}
+	}
+	return -1, nil
+}
+
+// UpdatePodCondition updates existing pod condition or creates a new one. Sets LastTransitionTime to now if the
+// status has changed.
+// Returns true if pod condition has changed or has been added.
+func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool {
+	condition.LastTransitionTime = metav1.Now()
+	// Try to find this pod condition.
+	conditionIndex, oldCondition := GetPodCondition(status, condition.Type)
+
+	if oldCondition == nil {
+		// We are adding new pod condition.
+		status.Conditions = append(status.Conditions, *condition)
+		return true
+	}
+	// We are updating an existing condition, so we need to check if it has changed.
+	if condition.Status == oldCondition.Status {
+		condition.LastTransitionTime = oldCondition.LastTransitionTime
+	}
+
+	isEqual := condition.Status == oldCondition.Status &&
+		condition.Reason == oldCondition.Reason &&
+		condition.Message == oldCondition.Message &&
+		condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) &&
+		condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime)
+
+	status.Conditions[conditionIndex] = *condition
+	// Return true if one of the fields has changed.
+	return !isEqual
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go
new file mode 100644
index 000000000..1ff549998
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+package apps // import "k8s.io/kubernetes/pkg/apis/apps"
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go
new file mode 100644
index 000000000..b56ec96cb
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/register.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apps
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/kubernetes/pkg/apis/autoscaling"
+)
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+// GroupName is the group name used in this package
+const GroupName = "apps"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	// TODO this will get cleaned up when the scheme types are fixed
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&DaemonSet{},
+		&DaemonSetList{},
+		&Deployment{},
+		&DeploymentList{},
+		&DeploymentRollback{},
+		&autoscaling.Scale{},
+		&StatefulSet{},
+		&StatefulSetList{},
+		&ControllerRevision{},
+		&ControllerRevisionList{},
+		&ReplicaSet{},
+		&ReplicaSetList{},
+	)
+	return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go
new file mode 100644
index 000000000..c15927d45
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/types.go
@@ -0,0 +1,801 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apps
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	api "k8s.io/kubernetes/pkg/apis/core"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StatefulSet represents a set of pods with consistent identities.
+// Identities are defined as:
+//  - Network: A single stable DNS and hostname.
+//  - Storage: As many VolumeClaims as requested.
+// The StatefulSet guarantees that a given network identity will always
+// map to the same storage identity.
+type StatefulSet struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Spec defines the desired identities of pods in this set.
+	// +optional
+	Spec StatefulSetSpec
+
+	// Status is the current status of Pods in this StatefulSet. This data
+	// may be out of date by some window of time.
+ // +optional
+ Status StatefulSetStatus
+}
+
+// PodManagementPolicyType defines the policy for creating pods under a stateful set.
+type PodManagementPolicyType string
+
+const (
+ // OrderedReadyPodManagement will create pods in strictly increasing order on
+ // scale up and strictly decreasing order on scale down, progressing only when
+ // the previous pod is ready or terminated. At most one pod will be changed
+ // at any time.
+ OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady"
+ // ParallelPodManagement will create and delete pods as soon as the stateful set
+ // replica count is changed, and will not wait for pods to be ready or complete
+ // termination.
+ ParallelPodManagement = "Parallel"
+)
+
+// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
+type StatefulSetUpdateStrategy struct {
+ // Type indicates the type of the StatefulSetUpdateStrategy.
+ Type StatefulSetUpdateStrategyType
+ // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.
+ RollingUpdate *RollingUpdateStatefulSetStrategy
+}
+
+// StatefulSetUpdateStrategyType is a string enumeration type that enumerates
+// all possible update strategies for the StatefulSet controller.
+type StatefulSetUpdateStrategyType string
+
+const (
+ // RollingUpdateStatefulSetStrategyType indicates that update will be
+ // applied to all Pods in the StatefulSet with respect to the StatefulSet
+ // ordering constraints. When a scale operation is performed with this
+ // strategy, new Pods will be created from the specification version indicated
+ // by the StatefulSet's updateRevision.
+ RollingUpdateStatefulSetStrategyType = "RollingUpdate"
+ // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version
+ // tracking and ordered rolling restarts are disabled. Pods are recreated
+ // from the StatefulSetSpec when they are manually deleted. When a scale
+ // operation is performed with this strategy, new Pods will be created from the
+ // specification version indicated by the StatefulSet's currentRevision.
+ OnDeleteStatefulSetStrategyType = "OnDelete"
+)
+
+// RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.
+type RollingUpdateStatefulSetStrategy struct {
+ // Partition indicates the ordinal at which the StatefulSet should be
+ // partitioned.
+ Partition int32
+}
+
+// A StatefulSetSpec is the specification of a StatefulSet.
+type StatefulSetSpec struct {
+ // Replicas is the desired number of replicas of the given Template.
+ // These are replicas in the sense that they are instantiations of the
+ // same Template, but individual replicas also have a consistent identity.
+ // If unspecified, defaults to 1.
+ // TODO: Consider a rename of this field.
+ // +optional
+ Replicas int32
+
+ // Selector is a label query over pods that should match the replica count.
+ // If empty, defaulted to labels on the pod template.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ // +optional
+ Selector *metav1.LabelSelector
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Each pod stamped out by the StatefulSet
+ // will fulfill this Template, but have a unique identity from the rest
+ // of the StatefulSet.
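+ // A minimal sketch of a StatefulSetSpec populated from client code
+ // (illustrative values; assumes this package is aliased as apps, and
+ // podTemplate is an api.PodTemplateSpec carrying a matching app=web label):
+ //
+ //  spec := apps.StatefulSetSpec{
+ //    Replicas:    3,
+ //    Selector:    &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
+ //    ServiceName: "web",
+ //    Template:    podTemplate,
+ //  }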
+ Template api.PodTemplateSpec
+
+ // VolumeClaimTemplates is a list of claims that pods are allowed to reference.
+ // The StatefulSet controller is responsible for mapping network identities to
+ // claims in a way that maintains the identity of a pod. Every claim in
+ // this list must have at least one matching (by name) volumeMount in one
+ // container in the template. A claim in this list takes precedence over
+ // any volumes in the template with the same name.
+ // TODO: Define the behavior if a claim already exists with the same name.
+ // +optional
+ VolumeClaimTemplates []api.PersistentVolumeClaim
+
+ // ServiceName is the name of the service that governs this StatefulSet.
+ // This service must exist before the StatefulSet, and is responsible for
+ // the network identity of the set. Pods get DNS/hostnames that follow the
+ // pattern: pod-specific-string.serviceName.default.svc.cluster.local
+ // where "pod-specific-string" is managed by the StatefulSet controller.
+ ServiceName string
+
+ // PodManagementPolicy controls how pods are created during initial scale up,
+ // when replacing pods on nodes, or when scaling down. The default policy is
+ // `OrderedReady`, where pods are created in increasing order (pod-0, then
+ // pod-1, etc.) and the controller will wait until each pod is ready before
+ // continuing. When scaling down, the pods are removed in the opposite order.
+ // The alternative policy is `Parallel` which will create pods in parallel
+ // to match the desired scale without waiting, and on scale down will delete
+ // all pods at once.
+ // +optional
+ PodManagementPolicy PodManagementPolicyType
+
+ // updateStrategy indicates the StatefulSetUpdateStrategy that will be
+ // employed to update Pods in the StatefulSet when a revision is made to
+ // Template.
+ UpdateStrategy StatefulSetUpdateStrategy
+
+ // revisionHistoryLimit is the maximum number of revisions that will
+ // be maintained in the StatefulSet's revision history. The revision history
+ // consists of all revisions not represented by a currently applied
+ // StatefulSetSpec version. The default value is 10.
+ RevisionHistoryLimit *int32
+}
+
+// StatefulSetStatus represents the current state of a StatefulSet.
+type StatefulSetStatus struct {
+ // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the
+ // StatefulSet's generation, which is updated on mutation by the API Server.
+ // +optional
+ ObservedGeneration *int64
+
+ // replicas is the number of Pods created by the StatefulSet controller.
+ Replicas int32
+
+ // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.
+ ReadyReplicas int32
+
+ // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by currentRevision.
+ CurrentReplicas int32
+
+ // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version
+ // indicated by updateRevision.
+ UpdatedReplicas int32
+
+ // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the
+ // sequence [0,currentReplicas).
+ CurrentRevision string
+
+ // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence
+ // [replicas-updatedReplicas,replicas)
+ UpdateRevision string
+
+ // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller
+ // uses this field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ControllerRevision.
+ // +optional
+ CollisionCount *int32
+
+ // Represents the latest available observations of a statefulset's current state.
+ Conditions []StatefulSetCondition
+}
+
+type StatefulSetConditionType string
+
+// TODO: Add valid condition types for StatefulSets.
+
+// StatefulSetCondition describes the state of a statefulset at a certain point.
+type StatefulSetCondition struct {
+ // Type of statefulset condition.
+ Type StatefulSetConditionType
+ // Status of the condition, one of True, False, Unknown.
+ Status api.ConditionStatus
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time
+ // The reason for the condition's last transition.
+ Reason string
+ // A human readable message indicating details about the transition.
+ Message string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// StatefulSetList is a collection of StatefulSets.
+type StatefulSetList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+ Items []StatefulSet
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRevision implements an immutable snapshot of state data. Clients
+// are responsible for serializing and deserializing the objects that contain
+// their internal state.
+// Once a ControllerRevision has been successfully created, it cannot be updated.
+// The API Server will fail validation of all requests that attempt to mutate
+// the Data field. ControllerRevisions may, however, be deleted.
+type ControllerRevision struct {
+ metav1.TypeMeta
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta
+
+ // Data is the Object representing the state.
+ Data runtime.Object
+
+ // Revision indicates the revision of the state represented by Data.
+ Revision int64
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControllerRevisionList is a resource containing a list of ControllerRevision objects.
+type ControllerRevisionList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ // Items is the list of ControllerRevision objects.
+ Items []ControllerRevision
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type Deployment struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Specification of the desired behavior of the Deployment.
+ // +optional
+ Spec DeploymentSpec
+
+ // Most recently observed status of the Deployment.
+ // +optional
+ Status DeploymentStatus
+}
+
+type DeploymentSpec struct {
+ // Number of desired pods. Defaults to 1.
+ // +optional
+ Replicas int32
+
+ // Label selector for pods. Existing ReplicaSets whose pods are
+ // selected by this will be the ones affected by this deployment.
+ // +optional
+ Selector *metav1.LabelSelector
+
+ // Template describes the pods that will be created.
+ Template api.PodTemplateSpec
+
+ // The deployment strategy to use to replace existing pods with new ones.
+ // +optional
+ Strategy DeploymentStrategy
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its containers crashing, for it to be considered available.
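+ // As an illustration: with MinReadySeconds set to 10, a pod whose containers
+ // all became ready at time t is counted as available only from t+10s onward,
+ // provided none of its containers crashed in the meantime.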
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ MinReadySeconds int32
+
+ // The number of old ReplicaSets to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // This is set to the max value of int32 (i.e. 2147483647) by default, which means
+ // "retaining all old ReplicaSets".
+ // +optional
+ RevisionHistoryLimit *int32
+
+ // Indicates that the deployment is paused and will not be processed by the
+ // deployment controller.
+ // +optional
+ Paused bool
+
+ // DEPRECATED.
+ // The config this deployment is rolling back to. Will be cleared after rollback is done.
+ // +optional
+ RollbackTo *RollbackConfig
+
+ // The maximum time in seconds for a deployment to make progress before it
+ // is considered to be failed. The deployment controller will continue to
+ // process failed deployments and a condition with a ProgressDeadlineExceeded
+ // reason will be surfaced in the deployment status. Note that progress will
+ // not be estimated during the time a deployment is paused. This is set to
+ // the max value of int32 (i.e. 2147483647) by default, which means "no deadline".
+ // +optional
+ ProgressDeadlineSeconds *int32
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DEPRECATED.
+// DeploymentRollback stores the information required to roll back a deployment.
+type DeploymentRollback struct {
+ metav1.TypeMeta
+ // Required: This must match the Name of a deployment.
+ Name string
+ // The annotations to be updated on a deployment.
+ // +optional
+ UpdatedAnnotations map[string]string
+ // The config of this deployment rollback.
+ RollbackTo RollbackConfig
+}
+
+// DEPRECATED.
+type RollbackConfig struct {
+ // The revision to roll back to. If set to 0, rolls back to the last revision.
+ // +optional
+ Revision int64
+}
+
+const (
+ // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
+ // to existing RCs (and label key that is added to its pods) to prevent the existing RCs
+ // from selecting new pods (and old pods from being selected by the new RC).
+ DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
+)
+
+type DeploymentStrategy struct {
+ // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
+ // +optional
+ Type DeploymentStrategyType
+
+ // Rolling update config params. Present only if DeploymentStrategyType =
+ // RollingUpdate.
+ //---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be.
+ // +optional
+ RollingUpdate *RollingUpdateDeployment
+}
+
+type DeploymentStrategyType string
+
+const (
+ // Kill all existing pods before creating new ones.
+ RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate"
+
+ // Replace the old RCs by a new one using rolling update, i.e. gradually scale down the old RCs and scale up the new one.
+ RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate"
+)
+
+// Spec to control the desired behavior of rolling update.
+type RollingUpdateDeployment struct {
+ // The maximum number of pods that can be unavailable during the update.
+ // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%).
+ // Absolute number is calculated from percentage by rounding down.
+ // This cannot be 0 if MaxSurge is 0.
+ // By default, a fixed value of 1 is used.
+ // Example: when this is set to 30%, the old RC can be scaled down by 30%
+ // immediately when the rolling update starts. Once new pods are ready, the old RC
+ // can be scaled down further, followed by scaling up the new RC, ensuring
+ // that at least 70% of the original number of pods are available at all times
+ // during the update.
+ // +optional
+ MaxUnavailable intstr.IntOrString
+
+ // The maximum number of pods that can be scheduled above the original number of
+ // pods.
+ // Value can be an absolute number (ex: 5) or a percentage of total pods at
+ // the start of the update (ex: 10%). This cannot be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up.
+ // By default, a value of 1 is used.
+ // Example: when this is set to 30%, the new RC can be scaled up by 30%
+ // immediately when the rolling update starts. Once old pods have been killed,
+ // the new RC can be scaled up further, ensuring that the total number of pods running
+ // at any time during the update is at most 130% of the original pods.
+ // +optional
+ MaxSurge intstr.IntOrString
+}
+
+type DeploymentStatus struct {
+ // The generation observed by the deployment controller.
+ // +optional
+ ObservedGeneration int64
+
+ // Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+ // +optional
+ Replicas int32
+
+ // Total number of non-terminated pods targeted by this deployment that have the desired template spec.
+ // +optional
+ UpdatedReplicas int32
+
+ // Total number of ready pods targeted by this deployment.
+ // +optional
+ ReadyReplicas int32
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.
+ // +optional
+ AvailableReplicas int32
+
+ // Total number of unavailable pods targeted by this deployment. This is the total number of
+ // pods that are still required for the deployment to have 100% available capacity. They may
+ // either be pods that are running but not yet available or pods that still have not been created.
+ // +optional
+ UnavailableReplicas int32
+
+ // Represents the latest available observations of a deployment's current state.
+ Conditions []DeploymentCondition
+
+ // Count of hash collisions for the Deployment. The Deployment controller uses this
+ // field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ReplicaSet.
+ // +optional
+ CollisionCount *int32
+}
+
+type DeploymentConditionType string
+
+// These are valid conditions of a deployment.
+const (
+ // Available means the deployment is available, i.e. at least the minimum available
+ // replicas required are up and running for at least minReadySeconds.
+ DeploymentAvailable DeploymentConditionType = "Available"
+ // Progressing means the deployment is progressing. Progress for a deployment is
+ // considered when a new replica set is created or adopted, and when new pods scale
+ // up or old pods scale down. Progress is not estimated for paused deployments or
+ // when progressDeadlineSeconds is not specified.
+ DeploymentProgressing DeploymentConditionType = "Progressing"
+ // ReplicaFailure is added in a deployment when one of its pods fails to be created
+ // or deleted.
+ DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
+)
+
+// DeploymentCondition describes the state of a deployment at a certain point.
+type DeploymentCondition struct {
+ // Type of deployment condition.
+ Type DeploymentConditionType
+ // Status of the condition, one of True, False, Unknown.
+ Status api.ConditionStatus
+ // The last time this condition was updated.
+ LastUpdateTime metav1.Time
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time
+ // The reason for the condition's last transition.
+ Reason string
+ // A human readable message indicating details about the transition.
+ Message string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type DeploymentList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ // Items is the list of deployments.
+ Items []Deployment
+}
+
+type DaemonSetUpdateStrategy struct {
+ // Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
+ // Default is OnDelete.
+ // +optional
+ Type DaemonSetUpdateStrategyType
+
+ // Rolling update config params. Present only if type = "RollingUpdate".
+ //---
+ // TODO: Update this to follow our convention for oneOf, whatever we decide it
+ // to be. Same as Deployment `strategy.rollingUpdate`.
+ // See https://github.com/kubernetes/kubernetes/issues/35345
+ // +optional
+ RollingUpdate *RollingUpdateDaemonSet
+}
+
+type DaemonSetUpdateStrategyType string
+
+const (
+ // Replace the old daemons by new ones using rolling update, i.e. replace them on each node one after the other.
+ RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
+
+ // Replace the old daemons only when they are killed.
+ OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
+)
+
+// Spec to control the desired behavior of daemon set rolling update.
+type RollingUpdateDaemonSet struct {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0.
+ // Default value is 1.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have their pods stopped for an update at any given
+ // time. The update starts by stopping at most 30% of those DaemonSet pods
+ // and then brings up new DaemonSet pods in their place. Once the new pods
+ // are available, it then proceeds onto other DaemonSet pods, thus ensuring
+ // that at least 70% of the original number of DaemonSet pods are available at
+ // all times during the update.
+ // +optional
+ MaxUnavailable intstr.IntOrString
+}
+
+// DaemonSetSpec is the specification of a daemon set.
+type DaemonSetSpec struct {
+ // A label query over pods that are managed by the daemon set.
+ // Must match in order to be controlled.
+ // If empty, defaulted to labels on Pod template.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ // +optional
+ Selector *metav1.LabelSelector
+
+ // An object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ Template api.PodTemplateSpec
+
+ // An update strategy to replace existing DaemonSet pods with new pods.
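+ // A minimal sketch of populating this field from client code (illustrative
+ // values, not defaults; assumes the importing code aliases this package as
+ // apps and uses k8s.io/apimachinery/pkg/util/intstr):
+ //
+ //  UpdateStrategy: apps.DaemonSetUpdateStrategy{
+ //    Type: apps.RollingUpdateDaemonSetStrategyType,
+ //    RollingUpdate: &apps.RollingUpdateDaemonSet{
+ //      MaxUnavailable: intstr.FromInt(1),
+ //    },
+ //  },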
+ // +optional
+ UpdateStrategy DaemonSetUpdateStrategy
+
+ // The minimum number of seconds for which a newly created DaemonSet pod should
+ // be ready without any of its containers crashing, for it to be considered
+ // available. Defaults to 0 (pod will be considered available as soon as it
+ // is ready).
+ // +optional
+ MinReadySeconds int32
+
+ // DEPRECATED.
+ // A sequence number representing a specific generation of the template.
+ // Populated by the system. It can be set only during creation.
+ // +optional
+ TemplateGeneration int64
+
+ // The number of old history entries to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ // +optional
+ RevisionHistoryLimit *int32
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+type DaemonSetStatus struct {
+ // The number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ CurrentNumberScheduled int32
+
+ // The number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ NumberMisscheduled int32
+
+ // The total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ DesiredNumberScheduled int32
+
+ // The number of nodes that should be running the daemon pod and have one
+ // or more of the daemon pod running and ready.
+ NumberReady int32
+
+ // The most recent generation observed by the daemon set controller.
+ // +optional
+ ObservedGeneration int64
+
+ // The total number of nodes that are running the updated daemon pod.
+ // +optional
+ UpdatedNumberScheduled int32
+
+ // The number of nodes that should be running the
+ // daemon pod and have one or more of the daemon pod running and
+ // available (ready for at least spec.minReadySeconds)
+ // +optional
+ NumberAvailable int32
+
+ // The number of nodes that should be running the
+ // daemon pod and have none of the daemon pod running and available
+ // (ready for at least spec.minReadySeconds)
+ // +optional
+ NumberUnavailable int32
+
+ // Count of hash collisions for the DaemonSet. The DaemonSet controller
+ // uses this field as a collision avoidance mechanism when it needs to
+ // create the name for the newest ControllerRevision.
+ // +optional
+ CollisionCount *int32
+
+ // Represents the latest available observations of a DaemonSet's current state.
+ Conditions []DaemonSetCondition
+}
+
+type DaemonSetConditionType string
+
+// TODO: Add valid condition types of a DaemonSet.
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+type DaemonSetCondition struct {
+ // Type of DaemonSet condition.
+ Type DaemonSetConditionType
+ // Status of the condition, one of True, False, Unknown.
+ Status api.ConditionStatus
+ // Last time the condition transitioned from one status to another.
+ LastTransitionTime metav1.Time
+ // The reason for the condition's last transition.
+ Reason string
+ // A human readable message indicating details about the transition.
+ Message string
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSet represents the configuration of a daemon set.
+type DaemonSet struct {
+ metav1.TypeMeta
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta
+
+ // The desired behavior of this daemon set.
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus +} + +const ( + // DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead. + // DaemonSetTemplateGenerationKey is the key of the labels that is added + // to daemon set pods to distinguish between old and new pod templates + // during DaemonSet template update. + DaemonSetTemplateGenerationKey string = "pod-template-generation" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta + + // A list of daemon sets. + Items []DaemonSet +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +type ReplicaSet struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired behavior of this ReplicaSet. + // +optional + Spec ReplicaSetSpec + + // Status is the current status of this ReplicaSet. This data may be + // out of date by some window of time. + // +optional + Status ReplicaSetStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ReplicaSet +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +// As the internal representation of a ReplicaSet, it must have +// a Template set. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + Replicas int32 + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 + + // Selector is a label query over pods that should match the replica count. + // Must match in order to be controlled. + // If empty, defaulted to labels on pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // +optional + Template api.PodTemplateSpec +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the number of actual replicas. + Replicas int32 + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 + + // ObservedGeneration is the most recent generation observed by the controller. 
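+ // As an illustration: if this ReplicaSet's metadata.generation is 5 but the
+ // controller has so far only processed generation 4, ObservedGeneration reports
+ // 4 and the rest of the status should be read as describing generation 4.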
+ // +optional
+ ObservedGeneration int64
+
+ // Represents the latest available observations of a replica set's current state.
+ // +optional
+ Conditions []ReplicaSetCondition
+}
+
+type ReplicaSetConditionType string
+
+// These are valid conditions of a replica set.
+const (
+ // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created
+ // due to insufficient quota, limit ranges, pod security policy, node selectors, etc., or fails
+ // to be deleted due to the kubelet being down or finalizers failing.
+ ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure"
+)
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+type ReplicaSetCondition struct {
+ // Type of replica set condition.
+ Type ReplicaSetConditionType
+ // Status of the condition, one of True, False, Unknown.
+ Status api.ConditionStatus
+ // The last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string
+ // A human readable message indicating details about the transition.
+ // +optional
+ Message string
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go
new file mode 100644
index 000000000..4b4e0e663
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/apps/zz_generated.deepcopy.go
@@ -0,0 +1,800 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package apps
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ core "k8s.io/kubernetes/pkg/apis/core"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Data != nil {
+ out.Data = in.Data.DeepCopyObject()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision.
+func (in *ControllerRevision) DeepCopy() *ControllerRevision {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerRevision)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ControllerRevision) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. +func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { + if in == nil { + return nil + } + out := new(ControllerRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. +func (in *DaemonSet) DeepCopy() *DaemonSet { + if in == nil { + return nil + } + out := new(DaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. +func (in *DaemonSetList) DeepCopy() *DaemonSetList { + if in == nil { + return nil + } + out := new(DaemonSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. +func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { + if in == nil { + return nil + } + out := new(DaemonSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. +func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { + if in == nil { + return nil + } + out := new(DaemonSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. +func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { + if in == nil { + return nil + } + out := new(DaemonSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. 
+func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. +func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.RollbackTo = in.RollbackTo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback. +func (in *DeploymentRollback) DeepCopy() *DeploymentRollback { + if in == nil { + return nil + } + out := new(DeploymentRollback) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentRollback) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. +func (in *ReplicaSet) DeepCopy() *ReplicaSet { + if in == nil { + return nil + } + out := new(ReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. +func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { + if in == nil { + return nil + } + out := new(ReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. +func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { + if in == nil { + return nil + } + out := new(ReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. +func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { + if in == nil { + return nil + } + out := new(ReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. +func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { + if in == nil { + return nil + } + out := new(ReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig. +func (in *RollbackConfig) DeepCopy() *RollbackConfig { + if in == nil { + return nil + } + out := new(RollbackConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { + *out = *in + out.MaxUnavailable = in.MaxUnavailable + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. +func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { + if in == nil { + return nil + } + out := new(RollingUpdateDaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + out.MaxUnavailable = in.MaxUnavailable + out.MaxSurge = in.MaxSurge + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. 
+func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. +func (in *StatefulSet) DeepCopy() *StatefulSet { + if in == nil { + return nil + } + out := new(StatefulSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. +func (in *StatefulSetList) DeepCopy() *StatefulSetList { + if in == nil { + return nil + } + out := new(StatefulSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]core.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. 
+func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { + if in == nil { + return nil + } + out := new(StatefulSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. +func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { + if in == nil { + return nil + } + out := new(StatefulSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateStatefulSetStrategy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. +func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go new file mode 100644 index 000000000..ccf034535 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go @@ -0,0 +1,34 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +// MetricSpecsAnnotation is the annotation which holds non-CPU-utilization HPA metric +// specs when converting the `Metrics` field from autoscaling/v2beta1 +const MetricSpecsAnnotation = "autoscaling.alpha.kubernetes.io/metrics" + +// MetricStatusesAnnotation is the annotation which holds non-CPU-utilization HPA metric +// statuses when converting the `CurrentMetrics` field from autoscaling/v2beta1 +const MetricStatusesAnnotation = "autoscaling.alpha.kubernetes.io/current-metrics" + +// HorizontalPodAutoscalerConditionsAnnotation is the annotation which holds the conditions +// of an HPA when converting the `Conditions` field from autoscaling/v2beta1 +const HorizontalPodAutoscalerConditionsAnnotation = "autoscaling.alpha.kubernetes.io/conditions" + +// DefaultCPUUtilization is the default value for CPU utilization, provided no other +// metrics are present. 
This is here because it's used by both the v2beta1 defaulting
+// logic, and the pseudo-defaulting done in v1 conversion.
+const DefaultCPUUtilization = 80
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go
new file mode 100644
index 000000000..7c91aac8b
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+package autoscaling // import "k8s.io/kubernetes/pkg/apis/autoscaling"
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go
new file mode 100644
index 000000000..6c321a3ab
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package autoscaling
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "autoscaling"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Scale{},
+ &HorizontalPodAutoscaler{},
+ &HorizontalPodAutoscalerList{},
+ )
+ return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go
new file mode 100644
index 000000000..4ab6f8aff
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go
@@ -0,0 +1,416 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/kubernetes/pkg/apis/core" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus +} + +// ScaleSpec describes the attributes of a scale subresource. +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 + + // label query over pods that should match the replicas count. This is the same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector string +} + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + Kind string + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string + // API version of the referent + // +optional + APIVersion string +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +type HorizontalPodAutoscalerSpec struct { + // ScaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics + // should be collected, as well as to actually change the replica count. + ScaleTargetRef CrossVersionObjectReference + // MinReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. + // +optional + MinReplicas *int32 + // MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less than minReplicas. + MaxReplicas int32 + // Metrics contains the specifications to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated by multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa.
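+	// For example (an illustrative calculation, not part of the upstream comment): +	// with 4 current pods, a current metric value of 200 and a target value of +	// 100, the desired replica count is 4 * (200 / 100) = 8 pods.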
See the individual metric source types for + // more information about how each type of metric must respond. + // +optional + Metrics []MetricSpec +} + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +var ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" + // ExternalMetricSourceType is a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of the cluster + // (for example, the length of a queue in a cloud messaging service, or + // QPS from a load balancer running outside of the cluster). + ExternalMetricSourceType MetricSourceType = "External" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // Type is the type of metric source. It should be one of "Object", + // "Pods", "Resource" or "External", each mapping to a matching field in the object. + Type MetricSourceType + + // Object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource + // Pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource + // Resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource + // External refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of the cluster + // (for example, the length of a queue in a cloud messaging service, or + // QPS from a load balancer running outside of the cluster). + // +optional + External *ExternalMetricSource +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + DescribedObject CrossVersionObjectReference + Target MetricTarget + Metric MetricIdentifier +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value.
+type PodsMetricSource struct { + // metric identifies the target metric by name and selector + Metric MetricIdentifier + // target specifies the target value for the given metric + Target MetricTarget +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // Name is the name of the resource in question. + Name api.ResourceName + // Target specifies the target value for the given metric + Target MetricTarget +} + +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example, the length of a queue in a cloud +// messaging service, or QPS from a load balancer running outside of the cluster). +type ExternalMetricSource struct { + // Metric identifies the target metric by name and selector + Metric MetricIdentifier + // Target specifies the target value for the given metric + Target MetricTarget +} + +// MetricIdentifier defines the name and optionally selector for a metric +type MetricIdentifier struct { + // Name is the name of the given metric + Name string + // Selector is the selector for the given metric; + // it is a standard Kubernetes label selector + // +optional + Selector *metav1.LabelSelector +} + +// MetricTarget defines the target value, average value, or average utilization of a specific metric +type MetricTarget struct { + // Type represents whether the metric type is Utilization, Value, or AverageValue + Type MetricTargetType + // Value is the target value of the metric (as a quantity). + Value *resource.Quantity + // AverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + AverageValue *resource.Quantity + + // AverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // Currently only valid for Resource metric source type + AverageUtilization *int32 +} + +// MetricTargetType specifies the type of metric being targeted, and should be either +// "Value", "AverageValue", or "Utilization" +type MetricTargetType string + +var ( + UtilizationMetricType MetricTargetType = "Utilization" + ValueMetricType MetricTargetType = "Value" + AverageValueMetricType MetricTargetType = "AverageValue" +) + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +type HorizontalPodAutoscalerStatus struct { + // ObservedGeneration is the most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 + + // LastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time + + // CurrentReplicas is the current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + CurrentReplicas int32 + + // DesiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler.
+ DesiredReplicas int32 + + // CurrentMetrics is the last read state of the metrics used by this autoscaler. + // +optional + CurrentMetrics []MetricStatus + + // Conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + Conditions []HorizontalPodAutoscalerCondition +} + +// ConditionStatus indicates the status of a condition (true, false, or unknown). +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; +// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// HorizontalPodAutoscalerConditionType are the valid conditions of +// a HorizontalPodAutoscaler. +type HorizontalPodAutoscalerConditionType string + +var ( + // ScalingActive indicates that the HPA controller is able to scale if necessary: + // it's correctly configured, can fetch the desired metrics, and isn't disabled. + ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" + // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, + // such as being in a backoff window, or being unable to access/update the target scale. + AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" + // ScalingLimited indicates that the calculated scale based on metrics would be above or + // below the range for the HPA, and has thus been capped. + ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited" +) + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. +type HorizontalPodAutoscalerCondition struct { + // Type describes the current condition + Type HorizontalPodAutoscalerConditionType + // Status is the status of the condition (True, False, Unknown) + Status ConditionStatus + // LastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + LastTransitionTime metav1.Time + // Reason is the reason for the condition's last transition. + // +optional + Reason string + // Message is a human-readable explanation containing details about + // the transition + // +optional + Message string +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // Type is the type of metric source. It will be one of "Object", + // "Pods", "Resource" or "External", each corresponding to a matching field in the object. + Type MetricSourceType + + // Object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus + // Pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus + // Resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory).
Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus + // External refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of the cluster + // (for example, the length of a queue in a cloud messaging service, or + // QPS from a load balancer running outside of the cluster). + // +optional + External *ExternalMetricStatus +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + Metric MetricIdentifier + Current MetricValueStatus + + DescribedObject CrossVersionObjectReference +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + Metric MetricIdentifier + Current MetricValueStatus +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // Name is the name of the resource in question. + Name api.ResourceName + Current MetricValueStatus +} + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. +type ExternalMetricStatus struct { + Metric MetricIdentifier + Current MetricValueStatus +} + +// MetricValueStatus holds the current value for a metric. +type MetricValueStatus struct { + Value *resource.Quantity + AverageValue *resource.Quantity + AverageUtilization *int32 +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta + // Metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Spec is the specification for the behaviour of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec + + // Status is the current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta + // Metadata is the standard list metadata. + // +optional + metav1.ListMeta + + // Items is the list of horizontal pod autoscaler objects.
+ Items []HorizontalPodAutoscaler +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go new file mode 100644 index 000000000..03b8cd8b6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go @@ -0,0 +1,547 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package autoscaling + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. +func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { + if in == nil { + return nil + } + out := new(CrossVersionObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource. +func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource { + if in == nil { + return nil + } + out := new(ExternalMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus. +func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { + if in == nil { + return nil + } + out := new(ExternalMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. 
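+// Illustrative usage (not part of the generated code): a controller that reads +// an object from a shared informer cache should mutate only a copy, e.g. +// hpaCopy := hpa.DeepCopy(); hpaCopy.Spec.MaxReplicas = 10, which leaves the +// cached original untouched.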
+func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. +func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. +func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) { + *out = *in + out.ScaleTargetRef = in.ScaleTargetRef + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec. +func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = (*in).DeepCopy() + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]HorizontalPodAutoscalerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus. +func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricIdentifier) DeepCopyInto(out *MetricIdentifier) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricIdentifier. +func (in *MetricIdentifier) DeepCopy() *MetricIdentifier { + if in == nil { + return nil + } + out := new(MetricIdentifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + (*in).DeepCopyInto(*out) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalMetricSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. +func (in *MetricSpec) DeepCopy() *MetricSpec { + if in == nil { + return nil + } + out := new(MetricSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalMetricStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. 
+func (in *MetricStatus) DeepCopy() *MetricStatus { + if in == nil { + return nil + } + out := new(MetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricTarget) DeepCopyInto(out *MetricTarget) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + x := (*in).DeepCopy() + *out = &x + } + if in.AverageValue != nil { + in, out := &in.AverageValue, &out.AverageValue + x := (*in).DeepCopy() + *out = &x + } + if in.AverageUtilization != nil { + in, out := &in.AverageUtilization, &out.AverageUtilization + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTarget. +func (in *MetricTarget) DeepCopy() *MetricTarget { + if in == nil { + return nil + } + out := new(MetricTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricValueStatus) DeepCopyInto(out *MetricValueStatus) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + x := (*in).DeepCopy() + *out = &x + } + if in.AverageValue != nil { + in, out := &in.AverageValue, &out.AverageValue + x := (*in).DeepCopy() + *out = &x + } + if in.AverageUtilization != nil { + in, out := &in.AverageUtilization, &out.AverageUtilization + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricValueStatus. +func (in *MetricValueStatus) DeepCopy() *MetricValueStatus { + if in == nil { + return nil + } + out := new(MetricValueStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) { + *out = *in + out.DescribedObject = in.DescribedObject + in.Target.DeepCopyInto(&out.Target) + in.Metric.DeepCopyInto(&out.Metric) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource. +func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource { + if in == nil { + return nil + } + out := new(ObjectMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Current.DeepCopyInto(&out.Current) + out.DescribedObject = in.DescribedObject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus. +func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus { + if in == nil { + return nil + } + out := new(ObjectMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource. 
+func (in *PodsMetricSource) DeepCopy() *PodsMetricSource { + if in == nil { + return nil + } + out := new(PodsMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus. +func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus { + if in == nil { + return nil + } + out := new(PodsMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource. +func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource { + if in == nil { + return nil + } + out := new(ResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) { + *out = *in + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus. +func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus { + if in == nil { + return nil + } + out := new(ResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. +func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. 
+func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go new file mode 100644 index 000000000..bef73c0db --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go @@ -0,0 +1,104 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file should be consistent with pkg/api/v1/annotation_key_constants.go. + +package core + +const ( + // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy + // webhook backend fails. + ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" + + // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation + PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" + + // MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods + MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" + + // TolerationsAnnotationKey represents the key of tolerations data (json serialized) + // in the Annotations of a Pod. + TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" + + // TaintsAnnotationKey represents the key of taints data (json serialized) + // in the Annotations of a Node. + TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" + + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. + SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime. + SeccompProfileRuntimeDefault string = "runtime/default" + + // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker. + // This is now deprecated and should be replaced by SeccompProfileRuntimeDefault. + DeprecatedSeccompProfileDockerDefault string = "docker/default" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node.
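+ // For example (illustrative, assuming the value is interpreted as seconds): + // setting the Node annotation "node.alpha.kubernetes.io/ttl" to "30" + // suggests a 30-second cache TTL.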
+ ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // BootstrapCheckpointAnnotationKey represents a Resource (Pod) that should be checkpointed by + // the kubelet prior to running + BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint" + + // NonConvertibleAnnotationPrefix is the annotation key prefix used to identify non-convertible json paths. + NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" + + kubectlPrefix = "kubectl.kubernetes.io/" + + // LastAppliedConfigAnnotation is the annotation used to store the previous + // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. + LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" + + // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers + // + // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to + // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow + // access only from the CIDRs currently allocated to MIT & the USPS. + // + // Not all cloud providers support this annotation, though AWS & GCE do. + AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that + // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z') + // of the last change, of some Pod or Service object, that triggered the endpoints object change. + // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints + // controller at T1, and the Endpoints object was changed at T2, the + // EndpointsLastChangeTriggerTime would be set to T0. + // + // The "endpoints change trigger" here means any Pod or Service change that resulted in the + // Endpoints object change. + // + // Given the definition of the "endpoints change trigger", please note that this annotation will + // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the + // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's + // already set). + // + // This annotation will be used to compute the in-cluster network programming latency SLI, see + // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md + EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go new file mode 100644 index 000000000..6017bfdab --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package core contains the latest (or "internal") version of the +// Kubernetes API objects. These are the API objects as represented in memory.
+// The contract presented to clients is located in the versioned packages, +// which are sub-directories. The first one is "v1". Those packages +// describe how a particular version is serialized to storage/network. +package core // import "k8s.io/kubernetes/pkg/apis/core" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go new file mode 100644 index 000000000..a26f80568 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +// Field path constants that are specific to the internal API +// representation. +const ( + NodeUnschedulableField = "spec.unschedulable" + ObjectNameField = "metadata.name" + PodHostField = "spec.nodeName" + PodStatusField = "status.phase" + SecretTypeField = "type" + + EventReasonField = "action" + EventSourceField = "reportingComponent" + EventTypeField = "type" + EventInvolvedKindField = "involvedObject.kind" + EventInvolvedNamespaceField = "involvedObject.namespace" + EventInvolvedNameField = "involvedObject.name" + EventInvolvedUIDField = "involvedObject.uid" + EventInvolvedAPIVersionField = "involvedObject.apiVersion" + EventInvolvedResourceVersionField = "involvedObject.resourceVersion" + EventInvolvedFieldPathField = "involvedObject.fieldPath" +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go new file mode 100644 index 000000000..10c33f66b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go @@ -0,0 +1,539 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helper + +import ( + "encoding/json" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/kubernetes/pkg/apis/core" +) + +// IsHugePageResourceName returns true if the resource name has the huge page +// resource prefix. +func IsHugePageResourceName(name core.ResourceName) bool { + return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) +} + +// IsQuotaHugePageResourceName returns true if the resource name has the quota +// related huge page resource prefix. 
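+// For example, assuming the standard "hugepages-" and "requests.hugepages-" +// prefixes, "hugepages-2Mi" and "requests.hugepages-2Mi" both match, while +// "cpu" does not.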
+func IsQuotaHugePageResourceName(name core.ResourceName) bool { + return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) || strings.HasPrefix(string(name), core.ResourceRequestsHugePagesPrefix) +} + +// HugePageResourceName returns a ResourceName with the canonical hugepage +// prefix prepended for the specified page size. The page size is converted +// to its canonical representation. +func HugePageResourceName(pageSize resource.Quantity) core.ResourceName { + return core.ResourceName(fmt.Sprintf("%s%s", core.ResourceHugePagesPrefix, pageSize.String())) +} + +// HugePageSizeFromResourceName returns the page size for the specified huge page +// resource name. If the specified input is not a valid huge page resource name +// an error is returned. +func HugePageSizeFromResourceName(name core.ResourceName) (resource.Quantity, error) { + if !IsHugePageResourceName(name) { + return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name) + } + pageSize := strings.TrimPrefix(string(name), core.ResourceHugePagesPrefix) + return resource.ParseQuantity(pageSize) +} + +// NonConvertibleFields iterates over the provided map and filters out all but +// the keys with the "non-convertible.kubernetes.io" prefix. +func NonConvertibleFields(annotations map[string]string) map[string]string { + nonConvertibleKeys := map[string]string{} + for key, value := range annotations { + if strings.HasPrefix(key, core.NonConvertibleAnnotationPrefix) { + nonConvertibleKeys[key] = value + } + } + return nonConvertibleKeys +} + +// Semantic can do semantic deep equality checks for core objects. +// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true +var Semantic = conversion.EqualitiesOrDie( + func(a, b resource.Quantity) bool { + // Ignore formatting, only care that numeric value stayed the same. + // TODO: if we decide it's important, it should be safe to start comparing the format. + // + // Uninitialized quantities are equivalent to 0 quantities.
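+ // For example, resource.MustParse("1000m") and resource.MustParse("1") + // compare as equal here even though their string representations differ.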
+ return a.Cmp(b) == 0 + }, + func(a, b metav1.MicroTime) bool { + return a.UTC() == b.UTC() + }, + func(a, b metav1.Time) bool { + return a.UTC() == b.UTC() + }, + func(a, b labels.Selector) bool { + return a.String() == b.String() + }, + func(a, b fields.Selector) bool { + return a.String() == b.String() + }, +) + +var standardResourceQuotaScopes = sets.NewString( + string(core.ResourceQuotaScopeTerminating), + string(core.ResourceQuotaScopeNotTerminating), + string(core.ResourceQuotaScopeBestEffort), + string(core.ResourceQuotaScopeNotBestEffort), + string(core.ResourceQuotaScopePriorityClass), +) + +// IsStandardResourceQuotaScope returns true if the scope is a standard value +func IsStandardResourceQuotaScope(str string) bool { + return standardResourceQuotaScopes.Has(str) +} + +var podObjectCountQuotaResources = sets.NewString( + string(core.ResourcePods), +) + +var podComputeQuotaResources = sets.NewString( + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), +) + +// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope +func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource string) bool { + switch scope { + case core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeNotBestEffort, core.ResourceQuotaScopePriorityClass: + return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) + case core.ResourceQuotaScopeBestEffort: + return podObjectCountQuotaResources.Has(resource) + default: + return true + } +} + +var standardContainerResources = sets.NewString( + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), +) + +// IsStandardContainerResourceName returns true if the container can make a resource request +// for the specified resource +func IsStandardContainerResourceName(str string) bool { + return standardContainerResources.Has(str) || IsHugePageResourceName(core.ResourceName(str)) +} + +// IsExtendedResourceName returns true if: +// 1. the resource name is not in the default namespace; +// 2. the resource name does not have the "requests." prefix, +// to avoid confusion with the convention in quota; +// 3. it satisfies the rules in IsQualifiedName() after being converted into a quota resource name +func IsExtendedResourceName(name core.ResourceName) bool { + if IsNativeResource(name) || strings.HasPrefix(string(name), core.DefaultResourceRequestsPrefix) { + return false + } + // Ensure it satisfies the rules in IsQualifiedName() after being converted into a quota resource name + nameForQuota := fmt.Sprintf("%s%s", core.DefaultResourceRequestsPrefix, string(name)) + if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 { + return false + } + return true +} + +// IsNativeResource returns true if the resource name is in the +// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are +// implicitly in the kubernetes.io/ namespace. +func IsNativeResource(name core.ResourceName) bool { + return !strings.Contains(string(name), "/") || + strings.Contains(string(name), core.ResourceDefaultNamespacePrefix) +} + +// IsOvercommitAllowed returns true if the resource is in the default +// namespace and is not hugepages.
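+// For example, "cpu" and "memory" allow overcommit, while "hugepages-2Mi" +// (a hugepage resource) and "example.com/gpu" (a non-native resource) do not.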
+func IsOvercommitAllowed(name core.ResourceName) bool { + return IsNativeResource(name) && + !IsHugePageResourceName(name) +} + +var standardLimitRangeTypes = sets.NewString( + string(core.LimitTypePod), + string(core.LimitTypeContainer), + string(core.LimitTypePersistentVolumeClaim), +) + +// IsStandardLimitRangeType returns true if the type is a standard limit range type +// (Pod, Container, or PersistentVolumeClaim) +func IsStandardLimitRangeType(str string) bool { + return standardLimitRangeTypes.Has(str) +} + +var standardQuotaResources = sets.NewString( + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), + string(core.ResourceRequestsStorage), + string(core.ResourceRequestsEphemeralStorage), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceLimitsEphemeralStorage), + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceConfigMaps), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), +) + +// IsStandardQuotaResourceName returns true if the resource is known to +// the quota tracking system +func IsStandardQuotaResourceName(str string) bool { + return standardQuotaResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) +} + +var standardResources = sets.NewString( + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), + string(core.ResourceRequestsEphemeralStorage), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceLimitsEphemeralStorage), + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourceConfigMaps), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceStorage), + string(core.ResourceRequestsStorage), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), +) + +// IsStandardResourceName returns true if the resource is known to the system +func IsStandardResourceName(str string) bool { + return standardResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) +} + +var integerResources = sets.NewString( + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourceConfigMaps), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), +) + +// IsIntegerResourceName returns true if the resource is measured in integer values +func IsIntegerResourceName(str string) bool { + return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str)) +} + +// IsServiceIPSet checks whether the service's ClusterIP is set; +// it does not perform validation of the value. +func IsServiceIPSet(service *core.Service) bool { + return service.Spec.ClusterIP != core.ClusterIPNone && service.Spec.ClusterIP != "" +} + +var standardFinalizers = sets.NewString( + string(core.FinalizerKubernetes), + metav1.FinalizerOrphanDependents, + metav1.FinalizerDeleteDependents, +) +
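+// IsStandardFinalizerName returns true if the name is a finalizer handled by +// the system: the built-in "kubernetes" namespace finalizer or the metav1 +// orphan/delete-dependents finalizers.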
+func IsStandardFinalizerName(str string) bool { + return standardFinalizers.Has(str) +} + +// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, +// only if they do not already exist +func AddToNodeAddresses(addresses *[]core.NodeAddress, addAddresses ...core.NodeAddress) { + for _, add := range addAddresses { + exists := false + for _, existing := range *addresses { + if existing.Address == add.Address && existing.Type == add.Type { + exists = true + break + } + } + if !exists { + *addresses = append(*addresses, add) + } + } +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusEqual(l, r *core.LoadBalancerStatus) bool { + return ingressSliceEqual(l.Ingress, r.Ingress) +} + +func ingressSliceEqual(lhs, rhs []core.LoadBalancerIngress) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !ingressEqual(&lhs[i], &rhs[i]) { + return false + } + } + return true +} + +func ingressEqual(lhs, rhs *core.LoadBalancerIngress) bool { + if lhs.IP != rhs.IP { + return false + } + if lhs.Hostname != rhs.Hostname { + return false + } + return true +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX. +func GetAccessModesAsString(modes []core.PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, core.ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, core.ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, core.ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString +func GetAccessModesFromString(modes string) []core.PersistentVolumeAccessMode { + strmodes := strings.Split(modes, ",") + accessModes := []core.PersistentVolumeAccessMode{} + for _, s := range strmodes { + s = strings.Trim(s, " ") + switch { + case s == "RWO": + accessModes = append(accessModes, core.ReadWriteOnce) + case s == "ROX": + accessModes = append(accessModes, core.ReadOnlyMany) + case s == "RWX": + accessModes = append(accessModes, core.ReadWriteMany) + } + } + return accessModes +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []core.PersistentVolumeAccessMode) []core.PersistentVolumeAccessMode { + accessModes := []core.PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []core.PersistentVolumeAccessMode, mode core.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement core type into a struct that implements +// labels.Selector.
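+// For example (illustrative), the requirement {Key: "zone", Operator: In, +// Values: ["us-east-1a"]} converts to a selector equivalent to +// "zone in (us-east-1a)".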
+func NodeSelectorRequirementsAsSelector(nsm []core.NodeSelectorRequirement) (labels.Selector, error) { + if len(nsm) == 0 { + return labels.Nothing(), nil + } + selector := labels.NewSelector() + for _, expr := range nsm { + var op selection.Operator + switch expr.Operator { + case core.NodeSelectorOpIn: + op = selection.In + case core.NodeSelectorOpNotIn: + op = selection.NotIn + case core.NodeSelectorOpExists: + op = selection.Exists + case core.NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case core.NodeSelectorOpGt: + op = selection.GreaterThan + case core.NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +// NodeSelectorRequirementsAsFieldSelector converts the []NodeSelectorRequirement core type into a struct that implements +// fields.Selector. +func NodeSelectorRequirementsAsFieldSelector(nsm []core.NodeSelectorRequirement) (fields.Selector, error) { + if len(nsm) == 0 { + return fields.Nothing(), nil + } + + selectors := []fields.Selector{} + for _, expr := range nsm { + switch expr.Operator { + case core.NodeSelectorOpIn: + if len(expr.Values) != 1 { + return nil, fmt.Errorf("unexpected number of values (%d) for node field selector operator %q", + len(expr.Values), expr.Operator) + } + selectors = append(selectors, fields.OneTermEqualSelector(expr.Key, expr.Values[0])) + + case core.NodeSelectorOpNotIn: + if len(expr.Values) != 1 { + return nil, fmt.Errorf("unexpected number of values (%d) for node field selector operator %q", + len(expr.Values), expr.Operator) + } + selectors = append(selectors, fields.OneTermNotEqualSelector(expr.Key, expr.Values[0])) + + default: + return nil, fmt.Errorf("%q is not a valid node field selector operator", expr.Operator) + } + } + + return fields.AndSelectors(selectors...), nil +} + +// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations +// and converts it to the []Toleration type in core. +func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]core.Toleration, error) { + var tolerations []core.Toleration + if len(annotations) > 0 && annotations[core.TolerationsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[core.TolerationsAnnotationKey]), &tolerations) + if err != nil { + return tolerations, err + } + } + return tolerations, nil +} + +// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. +// Returns true if something was updated, false otherwise. +func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool { + podTolerations := pod.Spec.Tolerations + + var newTolerations []core.Toleration + updated := false + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + if Semantic.DeepEqual(toleration, podTolerations[i]) { + return false + } + newTolerations = append(newTolerations, *toleration) + updated = true + continue + } + + newTolerations = append(newTolerations, podTolerations[i]) + } + + if !updated { + newTolerations = append(newTolerations, *toleration) + } + + pod.Spec.Tolerations = newTolerations + return true +} + +// GetTaintsFromNodeAnnotations gets the json serialized taints data from Node.Annotations +// and converts it to the []Taint type in core.
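+// For example (illustrative), a node annotated with +// scheduler.alpha.kubernetes.io/taints: '[{"key":"dedicated","value":"gpu","effect":"NoSchedule"}]' +// yields a single NoSchedule taint.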
+func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) {
+	var taints []core.Taint
+	if len(annotations) > 0 && annotations[core.TaintsAnnotationKey] != "" {
+		err := json.Unmarshal([]byte(annotations[core.TaintsAnnotationKey]), &taints)
+		if err != nil {
+			return []core.Taint{}, err
+		}
+	}
+	return taints, nil
+}
+
+// GetPersistentVolumeClass returns StorageClassName.
+func GetPersistentVolumeClass(volume *core.PersistentVolume) string {
+	// Use beta annotation first
+	if class, found := volume.Annotations[core.BetaStorageClassAnnotation]; found {
+		return class
+	}
+
+	return volume.Spec.StorageClassName
+}
+
+// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was
+// requested, it returns "".
+func GetPersistentVolumeClaimClass(claim *core.PersistentVolumeClaim) string {
+	// Use beta annotation first
+	if class, found := claim.Annotations[core.BetaStorageClassAnnotation]; found {
+		return class
+	}
+
+	if claim.Spec.StorageClassName != nil {
+		return *claim.Spec.StorageClassName
+	}
+
+	return ""
+}
+
+// PersistentVolumeClaimHasClass returns true if the given claim has its StorageClassName field set.
+func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool {
+	// Use beta annotation first
+	if _, found := claim.Annotations[core.BetaStorageClassAnnotation]; found {
+		return true
+	}
+
+	if claim.Spec.StorageClassName != nil {
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go
new file mode 100644
index 000000000..d2d82e27d
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the v1 monolithic api, making it available as an
+// option to all of the API encoding/decoding machinery.
+package install
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/kubernetes/pkg/api/legacyscheme"
+	"k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/kubernetes/pkg/apis/core/v1"
+)
+
+func init() {
+	Install(legacyscheme.Scheme)
+}
+
+// Install registers the API group and adds types to a scheme
+func Install(scheme *runtime.Scheme) {
+	utilruntime.Must(core.AddToScheme(scheme))
+	utilruntime.Must(v1.AddToScheme(scheme))
+	utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion))
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/json.go b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go
new file mode 100644
index 000000000..937cd056c
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import "encoding/json" + +// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations +// to prevent anyone from marshaling these internal structs. + +var _ = json.Marshaler(&AvoidPods{}) +var _ = json.Unmarshaler(&AvoidPods{}) + +func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } +func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go new file mode 100644 index 000000000..55b27f30b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go @@ -0,0 +1,34 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//TODO: consider making these methods functions, because we don't want helper +//functions in the k8s.io/api repo. + +package core + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go new file mode 100644 index 000000000..cf199cee7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pods + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/fieldpath" +) + +// ConvertDownwardAPIFieldLabel converts the specified downward API field label +// and its value in the pod of the specified version to the internal version, +// and returns the converted label and value. This function returns an error if +// the conversion fails. 
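For instance, the legacy spec.host label sent by old v1 clients is rewritten to spec.nodeName, and unsupported labels produce an error; a brief sketch against the vendored package path shown in the diff header above:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core/pods"
)

func main() {
	label, value, err := pods.ConvertDownwardAPIFieldLabel("v1", "spec.host", "node-1")
	fmt.Println(label, value, err) // spec.nodeName node-1 <nil>

	_, _, err = pods.ConvertDownwardAPIFieldLabel("v1", "status.qosClass", "")
	fmt.Println(err != nil) // true: label is not in the supported set
}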
+func ConvertDownwardAPIFieldLabel(version, label, value string) (string, string, error) { + if version != "v1" { + return "", "", fmt.Errorf("unsupported pod version: %s", version) + } + + if path, _, ok := fieldpath.SplitMaybeSubscriptedPath(label); ok { + switch path { + case "metadata.annotations", "metadata.labels": + return label, value, nil + default: + return "", "", fmt.Errorf("field label does not support subscript: %s", label) + } + } + + switch label { + case "metadata.annotations", + "metadata.labels", + "metadata.name", + "metadata.namespace", + "metadata.uid", + "spec.nodeName", + "spec.restartPolicy", + "spec.serviceAccountName", + "spec.schedulerName", + "status.phase", + "status.hostIP", + "status.podIP": + return label, value, nil + // This is for backwards compatibility with old v1 clients which send spec.host + case "spec.host": + return "spec.nodeName", value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/register.go b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go new file mode 100644 index 000000000..c6cd8681d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go @@ -0,0 +1,98 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package core
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = ""
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
+		return err
+	}
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Pod{},
+		&PodList{},
+		&PodStatusResult{},
+		&PodTemplate{},
+		&PodTemplateList{},
+		&ReplicationControllerList{},
+		&ReplicationController{},
+		&ServiceList{},
+		&Service{},
+		&ServiceProxyOptions{},
+		&NodeList{},
+		&Node{},
+		&NodeProxyOptions{},
+		&Endpoints{},
+		&EndpointsList{},
+		&Binding{},
+		&Event{},
+		&EventList{},
+		&List{},
+		&LimitRange{},
+		&LimitRangeList{},
+		&ResourceQuota{},
+		&ResourceQuotaList{},
+		&Namespace{},
+		&NamespaceList{},
+		&ServiceAccount{},
+		&ServiceAccountList{},
+		&Secret{},
+		&SecretList{},
+		&PersistentVolume{},
+		&PersistentVolumeList{},
+		&PersistentVolumeClaim{},
+		&PersistentVolumeClaimList{},
+		&PodAttachOptions{},
+		&PodLogOptions{},
+		&PodExecOptions{},
+		&PodPortForwardOptions{},
+		&PodProxyOptions{},
+		&ComponentStatus{},
+		&ComponentStatusList{},
+		&SerializedReference{},
+		&RangeAllocation{},
+		&ConfigMap{},
+		&ConfigMapList{},
+	)
+
+	return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go
new file mode 100644
index 000000000..1367e00e5
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package core
+
+import (
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+func (self ResourceName) String() string {
+	return string(self)
+}
+
+// Returns the CPU limit if specified.
+func (self *ResourceList) Cpu() *resource.Quantity {
+	if val, ok := (*self)[ResourceCPU]; ok {
+		return &val
+	}
+	return &resource.Quantity{Format: resource.DecimalSI}
+}
+
+// Returns the Memory limit if specified.
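These accessors return a copy of the stored quantity, or a zero quantity in the matching format when the key is absent (the Memory accessor just below follows the same pattern). A short sketch, assuming the vendored core package and apimachinery's resource package are importable:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	rl := core.ResourceList{
		core.ResourceCPU: resource.MustParse("500m"),
	}
	fmt.Println(rl.Cpu().String())    // 500m
	fmt.Println(rl.Memory().String()) // 0 (BinarySI zero value, since memory is unset)
}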
+func (self *ResourceList) Memory() *resource.Quantity {
+	if val, ok := (*self)[ResourceMemory]; ok {
+		return &val
+	}
+	return &resource.Quantity{Format: resource.BinarySI}
+}
+
+func (self *ResourceList) Pods() *resource.Quantity {
+	if val, ok := (*self)[ResourcePods]; ok {
+		return &val
+	}
+	return &resource.Quantity{}
+}
+
+func (self *ResourceList) StorageEphemeral() *resource.Quantity {
+	if val, ok := (*self)[ResourceEphemeralStorage]; ok {
+		return &val
+	}
+	return &resource.Quantity{}
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go
new file mode 100644
index 000000000..ae1feb74d
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+//TODO: consider making these methods functions, because we don't want helper
+//functions in the k8s.io/api repo.
+
+package core
+
+import "fmt"
+
+// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect;
+// if two taints have the same key:effect, they are regarded as matching.
+func (t *Taint) MatchTaint(taintToMatch Taint) bool {
+	return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
+}
+
+// ToString converts the taint struct to a string in the format key=value:effect or key:effect.
+func (t *Taint) ToString() string {
+	if len(t.Value) == 0 {
+		return fmt.Sprintf("%v:%v", t.Key, t.Effect)
+	}
+	return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go
new file mode 100644
index 000000000..1dfbc9f1b
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+//TODO: consider making these methods functions, because we don't want helper
+//functions in the k8s.io/api repo.
+
+package core
+
+// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>;
+// if two tolerations have the same <key,effect,operator,value> combination, they are regarded as matching.
+// TODO: uniqueness check for tolerations in api validations.
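Concretely, two tolerations match only when all four fields agree; a small hedged sketch using the vendored core package:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/core"
)

func main() {
	a := core.Toleration{Key: "dedicated", Operator: core.TolerationOpEqual, Value: "gpu", Effect: core.TaintEffectNoSchedule}
	b := a
	fmt.Println(a.MatchToleration(&b)) // true: identical <key,effect,operator,value>

	b.Value = "fpga"
	fmt.Println(a.MatchToleration(&b)) // false: Value differs
}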
+func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { + return t.Key == tolerationToMatch.Key && + t.Effect == tolerationToMatch.Effect && + t.Operator == tolerationToMatch.Operator && + t.Value == tolerationToMatch.Value +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go new file mode 100644 index 000000000..251547f60 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -0,0 +1,4725 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll = "" + // NamespaceNone is the argument for a context when there is no namespace. + NamespaceNone = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic = "kube-public" + // NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats) + NamespaceNodeLease = "kube-node-lease" + // TerminationMessagePathDefault means the default path to capture the application termination message running in a container + TerminationMessagePathDefault = "/dev/termination-log" +) + +// Volume represents a named volume in a pod that may be accessed by any containers in the pod. +type Volume struct { + // Required: This must be a DNS_LABEL. Each volume in a pod must have + // a unique name. + Name string + // The VolumeSource represents the location and type of a volume to mount. + // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + // +optional + VolumeSource +} + +// VolumeSource represents the source location of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents file or directory on the host machine that is + // directly exposed to the container. This is generally used for system + // agents or other privileged things that are allowed to see the host + // machine. Most containers will NOT need this. + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource + // EmptyDir represents a temporary directory that shares a pod's lifetime. 
+ // +optional + EmptyDir *EmptyDirVolumeSource + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // GitRepo represents a git repository at a particular revision. + // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + // into the Pod's container. + // +optional + GitRepo *GitRepoVolumeSource + // Secret represents a secret that should populate this volume. + // +optional + Secret *SecretVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIVolumeSource + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime + // +optional + Glusterfs *GlusterfsVolumeSource + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDVolumeSource + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + FlexVolume *FlexVolumeSource + + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderVolumeSource + + // CephFS represents a Cephfs mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + + // DownwardAPI represents metadata about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
+ // +optional + AzureDisk *AzureDiskVolumeSource + // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // +optional + StorageOS *StorageOSVolumeSource +} + +// Similar to VolumeSource but meant for the administrator who creates PVs. +// Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource + // AWSElasticBlockStore represents an AWS EBS disk that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. + // +optional + HostPath *HostPathVolumeSource + // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod + // +optional + Glusterfs *GlusterfsPersistentVolumeSource + // NFS represents an NFS mount on the host that shares a pod's lifetime + // +optional + NFS *NFSVolumeSource + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime + // +optional + RBD *RBDPersistentVolumeSource + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource + // ISCSIPersistentVolumeSource represents an ISCSI resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // +optional + ISCSI *ISCSIPersistentVolumeSource + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + FlexVolume *FlexPersistentVolumeSource + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // +optional + Cinder *CinderPersistentVolumeSource + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSPersistentVolumeSource + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. 
+	// +optional
+	AzureFile *AzureFilePersistentVolumeSource
+	// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
+	// +optional
+	VsphereVolume *VsphereVirtualDiskVolumeSource
+	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+	// +optional
+	AzureDisk *AzureDiskVolumeSource
+	// PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine
+	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource
+	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
+	// +optional
+	PortworxVolume *PortworxVolumeSource
+	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+	// +optional
+	ScaleIO *ScaleIOPersistentVolumeSource
+	// Local represents directly-attached storage with node affinity
+	// +optional
+	Local *LocalVolumeSource
+	// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
+	// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
+	// +optional
+	StorageOS *StorageOSPersistentVolumeSource
+	// CSI (Container Storage Interface) represents storage that is handled by an external CSI driver.
+	// +optional
+	CSI *CSIPersistentVolumeSource
+}
+
+type PersistentVolumeClaimVolumeSource struct {
+	// ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume
+	ClaimName string
+	// Optional: Defaults to false (read/write). ReadOnly here
+	// will force the ReadOnly setting in VolumeMounts
+	// +optional
+	ReadOnly bool
+}
+
+const (
+	// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
+	// It's deprecated and will be removed in a future release. (#51440)
+	BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
+
+	// MountOptionAnnotation defines mount option annotation used in PVs
+	MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type PersistentVolume struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ObjectMeta
+
+	// Spec defines a persistent volume owned by the cluster
+	// +optional
+	Spec PersistentVolumeSpec
+
+	// Status represents the current information about persistent volume.
+	// +optional
+	Status PersistentVolumeStatus
+}
+
+type PersistentVolumeSpec struct {
+	// Capacity represents the actual resources of the volume
+	Capacity ResourceList
+	// Source represents the location and type of a volume to mount.
+	PersistentVolumeSource
+	// AccessModes contains all ways the volume can be mounted
+	// +optional
+	AccessModes []PersistentVolumeAccessMode
+	// ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+	// ClaimRef is expected to be non-nil when bound.
+	// claim.VolumeName is the authoritative bind between PV and PVC.
+	// When set to non-nil value, PVC.Spec.Selector of the referenced PVC is
+	// ignored, i.e. labels of this PV do not need to match PVC selector.
+	// +optional
+	ClaimRef *ObjectReference
+	// Optional: what happens to a persistent volume when released from its claim.
+	// +optional
+	PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy
+	// Name of StorageClass to which this persistent volume belongs. Empty value
+	// means that this volume does not belong to any StorageClass.
+ // +optional + StorageClassName string + // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will + // simply fail if one is invalid. + // +optional + MountOptions []string + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is a beta feature. + // +optional + VolumeMode *PersistentVolumeMode + // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. + // This field influences the scheduling of pods that use this volume. + // +optional + NodeAffinity *VolumeNodeAffinity +} + +// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. +type VolumeNodeAffinity struct { + // Required specifies hard node constraints that must be met. + Required *NodeSelector +} + +// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes +type PersistentVolumeReclaimPolicy string + +const ( + // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. + // The volume plugin must support Recycling. + // DEPRECATED: The PersistentVolumeReclaimRecycle called Recycle is being deprecated. See announcement here: https://groups.google.com/forum/#!topic/kubernetes-dev/uexugCza84I + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. + // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. +type PersistentVolumeMode string + +const ( + // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. + PersistentVolumeBlock PersistentVolumeMode = "Block" + // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. + PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" +) + +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim + // +optional + Phase PersistentVolumePhase + // A human-readable message indicating details about why the volume is in this state. 
+ // +optional + Message string + // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI + // +optional + Reason string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PersistentVolumeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolume +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the volume requested by a pod author + // +optional + Spec PersistentVolumeClaimSpec + + // Status represents the current information about a claim + // +optional + Status PersistentVolumeClaimStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PersistentVolumeClaimList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + Items []PersistentVolumeClaim +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // Contains the types of access modes required + // +optional + AccessModes []PersistentVolumeAccessMode + // A label query over volumes to consider for binding. This selector is + // ignored when VolumeName is set + // +optional + Selector *metav1.LabelSelector + // Resources represents the minimum resources required + // +optional + Resources ResourceRequirements + // VolumeName is the binding reference to the PersistentVolume backing this + // claim. When set to non-empty value Selector is not evaluated + // +optional + VolumeName string + // Name of the StorageClass required by the claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 + // +optional + StorageClassName *string + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is a beta feature. + // +optional + VolumeMode *PersistentVolumeMode + // This field requires the VolumeSnapshotDataSource alpha feature gate to be + // enabled and currently VolumeSnapshot is the only supported data source. + // If the provisioner can support VolumeSnapshot data source, it will create + // a new volume and data will be restored to the volume at the same time. + // If the provisioner does not support VolumeSnapshot data source, volume will + // not be created and the failure will be reported as an event. + // In the future, we plan to support more data source types and the behavior + // of the provisioner may change. 
+	// +optional
+	DataSource *TypedLocalObjectReference
+}
+
+type PersistentVolumeClaimConditionType string
+
+// These are valid conditions of a PVC
+const (
+	// A user-triggered resize of the PVC has started
+	PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
+	// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on the node
+	PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
+)
+
+type PersistentVolumeClaimCondition struct {
+	Type   PersistentVolumeClaimConditionType
+	Status ConditionStatus
+	// +optional
+	LastProbeTime metav1.Time
+	// +optional
+	LastTransitionTime metav1.Time
+	// +optional
+	Reason string
+	// +optional
+	Message string
+}
+
+type PersistentVolumeClaimStatus struct {
+	// Phase represents the current phase of PersistentVolumeClaim
+	// +optional
+	Phase PersistentVolumeClaimPhase
+	// AccessModes contains all ways the volume backing the PVC can be mounted
+	// +optional
+	AccessModes []PersistentVolumeAccessMode
+	// Represents the actual resources of the underlying volume
+	// +optional
+	Capacity ResourceList
+	// +optional
+	Conditions []PersistentVolumeClaimCondition
+}
+
+type PersistentVolumeAccessMode string
+
+const (
+	// can be mounted read/write mode to exactly 1 host
+	ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
+	// can be mounted in read-only mode to many hosts
+	ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
+	// can be mounted in read/write mode to many hosts
+	ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
+)
+
+type PersistentVolumePhase string
+
+const (
+	// used for PersistentVolumes that are not available
+	VolumePending PersistentVolumePhase = "Pending"
+	// used for PersistentVolumes that are not yet bound
+	// Available volumes are held by the binder and matched to PersistentVolumeClaims
+	VolumeAvailable PersistentVolumePhase = "Available"
+	// used for PersistentVolumes that are bound
+	VolumeBound PersistentVolumePhase = "Bound"
+	// used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
+	// released volumes must be recycled before becoming available again
+	// this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
+	VolumeReleased PersistentVolumePhase = "Released"
+	// used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
+	VolumeFailed PersistentVolumePhase = "Failed"
+)
+
+type PersistentVolumeClaimPhase string
+
+const (
+	// used for PersistentVolumeClaims that are not yet bound
+	ClaimPending PersistentVolumeClaimPhase = "Pending"
+	// used for PersistentVolumeClaims that are bound
+	ClaimBound PersistentVolumeClaimPhase = "Bound"
+	// used for PersistentVolumeClaims that lost their underlying
+	// PersistentVolume. The claim was bound to a PersistentVolume and this
+	// volume does not exist any longer and all data on it was lost.
+	ClaimLost PersistentVolumeClaimPhase = "Lost"
+)
+
+type HostPathType string
+
+const (
+	// For backwards compatibility, leave it empty if unset
+	HostPathUnset HostPathType = ""
+	// If nothing exists at the given path, an empty directory will be created there
+	// as needed with file mode 0755, having the same group and ownership with Kubelet.
+ HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate" + // A directory must exist at the given path + HostPathDirectory HostPathType = "Directory" + // If nothing exists at the given path, an empty file will be created there + // as needed with file mode 0644, having the same group and ownership with Kubelet. + HostPathFileOrCreate HostPathType = "FileOrCreate" + // A file must exist at the given path + HostPathFile HostPathType = "File" + // A UNIX socket must exist at the given path + HostPathSocket HostPathType = "Socket" + // A character device must exist at the given path + HostPathCharDev HostPathType = "CharDevice" + // A block device must exist at the given path + HostPathBlockDev HostPathType = "BlockDevice" +) + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +type HostPathVolumeSource struct { + // If the path is a symlink, it will follow the link to the real path. + Path string + // Defaults to "" + Type *HostPathType +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +type EmptyDirVolumeSource struct { + // TODO: Longer term we want to represent the selection of underlying + // media more like a scheduling problem - user says what traits they + // need, we give them a backing store that satisfies that. For now + // this will cover the most common needs. + // Optional: what type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // +optional + Medium StorageMedium + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. + // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + SizeLimit *resource.Quantity +} + +// StorageMedium defines ways that storage can be allocated to a volume. +type StorageMedium string + +const ( + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node + StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) + StorageMediumHugePages StorageMedium = "HugePages" // use hugepages +) + +// Protocol defines network protocols supported for things like container ports. +type Protocol string + +const ( + // ProtocolTCP is the TCP protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP is the UDP protocol. + ProtocolUDP Protocol = "UDP" + // ProtocolSCTP is the SCTP protocol. + ProtocolSCTP Protocol = "SCTP" +) + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +type GCEPersistentDiskVolumeSource struct { + // Unique name of the PD resource. Used to identify the disk in GCE + PDName string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string
+	// Optional: Partition on the disk to mount.
+	// If omitted, kubelet will attempt to mount the device name.
+	// Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty.
+	// +optional
+	Partition int32
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+}
+
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIVolumeSource struct {
+	// Required: iSCSI target portal
+	// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+	// +optional
+	TargetPortal string
+	// Required: target iSCSI Qualified Name
+	// +optional
+	IQN string
+	// Required: iSCSI target lun number
+	// +optional
+	Lun int32
+	// Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
+	// +optional
+	ISCSIInterface string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+	// Optional: list of iSCSI target portal IPs for high availability.
+	// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+	// +optional
+	Portals []string
+	// Optional: whether to support iSCSI Discovery CHAP authentication
+	// +optional
+	DiscoveryCHAPAuth bool
+	// Optional: whether to support iSCSI Session CHAP authentication
+	// +optional
+	SessionCHAPAuth bool
+	// Optional: CHAP secret for iSCSI target and initiator authentication.
+	// The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true
+	// +optional
+	SecretRef *LocalObjectReference
+	// Optional: Custom initiator name per volume.
+	// If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
+	// <target portal>:<volume name> will be created for the connection.
+	// +optional
+	InitiatorName *string
+}
+
+// ISCSIPersistentVolumeSource represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIPersistentVolumeSource struct {
+	// Required: iSCSI target portal
+	// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+	// +optional
+	TargetPortal string
+	// Required: target iSCSI Qualified Name
+	// +optional
+	IQN string
+	// Required: iSCSI target lun number
+	// +optional
+	Lun int32
+	// Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
+	// +optional
+	ISCSIInterface string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+	// Optional: list of iSCSI target portal IPs for high availability.
+	// the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260)
+	// +optional
+	Portals []string
+	// Optional: whether to support iSCSI Discovery CHAP authentication
+	// +optional
+	DiscoveryCHAPAuth bool
+	// Optional: whether to support iSCSI Session CHAP authentication
+	// +optional
+	SessionCHAPAuth bool
+	// Optional: CHAP secret for iSCSI target and initiator authentication.
+	// The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true
+	// +optional
+	SecretRef *SecretReference
+	// Optional: Custom initiator name per volume.
+	// If initiatorName is specified with iscsiInterface simultaneously, a new iSCSI interface
+	// <target portal>:<volume name> will be created for the connection.
+	// +optional
+	InitiatorName *string
+}
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
+type FCVolumeSource struct {
+	// Optional: FC target worldwide names (WWNs)
+	// +optional
+	TargetWWNs []string
+	// Optional: FC target lun number
+	// +optional
+	Lun *int32
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+	// Optional: FC volume World Wide Identifiers (WWIDs)
+	// Either WWIDs or TargetWWNs and Lun must be set, but not both simultaneously.
+	// +optional
+	WWIDs []string
+}
+
+// FlexPersistentVolumeSource represents a generic persistent volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexPersistentVolumeSource struct {
+	// Driver is the name of the driver to use for this volume.
+	Driver string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+	// +optional
+	FSType string
+	// Optional: SecretRef is reference to the secret object containing
+	// sensitive information to pass to the plugin scripts. This may be
+	// empty if no secret object is specified. If the secret object
+	// contains more than one secret, all secrets are passed to the plugin
+	// scripts.
+	// +optional
+	SecretRef *SecretReference
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+	// Optional: Extra driver options if any.
+	// +optional
+	Options map[string]string
+}
+
+// FlexVolumeSource represents a generic volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexVolumeSource struct {
+	// Driver is the name of the driver to use for this volume.
+	Driver string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+	// +optional
+	FSType string
+	// Optional: SecretRef is reference to the secret object containing
+	// sensitive information to pass to the plugin scripts. This may be
+	// empty if no secret object is specified. If the secret object
+	// contains more than one secret, all secrets are passed to the plugin
+	// scripts.
+	// +optional
+	SecretRef *LocalObjectReference
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+	// Optional: Extra driver options if any.
+	// +optional
+	Options map[string]string
+}
+
+// Represents a Persistent Disk resource in AWS.
+//
+// An AWS EBS disk must exist before mounting to a container. The disk
+// must also be in the same AWS zone as the kubelet. An AWS EBS disk
+// can only be mounted as read/write once. AWS EBS volumes support
+// ownership management and SELinux relabeling.
+type AWSElasticBlockStoreVolumeSource struct {
+	// Unique id of the persistent disk resource. Used to identify the disk in AWS
+	VolumeID string
+	// Filesystem type to mount.
+	// Must be a filesystem type supported by the host operating system.
+	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+	// TODO: how do we prevent errors in the filesystem from compromising the machine
+	// +optional
+	FSType string
+	// Optional: Partition on the disk to mount.
+	// If omitted, kubelet will attempt to mount the device name.
+	// Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty.
+	// +optional
+	Partition int32
+	// Optional: Defaults to false (read/write). ReadOnly here will force
+	// the ReadOnly setting in VolumeMounts.
+	// +optional
+	ReadOnly bool
+}
+
+// Represents a volume that is populated with the contents of a git repository.
+// Git repo volumes do not support ownership management.
+// Git repo volumes support SELinux relabeling.
+//
+// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+// into the Pod's container.
+type GitRepoVolumeSource struct {
+	// Repository URL
+	Repository string
+	// Commit hash, this is optional
+	// +optional
+	Revision string
+	// Clone target, this is optional
+	// Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+	// git repository. Otherwise, if specified, the volume will contain the git repository in
+	// the subdirectory with the given name.
+	// +optional
+	Directory string
+	// TODO: Consider credentials here.
+}
+
+// Adapts a Secret into a volume.
+//
+// The contents of the target Secret's Data field will be presented in a volume
+// as files using the keys in the Data field as the file names.
+// Secret volumes support ownership management and SELinux relabeling.
+type SecretVolumeSource struct {
+	// Name of the secret in the pod's namespace to use.
+	// +optional
+	SecretName string
+	// If unspecified, each key-value pair in the Data field of the referenced
+	// Secret will be projected into the volume as a file whose name is the
+	// key and content is the value. If specified, the listed keys will be
+	// projected into the specified paths, and unlisted keys will not be
+	// present. If a key is specified which is not present in the Secret,
+	// the volume setup will error unless it is marked optional. Paths must be
+	// relative and may not contain the '..' path or start with '..'.
+	// +optional
+	Items []KeyToPath
+	// Mode bits to use on created files by default. Must be a value between
+	// 0 and 0777.
+	// Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +type SecretProjection struct { + LocalObjectReference + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +type NFSVolumeSource struct { + // Server is the hostname or IP address of the NFS server + Server string + + // Path is the exported NFS share + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the NFS export to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +type QuobyteVolumeSource struct { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + Registry string + + // Volume is a string that references an already created Quobyte volume by name. + Volume string + + // Defaults to false (read/write). ReadOnly here will force + // the Quobyte to be mounted with read-only permissions + // +optional + ReadOnly bool + + // User to map volume access to + // Defaults to the root user + // +optional + User string + + // Group to map volume access to + // Default is no group + // +optional + Group string +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsVolumeSource struct { + // Required: EndpointsName is the endpoint name that details Glusterfs topology + EndpointsName string + + // Required: Path is the Glusterfs volume path + Path string + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the Glusterfs to be mounted with read-only permissions + // +optional + ReadOnly bool +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsPersistentVolumeSource struct { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + EndpointsName string + + // Path is the Glusterfs volume path. 
+ // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + Path string + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + ReadOnly bool + + // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod + // +optional + EndpointsNamespace *string +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDVolumeSource struct { + // Required: CephMonitors is a collection of Ceph monitors + CephMonitors []string + // Required: RBDImage is the rados image name + RBDImage string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: RadosPool is the rados pool name,default is rbd + // +optional + RBDPool string + // Optional: RBDUser is the rados user name, default is admin + // +optional + RadosUser string + // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring + // +optional + Keyring string + // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. + // +optional + SecretRef *LocalObjectReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDPersistentVolumeSource struct { + // Required: CephMonitors is a collection of Ceph monitors + CephMonitors []string + // Required: RBDImage is the rados image name + RBDImage string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: RadosPool is the rados pool name,default is rbd + // +optional + RBDPool string + // Optional: RBDUser is the rados user name, default is admin + // +optional + RadosUser string + // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring + // +optional + Keyring string + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // +optional + SecretRef *SecretReference + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool +} + +// Represents a cinder volume resource in Openstack. A Cinder volume +// must exist before mounting to a container. The volume must also be +// in the same region as the kubelet. Cinder volumes support ownership +// management and SELinux relabeling. +type CinderVolumeSource struct { + // Unique id of the volume used to identify the cinder volume + VolumeID string + // Filesystem type to mount. 
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // Optional: points to a secret object containing parameters used to connect
+ // to OpenStack.
+ // +optional
+ SecretRef *LocalObjectReference
+}
+
+// Represents a cinder volume resource in OpenStack. A Cinder volume
+// must exist before mounting to a container. The volume must also be
+// in the same region as the kubelet. Cinder volumes support ownership
+// management and SELinux relabeling.
+type CinderPersistentVolumeSource struct {
+ // Unique ID of the volume, used to identify the Cinder volume
+ VolumeID string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // Optional: points to a secret object containing parameters used to connect
+ // to OpenStack.
+ // +optional
+ SecretRef *SecretReference
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod.
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+type CephFSVolumeSource struct {
+ // Required: Monitors is a collection of Ceph monitors
+ Monitors []string
+ // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ // +optional
+ Path string
+ // Optional: User is the rados user name, default is admin
+ // +optional
+ User string
+ // Optional: SecretFile is the path to the keyring for User, default is /etc/ceph/user.secret
+ // +optional
+ SecretFile string
+ // Optional: SecretRef is a reference to the authentication secret for User, default is empty.
+ // +optional
+ SecretRef *LocalObjectReference
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// SecretReference represents a Secret reference. It has enough information to retrieve a secret
+// in any namespace.
+type SecretReference struct {
+ // Name is unique within a namespace to reference a secret resource.
+ // +optional
+ Name string
+ // Namespace defines the space within which the secret name must be unique.
+ // +optional
+ Namespace string
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod.
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+type CephFSPersistentVolumeSource struct {
+ // Required: Monitors is a collection of Ceph monitors
+ Monitors []string
+ // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ // +optional
+ Path string
+ // Optional: User is the rados user name, default is admin
+ // +optional
+ User string
+ // Optional: SecretFile is the path to the keyring for User, default is /etc/ceph/user.secret
+ // +optional
+ SecretFile string
+ // Optional: SecretRef is a reference to the authentication secret for User, default is empty.
+ // +optional
+ SecretRef *SecretReference
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
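+ // Editor's note: an illustrative, non-normative sketch of how these fields
+ // compose; the monitor address and secret name below are hypothetical:
+ //
+ //   src := CephFSPersistentVolumeSource{
+ //       Monitors:  []string{"10.16.154.78:6789"},
+ //       Path:      "/",
+ //       SecretRef: &SecretReference{Name: "ceph-secret", Namespace: "default"},
+ //       ReadOnly:  true,
+ //   }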
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// One and only one of datasetName and datasetUUID should be set.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+type FlockerVolumeSource struct {
+ // Name of the dataset, stored as metadata -> name on the dataset for Flocker.
+ // DatasetName should be considered deprecated.
+ // +optional
+ DatasetName string
+ // UUID of the dataset. This is the unique identifier of a Flocker dataset.
+ // +optional
+ DatasetUUID string
+}
+
+// Represents a volume containing downward API info.
+// Downward API volumes support ownership management and SELinux relabeling.
+type DownwardAPIVolumeSource struct {
+ // Items is a list of DownwardAPIVolume files
+ // +optional
+ Items []DownwardAPIVolumeFile
+ // Mode bits to use on created files by default. Must be a value between
+ // 0 and 0777.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ DefaultMode *int32
+}
+
+// Represents a single file containing information from the downward API.
+type DownwardAPIVolumeFile struct {
+ // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
+ Path string
+ // Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.
+ // +optional
+ FieldRef *ObjectFieldSelector
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ // +optional
+ ResourceFieldRef *ResourceFieldSelector
+ // Optional: mode bits to use on this file, must be a value between 0
+ // and 0777. If not specified, the volume defaultMode will be used.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ Mode *int32
+}
+
+// Represents downward API info for projecting into a projected volume.
+// Note that this is identical to a downwardAPI volume source without the default
+// mode.
+type DownwardAPIProjection struct {
+ // Items is a list of DownwardAPIVolume files
+ // +optional
+ Items []DownwardAPIVolumeFile
+}
+
+// AzureFileVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFileVolumeSource struct {
+ // the name of the secret that contains Azure Storage Account Name and Key
+ SecretName string
+ // Share Name
+ ShareName string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// AzureFilePersistentVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
+type AzureFilePersistentVolumeSource struct {
+ // the name of the secret that contains Azure Storage Account Name and Key
+ SecretName string
+ // Share Name
+ ShareName string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // the namespace of the secret that contains Azure Storage Account Name and Key
+ // default is the same as the Pod
+ // +optional
+ SecretNamespace *string
+}
+
+// Represents a vSphere volume resource.
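+// Editor's note: a minimal illustrative construction; the datastore path is
+// hypothetical and not part of the upstream documentation:
+//
+//   src := VsphereVirtualDiskVolumeSource{
+//       VolumePath: "[datastore1] volumes/example.vmdk",
+//       FSType:     "ext4",
+//   }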
+type VsphereVirtualDiskVolumeSource struct {
+ // Path that identifies the vSphere volume vmdk
+ VolumePath string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Storage Policy Based Management (SPBM) profile name.
+ // +optional
+ StoragePolicyName string
+ // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
+ // +optional
+ StoragePolicyID string
+}
+
+// Represents a Photon Controller persistent disk resource.
+type PhotonPersistentDiskVolumeSource struct {
+ // ID that identifies the Photon Controller persistent disk
+ PdID string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ FSType string
+}
+
+// PortworxVolumeSource represents a Portworx volume resource.
+type PortworxVolumeSource struct {
+ // VolumeID uniquely identifies a Portworx volume
+ VolumeID string
+ // FSType represents the filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+type AzureDataDiskCachingMode string
+type AzureDataDiskKind string
+
+const (
+ AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
+ AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
+ AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
+
+ AzureSharedBlobDisk AzureDataDiskKind = "Shared"
+ AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
+ AzureManagedDisk AzureDataDiskKind = "Managed"
+)
+
+// AzureDiskVolumeSource represents an Azure Data Disk mount on the host and bind mount to the pod.
+type AzureDiskVolumeSource struct {
+ // The Name of the data disk in the blob storage
+ DiskName string
+ // The URI of the data disk in the blob storage
+ DataDiskURI string
+ // Host Caching mode: None, Read Only, Read Write.
+ // +optional
+ CachingMode *AzureDataDiskCachingMode
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType *string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly *bool
+ // Expected values: "Shared" (multiple blob disks per storage account),
+ // "Dedicated" (single blob disk per storage account), "Managed" (Azure
+ // managed data disk; only in managed availability set). Defaults to Shared.
+ Kind *AzureDataDiskKind
+}
+
+// ScaleIOVolumeSource represents a persistent ScaleIO volume.
+type ScaleIOVolumeSource struct {
+ // The host address of the ScaleIO API Gateway.
+ Gateway string
+ // The name of the storage system as configured in ScaleIO.
+ System string
+ // SecretRef references the secret for the ScaleIO user and other
+ // sensitive information. If this is not provided, the Login operation will fail.
+ SecretRef *LocalObjectReference
+ // Flag to enable/disable SSL communication with the Gateway, default false
+ // +optional
+ SSLEnabled bool
+ // The name of the ScaleIO Protection Domain for the configured storage.
+ // +optional
+ ProtectionDomain string
+ // The ScaleIO Storage Pool associated with the protection domain.
+ // +optional
+ StoragePool string
+ // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ // Default is ThinProvisioned.
+ // +optional
+ StorageMode string
+ // The name of a volume already created in the ScaleIO system
+ // that is associated with this volume source.
+ VolumeName string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs".
+ // Default is "xfs".
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume that can be defined
+// by an admin via a storage class, for instance.
+type ScaleIOPersistentVolumeSource struct {
+ // The host address of the ScaleIO API Gateway.
+ Gateway string
+ // The name of the storage system as configured in ScaleIO.
+ System string
+ // SecretRef references the secret for the ScaleIO user and other
+ // sensitive information. If this is not provided, the Login operation will fail.
+ SecretRef *SecretReference
+ // Flag to enable/disable SSL communication with the Gateway, default false
+ // +optional
+ SSLEnabled bool
+ // The name of the ScaleIO Protection Domain for the configured storage.
+ // +optional
+ ProtectionDomain string
+ // The ScaleIO Storage Pool associated with the protection domain.
+ // +optional
+ StoragePool string
+ // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ // Default is ThinProvisioned.
+ // +optional
+ StorageMode string
+ // The name of a volume created in the ScaleIO system
+ // that is associated with this volume source.
+ VolumeName string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs".
+ // Default is "xfs".
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+}
+
+// Represents a StorageOS persistent volume resource.
+type StorageOSVolumeSource struct {
+ // VolumeName is the human-readable name of the StorageOS volume. Volume
+ // names are only unique within a namespace.
+ VolumeName string
+ // VolumeNamespace specifies the scope of the volume within StorageOS. If no
+ // namespace is specified then the Pod's namespace will be used. This allows the
+ // Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ // Set VolumeName to any name to override the default behaviour.
+ // Set to "default" if you are not using namespaces within StorageOS.
+ // Namespaces that do not pre-exist within StorageOS will be created.
+ // +optional
+ VolumeNamespace string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // SecretRef specifies the secret to use for obtaining the StorageOS API
+ // credentials. If not specified, default values will be attempted.
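+ // Editor's note: for illustration only, a hypothetical namespace-local
+ // secret holding the API credentials:
+ //
+ //   src.SecretRef = &LocalObjectReference{Name: "storageos-api"} // name is hypothetical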
+ // +optional
+ SecretRef *LocalObjectReference
+}
+
+// Represents a StorageOS persistent volume resource.
+type StorageOSPersistentVolumeSource struct {
+ // VolumeName is the human-readable name of the StorageOS volume. Volume
+ // names are only unique within a namespace.
+ VolumeName string
+ // VolumeNamespace specifies the scope of the volume within StorageOS. If no
+ // namespace is specified then the Pod's namespace will be used. This allows the
+ // Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ // Set VolumeName to any name to override the default behaviour.
+ // Set to "default" if you are not using namespaces within StorageOS.
+ // Namespaces that do not pre-exist within StorageOS will be created.
+ // +optional
+ VolumeNamespace string
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // +optional
+ FSType string
+ // Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ ReadOnly bool
+ // SecretRef specifies the secret to use for obtaining the StorageOS API
+ // credentials. If not specified, default values will be attempted.
+ // +optional
+ SecretRef *ObjectReference
+}
+
+// Adapts a ConfigMap into a volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// volume as files using the keys in the Data field as the file names, unless
+// the items element is populated with specific mappings of keys to paths.
+// ConfigMap volumes support ownership management and SELinux relabeling.
+type ConfigMapVolumeSource struct {
+ LocalObjectReference
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional
+ Items []KeyToPath
+ // Mode bits to use on created files by default. Must be a value between
+ // 0 and 0777.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ DefaultMode *int32
+ // Specify whether the ConfigMap or its keys must be defined
+ // +optional
+ Optional *bool
+}
+
+// Adapts a ConfigMap into a projected volume.
+//
+// The contents of the target ConfigMap's Data field will be presented in a
+// projected volume as files using the keys in the Data field as the file names,
+// unless the items element is populated with specific mappings of keys to paths.
+// Note that this is identical to a configmap volume source without the default
+// mode.
+type ConfigMapProjection struct {
+ LocalObjectReference
+ // If unspecified, each key-value pair in the Data field of the referenced
+ // ConfigMap will be projected into the volume as a file whose name is the
+ // key and content is the value. If specified, the listed keys will be
+ // projected into the specified paths, and unlisted keys will not be
+ // present. If a key is specified which is not present in the ConfigMap,
+ // the volume setup will error unless it is marked optional. Paths must be
+ // relative and may not contain the '..' path or start with '..'.
+ // +optional
+ Items []KeyToPath
+ // Specify whether the ConfigMap or its keys must be defined
+ // +optional
+ Optional *bool
+}
+
+// ServiceAccountTokenProjection represents a projected service account token
+// volume. This projection can be used to insert a service account token into
+// the pod's runtime filesystem for use against APIs (the Kubernetes API server or
+// otherwise).
+type ServiceAccountTokenProjection struct {
+ // Audience is the intended audience of the token. A recipient of a token
+ // must identify itself with an identifier specified in the audience of the
+ // token, and otherwise should reject the token. The audience defaults to the
+ // identifier of the apiserver.
+ Audience string
+ // ExpirationSeconds is the requested duration of validity of the service
+ // account token. As the token approaches expiration, the kubelet volume
+ // plugin will proactively rotate the service account token. The kubelet will
+ // start trying to rotate the token if the token is older than 80 percent of
+ // its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ // and must be at least 10 minutes.
+ ExpirationSeconds int64
+ // Path is the path relative to the mount point of the file to project the
+ // token into.
+ Path string
+}
+
+// Represents a projected volume source.
+type ProjectedVolumeSource struct {
+ // list of volume projections
+ Sources []VolumeProjection
+ // Mode bits to use on created files by default. Must be a value between
+ // 0 and 0777.
+ // Directories within the path are not affected by this setting.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ DefaultMode *int32
+}
+
+// Projection that may be projected along with other supported volume types.
+type VolumeProjection struct {
+ // all types below are the supported types for projection into the same volume
+
+ // information about the secret data to project
+ Secret *SecretProjection
+ // information about the downwardAPI data to project
+ DownwardAPI *DownwardAPIProjection
+ // information about the configMap data to project
+ ConfigMap *ConfigMapProjection
+ // information about the serviceAccountToken data to project
+ ServiceAccountToken *ServiceAccountTokenProjection
+}
+
+// Maps a string key to a path within a volume.
+type KeyToPath struct {
+ // The key to project.
+ Key string
+
+ // The relative path of the file to map the key to.
+ // May not be an absolute path.
+ // May not contain the path element '..'.
+ // May not start with the string '..'.
+ Path string
+ // Optional: mode bits to use on this file, should be a value between 0
+ // and 0777. If not specified, the volume defaultMode will be used.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ Mode *int32
+}
+
+// Local represents directly-attached storage with node affinity (Beta feature).
+type LocalVolumeSource struct {
+ // The full path to the volume on the node.
+ // It can be either a directory or block device (disk, partition, ...).
+ Path string
+
+ // Filesystem type to mount.
+ // It applies only when the Path is a block device.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified.
+ // +optional
+ FSType *string
+}
+
+// Represents storage that is managed by an external CSI volume driver.
+type CSIPersistentVolumeSource struct {
+ // Driver is the name of the driver to use for this volume.
+ // Required.
+ Driver string
+
+ // VolumeHandle is the unique volume name returned by the CSI volume
+ // plugin’s CreateVolume to refer to the volume on all subsequent calls.
+ // Required.
+ VolumeHandle string
+
+ // Optional: The value to pass to ControllerPublishVolumeRequest.
+ // Defaults to false (read/write).
+ // +optional
+ ReadOnly bool
+
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs".
+ // +optional
+ FSType string
+
+ // Attributes of the volume to publish.
+ // +optional
+ VolumeAttributes map[string]string
+
+ // ControllerPublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // ControllerPublishVolume and ControllerUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ ControllerPublishSecretRef *SecretReference
+
+ // NodeStageSecretRef is a reference to the secret object containing sensitive
+ // information to pass to the CSI driver to complete the CSI NodeStageVolume
+ // and NodeUnstageVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ NodeStageSecretRef *SecretReference
+
+ // NodePublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // NodePublishVolume and NodeUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ NodePublishSecretRef *SecretReference
+}
+
+// ContainerPort represents a network port in a single container.
+type ContainerPort struct {
+ // Optional: If specified, this must be an IANA_SVC_NAME. Each named port
+ // in a pod must have a unique name.
+ // +optional
+ Name string
+ // Optional: If specified, this must be a valid port number, 0 < x < 65536.
+ // If HostNetwork is specified, this must match ContainerPort.
+ // +optional
+ HostPort int32
+ // Required: This must be a valid port number, 0 < x < 65536.
+ ContainerPort int32
+ // Required: Supports "TCP", "UDP" and "SCTP".
+ // +optional
+ Protocol Protocol
+ // Optional: What host IP to bind the external port to.
+ // +optional
+ HostIP string
+}
+
+// VolumeMount describes a mounting of a Volume within a container.
+type VolumeMount struct {
+ // Required: This must match the Name of a Volume [above].
+ Name string
+ // Optional: Defaults to false (read-write).
+ // +optional
+ ReadOnly bool
+ // Required. If the path is not an absolute path (e.g. some/path) it
+ // will be prepended with the appropriate root prefix for the operating
+ // system. On Linux this is '/', on Windows this is 'C:\'.
+ MountPath string
+ // Path within the volume from which the container's volume should be mounted.
+ // Defaults to "" (volume's root).
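+ // Editor's note (illustrative, hypothetical names): mounting a single file
+ // from a volume rather than the volume root:
+ //
+ //   m := VolumeMount{
+ //       Name:      "config",
+ //       MountPath: "/etc/app/app.yaml",
+ //       SubPath:   "app.yaml",
+ //   }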
+ // +optional
+ SubPath string
+ // mountPropagation determines how mounts are propagated from the host
+ // to the container and the other way around.
+ // When not set, MountPropagationNone is used.
+ // This field is beta in 1.10.
+ // +optional
+ MountPropagation *MountPropagationMode
+}
+
+// MountPropagationMode describes mount propagation.
+type MountPropagationMode string
+
+const (
+ // MountPropagationNone means that the volume in a container will
+ // not receive new mounts from the host or other containers, and filesystems
+ // mounted inside the container won't be propagated to the host or other
+ // containers.
+ // Note that this mode corresponds to "private" in Linux terminology.
+ MountPropagationNone MountPropagationMode = "None"
+ // MountPropagationHostToContainer means that the volume in a container will
+ // receive new mounts from the host or other containers, but filesystems
+ // mounted inside the container won't be propagated to the host or other
+ // containers.
+ // Note that this mode is recursively applied to all mounts in the volume
+ // ("rslave" in Linux terminology).
+ MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
+ // MountPropagationBidirectional means that the volume in a container will
+ // receive new mounts from the host or other containers, and its own mounts
+ // will be propagated from the container to the host or other containers.
+ // Note that this mode is recursively applied to all mounts in the volume
+ // ("rshared" in Linux terminology).
+ MountPropagationBidirectional MountPropagationMode = "Bidirectional"
+)
+
+// VolumeDevice describes a mapping of a raw block device within a container.
+type VolumeDevice struct {
+ // name must match the name of a persistentVolumeClaim in the pod
+ Name string
+ // devicePath is the path inside of the container that the device will be mapped to.
+ DevicePath string
+}
+
+// EnvVar represents an environment variable present in a Container.
+type EnvVar struct {
+ // Required: This must be a C_IDENTIFIER.
+ Name string
+ // Optional: no more than one of the following may be specified.
+ // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded
+ // using the previously defined environment variables in the container and
+ // any service environment variables. If a variable cannot be resolved,
+ // the reference in the input string will be unchanged. The $(VAR_NAME)
+ // syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped
+ // references will never be expanded, regardless of whether the variable
+ // exists or not.
+ // +optional
+ Value string
+ // Optional: Specifies a source the value of this var should come from.
+ // +optional
+ ValueFrom *EnvVarSource
+}
+
+// EnvVarSource represents a source for the value of an EnvVar.
+// Only one of its fields may be set.
+type EnvVarSource struct {
+ // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
+ // metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
+ // +optional
+ FieldRef *ObjectFieldSelector
+ // Selects a resource of the container: only resources limits and requests
+ // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ // +optional
+ ResourceFieldRef *ResourceFieldSelector
+ // Selects a key of a ConfigMap.
+ // +optional
+ ConfigMapKeyRef *ConfigMapKeySelector
+ // Selects a key of a secret in the pod's namespace.
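+ // Editor's note: an illustrative EnvVar wiring (the secret name and key
+ // below are hypothetical):
+ //
+ //   ev := EnvVar{
+ //       Name: "DB_PASSWORD",
+ //       ValueFrom: &EnvVarSource{
+ //           SecretKeyRef: &SecretKeySelector{
+ //               LocalObjectReference: LocalObjectReference{Name: "db-credentials"},
+ //               Key:                  "password",
+ //           },
+ //       },
+ //   }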
+ // +optional
+ SecretKeyRef *SecretKeySelector
+}
+
+// ObjectFieldSelector selects an APIVersioned field of an object.
+type ObjectFieldSelector struct {
+ // Required: Version of the schema the FieldPath is written in terms of.
+ // If no value is specified, it will be defaulted to the APIVersion of the
+ // enclosing object.
+ APIVersion string
+ // Required: Path of the field to select in the specified API version.
+ FieldPath string
+}
+
+// ResourceFieldSelector represents container resources (cpu, memory) and their output format.
+type ResourceFieldSelector struct {
+ // Container name: required for volumes, optional for env vars
+ // +optional
+ ContainerName string
+ // Required: resource to select
+ Resource string
+ // Specifies the output format of the exposed resources, defaults to "1"
+ // +optional
+ Divisor resource.Quantity
+}
+
+// Selects a key from a ConfigMap.
+type ConfigMapKeySelector struct {
+ // The ConfigMap to select from.
+ LocalObjectReference
+ // The key to select.
+ Key string
+ // Specify whether the ConfigMap or its key must be defined
+ // +optional
+ Optional *bool
+}
+
+// SecretKeySelector selects a key of a Secret.
+type SecretKeySelector struct {
+ // The name of the secret in the pod's namespace to select from.
+ LocalObjectReference
+ // The key of the secret to select from. Must be a valid secret key.
+ Key string
+ // Specify whether the Secret or its key must be defined
+ // +optional
+ Optional *bool
+}
+
+// EnvFromSource represents the source of a set of ConfigMaps or Secrets.
+type EnvFromSource struct {
+ // An optional identifier to prepend to each key in the ConfigMap.
+ // +optional
+ Prefix string
+ // The ConfigMap to select from.
+ // +optional
+ ConfigMapRef *ConfigMapEnvSource
+ // The Secret to select from.
+ // +optional
+ SecretRef *SecretEnvSource
+}
+
+// ConfigMapEnvSource selects a ConfigMap to populate the environment
+// variables with.
+//
+// The contents of the target ConfigMap's Data field will represent the
+// key-value pairs as environment variables.
+type ConfigMapEnvSource struct {
+ // The ConfigMap to select from.
+ LocalObjectReference
+ // Specify whether the ConfigMap must be defined
+ // +optional
+ Optional *bool
+}
+
+// SecretEnvSource selects a Secret to populate the environment
+// variables with.
+//
+// The contents of the target Secret's Data field will represent the
+// key-value pairs as environment variables.
+type SecretEnvSource struct {
+ // The Secret to select from.
+ LocalObjectReference
+ // Specify whether the Secret must be defined
+ // +optional
+ Optional *bool
+}
+
+// HTTPHeader describes a custom header to be used in HTTP probes.
+type HTTPHeader struct {
+ // The header field name
+ Name string
+ // The header field value
+ Value string
+}
+
+// HTTPGetAction describes an action based on HTTP Get requests.
+type HTTPGetAction struct {
+ // Optional: Path to access on the HTTP server.
+ // +optional
+ Path string
+ // Required: Name or number of the port to access on the container.
+ // +optional
+ Port intstr.IntOrString
+ // Optional: Host name to connect to, defaults to the pod IP. You
+ // probably want to set "Host" in httpHeaders instead.
+ // +optional
+ Host string
+ // Optional: Scheme to use for connecting to the host, defaults to HTTP.
+ // +optional
+ Scheme URIScheme
+ // Optional: Custom headers to set in the request. HTTP allows repeated headers.
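+ // Editor's note: an illustrative probe action (path, port, and header
+ // values are hypothetical); intstr.FromInt is from
+ // k8s.io/apimachinery/pkg/util/intstr:
+ //
+ //   a := HTTPGetAction{
+ //       Path:        "/healthz",
+ //       Port:        intstr.FromInt(8080),
+ //       Scheme:      URISchemeHTTP,
+ //       HTTPHeaders: []HTTPHeader{{Name: "X-Probe", Value: "1"}},
+ //   }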
+ // +optional
+ HTTPHeaders []HTTPHeader
+}
+
+// URIScheme identifies the scheme used for connection to a host for Get actions.
+type URIScheme string
+
+const (
+ // URISchemeHTTP means that the scheme used will be http://
+ URISchemeHTTP URIScheme = "HTTP"
+ // URISchemeHTTPS means that the scheme used will be https://
+ URISchemeHTTPS URIScheme = "HTTPS"
+)
+
+// TCPSocketAction describes an action based on opening a socket.
+type TCPSocketAction struct {
+ // Required: Port to connect to.
+ // +optional
+ Port intstr.IntOrString
+ // Optional: Host name to connect to, defaults to the pod IP.
+ // +optional
+ Host string
+}
+
+// ExecAction describes a "run in container" action.
+type ExecAction struct {
+ // Command is the command line to execute inside the container, the working directory for the
+ // command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ // a shell, you need to explicitly call out to that shell.
+ // +optional
+ Command []string
+}
+
+// Probe describes a health check to be performed against a container to determine whether it is
+// alive or ready to receive traffic.
+type Probe struct {
+ // The action taken to determine the health of a container
+ Handler
+ // Length of time before health checking is activated. In seconds.
+ // +optional
+ InitialDelaySeconds int32
+ // Length of time before health checking times out. In seconds.
+ // +optional
+ TimeoutSeconds int32
+ // How often (in seconds) to perform the probe.
+ // +optional
+ PeriodSeconds int32
+ // Minimum consecutive successes for the probe to be considered successful after having failed.
+ // Must be 1 for liveness.
+ // +optional
+ SuccessThreshold int32
+ // Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ // +optional
+ FailureThreshold int32
+}
+
+// PullPolicy describes a policy for if/when to pull a container image.
+type PullPolicy string
+
+const (
+ // PullAlways means that kubelet always attempts to pull the latest image. The container will fail if the pull fails.
+ PullAlways PullPolicy = "Always"
+ // PullNever means that kubelet never pulls an image, but only uses a local image. The container will fail if the image isn't present.
+ PullNever PullPolicy = "Never"
+ // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. The container will fail if the image isn't present and the pull fails.
+ PullIfNotPresent PullPolicy = "IfNotPresent"
+)
+
+// TerminationMessagePolicy describes how termination messages are retrieved from a container.
+type TerminationMessagePolicy string
+
+const (
+ // TerminationMessageReadFile is the default behavior and will set the container status message to
+ // the contents of the container's terminationMessagePath when the container exits.
+ TerminationMessageReadFile TerminationMessagePolicy = "File"
+ // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
+ // for the container status message when the container exits with an error and the
+ // terminationMessagePath has no contents.
+ TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
+)
+
+// Capability represents a POSIX capabilities type.
+type Capability string
+
+// Capabilities represent POSIX capabilities that can be added to or removed from a running container.
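+// Editor's note: a short illustrative sketch; the capability names are
+// standard Linux capabilities, shown here only as an example:
+//
+//   caps := Capabilities{
+//       Add:  []Capability{"NET_BIND_SERVICE"},
+//       Drop: []Capability{"ALL"},
+//   }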
+type Capabilities struct {
+ // Added capabilities
+ // +optional
+ Add []Capability
+ // Removed capabilities
+ // +optional
+ Drop []Capability
+}
+
+// ResourceRequirements describes the compute resource requirements.
+type ResourceRequirements struct {
+ // Limits describes the maximum amount of compute resources allowed.
+ // +optional
+ Limits ResourceList
+ // Requests describes the minimum amount of compute resources required.
+ // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ // otherwise to an implementation-defined value.
+ // +optional
+ Requests ResourceList
+}
+
+// Container represents a single container that is expected to be run on the host.
+type Container struct {
+ // Required: This must be a DNS_LABEL. Each container in a pod must
+ // have a unique name.
+ Name string
+ // Required.
+ Image string
+ // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+ // can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded,
+ // regardless of whether the variable exists or not.
+ // +optional
+ Command []string
+ // Optional: The docker image's cmd is used if this is not provided; cannot be updated.
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
+ // can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will never be expanded,
+ // regardless of whether the variable exists or not.
+ // +optional
+ Args []string
+ // Optional: Defaults to Docker's default.
+ // +optional
+ WorkingDir string
+ // +optional
+ Ports []ContainerPort
+ // List of sources to populate environment variables in the container.
+ // The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ // will be reported as an event when the container is starting. When a key exists in multiple
+ // sources, the value associated with the last source will take precedence.
+ // Values defined by an Env with a duplicate key will take precedence.
+ // Cannot be updated.
+ // +optional
+ EnvFrom []EnvFromSource
+ // +optional
+ Env []EnvVar
+ // Compute resource requirements.
+ // +optional
+ Resources ResourceRequirements
+ // +optional
+ VolumeMounts []VolumeMount
+ // volumeDevices is the list of block devices to be used by the container.
+ // This is a beta feature.
+ // +optional
+ VolumeDevices []VolumeDevice
+ // +optional
+ LivenessProbe *Probe
+ // +optional
+ ReadinessProbe *Probe
+ // +optional
+ Lifecycle *Lifecycle
+ // Optional: Defaults to /dev/termination-log.
+ // +optional
+ TerminationMessagePath string
+ // +optional
+ TerminationMessagePolicy TerminationMessagePolicy
+ // Required: Policy for pulling images for this container
+ ImagePullPolicy PullPolicy
+ // Optional: SecurityContext defines the security options the container should be run with.
+ // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ // +optional
+ SecurityContext *SecurityContext
+
+ // Variables for interactive containers; these have very specialized use-cases (e.g. debugging)
+ // and shouldn't be used for general purpose containers.
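+ // Editor's note (illustrative): an interactive debug container would
+ // typically set all three together, e.g.:
+ //
+ //   c.Stdin, c.StdinOnce, c.TTY = true, true, true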
+ // +optional + Stdin bool + // +optional + StdinOnce bool + // +optional + TTY bool +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +type Handler struct { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + Exec *ExecAction + // HTTPGet specifies the http request to perform. + // +optional + HTTPGet *HTTPGetAction + // TCPSocket specifies an action involving a TCP port. + // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *TCPSocketAction +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +type Lifecycle struct { + // PostStart is called immediately after a container is created. If the handler fails, the container + // is terminated and restarted. + // +optional + PostStart *Handler + // PreStop is called immediately before a container is terminated. The reason for termination is + // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. + // +optional + PreStop *Handler +} + +// The below types are used by kube_client and api_server. + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; +// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +type ContainerStateWaiting struct { + // A brief CamelCase string indicating details about why the container is in waiting state. + // +optional + Reason string + // A human-readable message indicating details about why the container is in waiting state. + // +optional + Message string +} + +type ContainerStateRunning struct { + // +optional + StartedAt metav1.Time +} + +type ContainerStateTerminated struct { + ExitCode int32 + // +optional + Signal int32 + // +optional + Reason string + // +optional + Message string + // +optional + StartedAt metav1.Time + // +optional + FinishedAt metav1.Time + // +optional + ContainerID string +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +type ContainerState struct { + // +optional + Waiting *ContainerStateWaiting + // +optional + Running *ContainerStateRunning + // +optional + Terminated *ContainerStateTerminated +} + +type ContainerStatus struct { + // Each container in a pod must have a unique name. + Name string + // +optional + State ContainerState + // +optional + LastTerminationState ContainerState + // Ready specifies whether the container has passed its readiness check. + Ready bool + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. 
+ RestartCount int32 + Image string + ImageID string + // +optional + ContainerID string +} + +// PodPhase is a label for the condition of a pod at the current time. +type PodPhase string + +// These are the valid statuses of pods. +const ( + // PodPending means the pod has been accepted by the system, but one or more of the containers + // has not been started. This includes time before being bound to a node, as well as time spent + // pulling images onto the host. + PodPending PodPhase = "Pending" + // PodRunning means the pod has been bound to a node and all of the containers have been started. + // At least one container is still running or is in the process of being restarted. + PodRunning PodPhase = "Running" + // PodSucceeded means that all containers in the pod have voluntarily terminated + // with a container exit code of 0, and the system is not going to restart any of these containers. + PodSucceeded PodPhase = "Succeeded" + // PodFailed means that all containers in the pod have terminated, and at least one container has + // terminated in a failure (exited with a non-zero exit code or was stopped by the system). + PodFailed PodPhase = "Failed" + // PodUnknown means that for some reason the state of the pod could not be obtained, typically due + // to an error in communicating with the host of the pod. + PodUnknown PodPhase = "Unknown" +) + +type PodConditionType string + +// These are valid conditions of pod. +const ( + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" + // PodReady means the pod is able to service requests and should be added to the + // load balancing pools of all matching services. + PodReady PodConditionType = "Ready" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" + // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler + // can't schedule the pod right now, for example due to insufficient resources in the cluster. + PodReasonUnschedulable = "Unschedulable" + // ContainersReady indicates whether all containers in the pod are ready. + ContainersReady PodConditionType = "ContainersReady" +) + +type PodCondition struct { + Type PodConditionType + Status ConditionStatus + // +optional + LastProbeTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +// RestartPolicy describes how the container should be restarted. +// Only one of the following restart policies may be specified. +// If none of the following policies is specified, the default one +// is RestartPolicyAlways. +type RestartPolicy string + +const ( + RestartPolicyAlways RestartPolicy = "Always" + RestartPolicyOnFailure RestartPolicy = "OnFailure" + RestartPolicyNever RestartPolicy = "Never" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodList is a list of Pods. +type PodList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Pod +} + +// DNSPolicy defines how a pod's DNS will be configured. +type DNSPolicy string + +const ( + // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS + // first, if it is available, then fall back on the default + // (as determined by kubelet) DNS settings. 
+ DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
+
+ // DNSClusterFirst indicates that the pod should use cluster DNS
+ // first unless hostNetwork is true, if it is available, then
+ // fall back on the default (as determined by kubelet) DNS settings.
+ DNSClusterFirst DNSPolicy = "ClusterFirst"
+
+ // DNSDefault indicates that the pod should use the default (as
+ // determined by kubelet) DNS settings.
+ DNSDefault DNSPolicy = "Default"
+
+ // DNSNone indicates that the pod should use empty DNS settings. DNS
+ // parameters such as nameservers and search paths should be defined via
+ // DNSConfig.
+ DNSNone DNSPolicy = "None"
+)
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+type NodeSelector struct {
+ // Required. A list of node selector terms. The terms are ORed.
+ NodeSelectorTerms []NodeSelectorTerm
+}
+
+// A null or empty node selector term matches no objects. Its requirements
+// are ANDed.
+// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+type NodeSelectorTerm struct {
+ // A list of node selector requirements by node's labels.
+ MatchExpressions []NodeSelectorRequirement
+ // A list of node selector requirements by node's fields.
+ MatchFields []NodeSelectorRequirement
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+ // The label key that the selector applies to.
+ Key string
+ // Represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ Operator NodeSelectorOperator
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ // the values array must be empty. If the operator is Gt or Lt, the values
+ // array must have a single element, which will be interpreted as an integer.
+ // This array is replaced during a strategic merge patch.
+ // +optional
+ Values []string
+}
+
+// A node selector operator is the set of operators that can be used in
+// a node selector requirement.
+type NodeSelectorOperator string
+
+const (
+ NodeSelectorOpIn NodeSelectorOperator = "In"
+ NodeSelectorOpNotIn NodeSelectorOperator = "NotIn"
+ NodeSelectorOpExists NodeSelectorOperator = "Exists"
+ NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
+ NodeSelectorOpGt NodeSelectorOperator = "Gt"
+ NodeSelectorOpLt NodeSelectorOperator = "Lt"
+)
+
+// A topology selector term represents the result of label queries.
+// A null or empty topology selector term matches no objects.
+// Its requirements are ANDed.
+// It provides a subset of the functionality of NodeSelectorTerm.
+// This is an alpha feature and may change in the future.
+type TopologySelectorTerm struct {
+ // A list of topology selector requirements by labels.
+ // +optional
+ MatchLabelExpressions []TopologySelectorLabelRequirement
+}
+
+// A topology selector requirement is a selector that matches the given label.
+// This is an alpha feature and may change in the future.
+type TopologySelectorLabelRequirement struct {
+ // The label key that the selector applies to.
+ Key string
+ // An array of string values. One value must match the label to be selected.
+ // Each entry in Values is ORed.
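+ // Editor's note: an illustrative term (the zone label key and values are
+ // hypothetical examples only):
+ //
+ //   term := TopologySelectorTerm{
+ //       MatchLabelExpressions: []TopologySelectorLabelRequirement{{
+ //           Key:    "failure-domain.beta.kubernetes.io/zone",
+ //           Values: []string{"us-east-1a", "us-east-1b"}, // ORed
+ //       }},
+ //   }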
+ Values []string
+}
+
+// Affinity is a group of affinity scheduling rules.
+type Affinity struct {
+ // Describes node affinity scheduling rules for the pod.
+ // +optional
+ NodeAffinity *NodeAffinity
+ // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+ // +optional
+ PodAffinity *PodAffinity
+ // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+ // +optional
+ PodAntiAffinity *PodAntiAffinity
+}
+
+// PodAffinity is a group of inter-pod affinity scheduling rules.
+type PodAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system will try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // +optional
+ // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm
+
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // +optional
+ RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ // +optional
+ PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm
+}
+
+// PodAntiAffinity is a group of inter-pod anti-affinity scheduling rules.
+type PodAntiAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system will try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // +optional
+ // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm
+
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // +optional
+ RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the anti-affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling anti-affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ // +optional
+ PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
+type WeightedPodAffinityTerm struct {
+ // weight associated with matching the corresponding podAffinityTerm,
+ // in the range 1-100.
+ Weight int32
+ // Required. A pod affinity term, associated with the corresponding weight.
+ PodAffinityTerm PodAffinityTerm
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running.
+type PodAffinityTerm struct {
+ // A label query over a set of resources, in this case pods.
+ // +optional
+ LabelSelector *metav1.LabelSelector
+ // namespaces specifies which namespaces the labelSelector applies to (matches against);
+ // null or empty list means "this pod's namespace"
+ // +optional
+ Namespaces []string
+ // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ // the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ // whose value of the label with key topologyKey matches that of any node on which any of the
+ // selected pods is running.
+ // Empty topologyKey is not allowed.
+ TopologyKey string
+}
+
+// NodeAffinity is a group of node affinity scheduling rules.
+type NodeAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to an update), the system
+ // will try to eventually evict the pod from its node.
+ // +optional
+ // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector
+
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to an update), the system
+ // may or may not try to eventually evict the pod from its node.
+ // +optional
+ RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node matches the corresponding matchExpressions; the
+ // node(s) with the highest sum are the most preferred.
+ // +optional
+ PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm
+}
+
+// An empty preferred scheduling term matches all objects with implicit weight 0
+// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+type PreferredSchedulingTerm struct {
+ // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+ Weight int32
+ // A node selector term, associated with the corresponding weight.
+ Preference NodeSelectorTerm
+}
+
+// The node this Taint is attached to has the "effect" on
+// any pod that does not tolerate the Taint.
+type Taint struct {
+ // Required. The taint key to be applied to a node.
+ Key string
+ // Required. The taint value corresponding to the taint key.
+ // +optional
+ Value string
+ // Required. The effect of the taint on pods
+ // that do not tolerate the taint.
+ // Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+ Effect TaintEffect
+ // TimeAdded represents the time at which the taint was added.
+ // It is only written for NoExecute taints.
+ // +optional
+ TimeAdded *metav1.Time
+}
+
+type TaintEffect string
+
+const (
+ // Do not allow new pods to schedule onto the node unless they tolerate the taint,
+ // but allow all pods submitted to Kubelet without going through the scheduler
+ // to start, and allow all already-running pods to continue running.
+ // Enforced by the scheduler.
+ TaintEffectNoSchedule TaintEffect = "NoSchedule"
+ // Like TaintEffectNoSchedule, but the scheduler tries not to schedule
+ // new pods onto the node, rather than prohibiting new pods from scheduling
+ // onto the node entirely. Enforced by the scheduler.
+ TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
+ // Kubelet without going through the scheduler to start.
+ // Enforced by Kubelet and the scheduler.
+ // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
+
+ // Evict any already-running pods that do not tolerate the taint.
+ // Currently enforced by NodeController.
+ TaintEffectNoExecute TaintEffect = "NoExecute"
+)
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
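+// Editor's note: an illustrative toleration; the key and value below are
+// hypothetical:
+//
+//   t := Toleration{
+//       Key:      "example.com/gpu",
+//       Operator: TolerationOpEqual,
+//       Value:    "true",
+//       Effect:   TaintEffectNoSchedule,
+//   }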
+type Toleration struct { + // Key is the taint key that the toleration applies to. Empty means match all taint keys. + // If the key is empty, operator must be Exists; this combination means to match all values and all keys. + // +optional + Key string + // Operator represents a key's relationship to the value. + // Valid operators are Exists and Equal. Defaults to Equal. + // Exists is equivalent to wildcard for value, so that a pod can + // tolerate all taints of a particular category. + // +optional + Operator TolerationOperator + // Value is the taint value the toleration matches to. + // If the operator is Exists, the value should be empty, otherwise just a regular string. + // +optional + Value string + // Effect indicates the taint effect to match. Empty means match all taint effects. + // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + // +optional + Effect TaintEffect + // TolerationSeconds represents the period of time the toleration (which must be + // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + // it is not set, which means tolerate the taint forever (do not evict). Zero and + // negative values will be treated as 0 (evict immediately) by the system. + // +optional + TolerationSeconds *int64 +} + +// A toleration operator is the set of operators that can be used in a toleration. +type TolerationOperator string + +const ( + TolerationOpExists TolerationOperator = "Exists" + TolerationOpEqual TolerationOperator = "Equal" +) + +// PodReadinessGate contains the reference to a pod condition +type PodReadinessGate struct { + // ConditionType refers to a condition in the pod's condition list with matching type. + ConditionType PodConditionType +} + +// PodSpec is a description of a pod +type PodSpec struct { + Volumes []Volume + // List of initialization containers belonging to the pod. + InitContainers []Container + // List of containers belonging to the pod. + Containers []Container + // +optional + RestartPolicy RestartPolicy + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // +optional + TerminationGracePeriodSeconds *int64 + // Optional duration in seconds relative to the StartTime that the pod may be active on a node + // before the system actively tries to terminate the pod; value must be positive integer + // +optional + ActiveDeadlineSeconds *int64 + // Set DNS policy for the pod. + // Defaults to "ClusterFirst". + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. 
+ // +optional
+ DNSPolicy DNSPolicy
+ // NodeSelector is a selector which must be true for the pod to fit on a node
+ // +optional
+ NodeSelector map[string]string
+
+ // ServiceAccountName is the name of the ServiceAccount to use to run this pod
+ // The pod will be allowed to use secrets referenced by the ServiceAccount
+ ServiceAccountName string
+ // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+ // +optional
+ AutomountServiceAccountToken *bool
+
+ // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+ // the scheduler simply schedules this pod onto that node, assuming that it fits resource
+ // requirements.
+ // +optional
+ NodeName string
+ // SecurityContext holds pod-level security attributes and common container settings.
+ // Optional: Defaults to empty. See type description for default values of each field.
+ // +optional
+ SecurityContext *PodSecurityContext
+ // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ // If specified, these secrets will be passed to individual puller implementations for them to use. For example,
+ // in the case of docker, only DockerConfig type secrets are honored.
+ // +optional
+ ImagePullSecrets []LocalObjectReference
+ // Specifies the hostname of the Pod.
+ // If not specified, the pod's hostname will be set to a system-defined value.
+ // +optional
+ Hostname string
+ // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ // If not specified, the pod will not have a domainname at all.
+ // +optional
+ Subdomain string
+ // If specified, the pod's scheduling constraints
+ // +optional
+ Affinity *Affinity
+ // If specified, the pod will be dispatched by specified scheduler.
+ // If not specified, the pod will be dispatched by default scheduler.
+ // +optional
+ SchedulerName string
+ // If specified, the pod's tolerations.
+ // +optional
+ Tolerations []Toleration
+ // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+ // file if specified. This is only valid for non-hostNetwork pods.
+ // +optional
+ HostAliases []HostAlias
+ // If specified, indicates the pod's priority. "system-node-critical" and
+ // "system-cluster-critical" are two special keywords which indicate the
+ // highest priorities with the former being the highest priority. Any other
+ // name must be defined by creating a PriorityClass object with that name.
+ // If not specified, the pod priority will be default or zero if there is no
+ // default.
+ // +optional
+ PriorityClassName string
+ // The priority value. Various system components use this field to find the
+ // priority of the pod. When Priority Admission Controller is enabled, it
+ // prevents users from setting this field. The admission controller populates
+ // this field from PriorityClassName.
+ // The higher the value, the higher the priority.
+ // +optional
+ Priority *int32
+ // Specifies the DNS parameters of a pod.
+ // Parameters specified here will be merged to the generated DNS
+ // configuration based on DNSPolicy.
+ // +optional
+ DNSConfig *PodDNSConfig
+ // If specified, all readiness gates will be evaluated for pod readiness.
+ // A pod is ready when all its containers are ready AND + // all conditions specified in the readiness gates have status equal to "True" + // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md + // +optional + ReadinessGates []PodReadinessGate + // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + // empty definition that uses the default runtime handler. + // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md + // This is an alpha feature and may change in the future. + // +optional + RuntimeClassName *string + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // If not specified, the default is true. + // +optional + EnableServiceLinks *bool +} + +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +type HostAlias struct { + IP string + Hostnames []string +} + +// Sysctl defines a kernel parameter to be set +type Sysctl struct { + // Name of a property to set + Name string + // Value of a property to set + Value string +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +type PodSecurityContext struct { + // Use the host's network namespace. If this option is set, the ports that will be + // used must be specified. + // Optional: Default to false + // +k8s:conversion-gen=false + // +optional + HostNetwork bool + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostPID bool + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + HostIPC bool + // Share a single process namespace between all of the containers in a pod. + // When this is set containers will be able to view and signal processes from other containers + // in the same pod, and the first process in each container will not be assigned PID 1. + // HostPID and ShareProcessNamespace cannot both be set. + // Optional: Default to false. + // This field is beta-level and may be disabled with the PodShareProcessNamespace feature. + // +k8s:conversion-gen=false + // +optional + ShareProcessNamespace *bool + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + RunAsUser *int64 + // The GID to run the entrypoint of the container process. 
+ // Uses runtime default if unset.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence
+ // for that container.
+ // +optional
+ RunAsGroup *int64
+ // Indicates that the container must run as a non-root user.
+ // If true, the Kubelet will validate the image at runtime to ensure that it
+ // does not run as UID 0 (root) and fail to start the container if it does.
+ // If unset or false, no such validation will be performed.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence
+ // for that container.
+ // +optional
+ RunAsNonRoot *bool
+ // A list of groups applied to the first process run in each container, in addition
+ // to the container's primary GID. If unspecified, no groups will be added to
+ // any container.
+ // +optional
+ SupplementalGroups []int64
+ // A special supplemental group that applies to all containers in a pod.
+ // Some volume types allow the Kubelet to change the ownership of that volume
+ // to be owned by the pod:
+ //
+ // 1. The owning GID will be the FSGroup
+ // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+ // 3. The permission bits are OR'd with rw-rw----
+ //
+ // If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ // +optional
+ FSGroup *int64
+ // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+ // sysctls (by the container runtime) might fail to launch.
+ // +optional
+ Sysctls []Sysctl
+}
+
+// PodQOSClass defines the supported qos classes of Pods.
+type PodQOSClass string
+
+const (
+ // PodQOSGuaranteed is the Guaranteed qos class.
+ PodQOSGuaranteed PodQOSClass = "Guaranteed"
+ // PodQOSBurstable is the Burstable qos class.
+ PodQOSBurstable PodQOSClass = "Burstable"
+ // PodQOSBestEffort is the BestEffort qos class.
+ PodQOSBestEffort PodQOSClass = "BestEffort"
+)
+
+// PodDNSConfig defines the DNS parameters of a pod in addition to
+// those generated from DNSPolicy.
+type PodDNSConfig struct {
+ // A list of DNS name server IP addresses.
+ // This will be appended to the base nameservers generated from DNSPolicy.
+ // Duplicated nameservers will be removed.
+ // +optional
+ Nameservers []string
+ // A list of DNS search domains for host-name lookup.
+ // This will be appended to the base search paths generated from DNSPolicy.
+ // Duplicated search paths will be removed.
+ // +optional
+ Searches []string
+ // A list of DNS resolver options.
+ // This will be merged with the base options generated from DNSPolicy.
+ // Duplicated entries will be removed. Resolution options given in Options
+ // will override those that appear in the base DNSPolicy.
+ // +optional
+ Options []PodDNSConfigOption
+}
+
+// PodDNSConfigOption defines DNS resolver options of a pod.
+type PodDNSConfigOption struct {
+ // Required.
+ Name string
+ // +optional
+ Value *string
+}
+
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system.
+type PodStatus struct {
+ // +optional
+ Phase PodPhase
+ // +optional
+ Conditions []PodCondition
+ // A human readable message indicating details about why the pod is in this state.
+ // +optional
+ Message string
+ // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'
+ // +optional
+ Reason string
+ // nominatedNodeName is set when this pod preempts other pods on the node, but it cannot be
+ // scheduled right away as preemption victims receive their graceful termination periods.
+ // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
+ // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
+ // give the resources on this node to a higher priority pod that is created after preemption.
+ // +optional
+ NominatedNodeName string
+
+ // +optional
+ HostIP string
+ // +optional
+ PodIP string
+
+ // Date and time at which the object was acknowledged by the Kubelet.
+ // This is before the Kubelet pulled the container image(s) for the pod.
+ // +optional
+ StartTime *metav1.Time
+ // +optional
+ QOSClass PodQOSClass
+
+ // The list has one entry per init container in the manifest. The most recent successful
+ // init container will have ready = true, the most recently started container will have
+ // startTime set.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status
+ InitContainerStatuses []ContainerStatus
+ // The list has one entry per container in the manifest. Each entry is
+ // currently the output of `docker inspect`. This output format is *not*
+ // final and should not be relied upon.
+ // TODO: Make real decisions about what our info should look like. Re-enable fuzz test
+ // when we have done this.
+ // +optional
+ ContainerStatuses []ContainerStatus
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
+type PodStatusResult struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+ // Status represents the current information about a pod. This data may not be up
+ // to date.
+ // +optional
+ Status PodStatus
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Pod is a collection of containers, used as either input (create, update) or as output (list, get).
+type Pod struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Spec defines the behavior of a pod.
+ // +optional
+ Spec PodSpec
+
+ // Status represents the current information about a pod. This data may not be up
+ // to date.
+ // +optional
+ Status PodStatus
+}
+
+// PodTemplateSpec describes the data a pod should have when created from a template
+type PodTemplateSpec struct {
+ // Metadata of the pods created from this template.
+ // +optional
+ metav1.ObjectMeta
+
+ // Spec defines the behavior of a pod.
+ // +optional
+ Spec PodSpec
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodTemplate describes a template for creating copies of a predefined pod.
+type PodTemplate struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Template defines the pods that will be created from this pod template
+ // +optional
+ Template PodTemplateSpec
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PodTemplateList is a list of PodTemplates.
+type PodTemplateList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []PodTemplate
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+// As the internal representation of a replication controller, it may have either
+// a TemplateRef or a Template set.
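+//
+// Illustrative sketch (not upstream text): Selector and the Template's labels
+// must agree for the controller to adopt the pods it creates, e.g.
+//
+//   replicas: 3
+//   selector:
+//     app: nginx
+//   template:
+//     metadata:
+//       labels:
+//         app: nginx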
+type ReplicationControllerSpec struct {
+ // Replicas is the number of desired replicas.
+ Replicas int32
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its containers crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ MinReadySeconds int32
+
+ // Selector is a label query over pods that should match the Replicas count.
+ Selector map[string]string
+
+ // TemplateRef is a reference to an object that describes the pod that will be created if
+ // insufficient replicas are detected. This reference is ignored if a Template is set.
+ // Must be set before converting to a versioned API object
+ // +optional
+ //TemplateRef *ObjectReference
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. Internally, this takes precedence over a
+ // TemplateRef.
+ // +optional
+ Template *PodTemplateSpec
+}
+
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+type ReplicationControllerStatus struct {
+ // Replicas is the number of actual replicas.
+ Replicas int32
+
+ // The number of pods that have labels matching the labels of the pod template of the replication controller.
+ // +optional
+ FullyLabeledReplicas int32
+
+ // The number of ready replicas for this replication controller.
+ // +optional
+ ReadyReplicas int32
+
+ // The number of available replicas (ready for at least minReadySeconds) for this replication controller.
+ // +optional
+ AvailableReplicas int32
+
+ // ObservedGeneration is the most recent generation observed by the controller.
+ // +optional
+ ObservedGeneration int64
+
+ // Represents the latest available observations of a replication controller's current state.
+ // +optional
+ Conditions []ReplicationControllerCondition
+}
+
+type ReplicationControllerConditionType string
+
+// These are valid conditions of a replication controller.
+const (
+ // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
+ // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
+ // etc. or deleted due to the kubelet being down or finalizers failing.
+ ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
+)
+
+// ReplicationControllerCondition describes the state of a replication controller at a certain point.
+type ReplicationControllerCondition struct {
+ // Type of replication controller condition.
+ Type ReplicationControllerConditionType
+ // Status of the condition, one of True, False, Unknown.
+ Status ConditionStatus
+ // The last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string
+ // A human readable message indicating details about the transition.
+ // +optional
+ Message string
+}
+
+// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ReplicationController represents the configuration of a replication controller.
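+//
+// The scale subresource declared by the +genclient markers above lets clients
+// resize the controller without reading the whole object; for illustration
+// (hypothetical name):
+//
+//   kubectl scale rc my-rc --replicas=5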
+type ReplicationController struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired behavior of this replication controller. + // +optional + Spec ReplicationControllerSpec + + // Status is the current status of this replication controller. This data may be + // out of date by some window of time. + // +optional + Status ReplicationControllerStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicationControllerList is a collection of replication controllers. +type ReplicationControllerList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ReplicationController +} + +const ( + // ClusterIPNone - do not assign a cluster IP + // no proxying required and no environment variables should be created for pods + ClusterIPNone = "None" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceList holds a list of services. +type ServiceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Service +} + +// Session Affinity Type string +type ServiceAffinity string + +const ( + // ServiceAffinityClientIP is the Client IP based. + ServiceAffinityClientIP ServiceAffinity = "ClientIP" + + // ServiceAffinityNone - no session affinity. + ServiceAffinityNone ServiceAffinity = "None" +) + +const ( + // DefaultClientIPServiceAffinitySeconds is the default timeout seconds + // of Client IP based session affinity - 3 hours. + DefaultClientIPServiceAffinitySeconds int32 = 10800 + // MaxClientIPServiceAffinitySeconds is the max timeout seconds + // of Client IP based session affinity - 1 day. + MaxClientIPServiceAffinitySeconds int32 = 86400 +) + +// SessionAffinityConfig represents the configurations of session affinity. +type SessionAffinityConfig struct { + // clientIP contains the configurations of Client IP based session affinity. + // +optional + ClientIP *ClientIPConfig +} + +// ClientIPConfig represents the configurations of Client IP based session affinity. +type ClientIPConfig struct { + // timeoutSeconds specifies the seconds of ClientIP type session sticky time. + // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + // Default value is 10800(for 3 hours). + // +optional + TimeoutSeconds *int32 +} + +// Service Type string describes ingress methods for a service +type ServiceType string + +const ( + // ServiceTypeClusterIP means a service will only be accessible inside the + // cluster, via the ClusterIP. + ServiceTypeClusterIP ServiceType = "ClusterIP" + + // ServiceTypeNodePort means a service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. + ServiceTypeNodePort ServiceType = "NodePort" + + // ServiceTypeLoadBalancer means a service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + + // ServiceTypeExternalName means a service consists of only a reference to + // an external name that kubedns or equivalent will return as a CNAME + // record, with no exposing or proxying of any pods involved. + ServiceTypeExternalName ServiceType = "ExternalName" +) + +// Service External Traffic Policy Type string +type ServiceExternalTrafficPolicyType string + +const ( + // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. 
+ ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + // ServiceExternalTrafficPolicyTypeCluster specifies cluster-wide (legacy) behavior. + ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" +) + +// ServiceStatus represents the current status of a service +type ServiceStatus struct { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + LoadBalancer LoadBalancerStatus +} + +// LoadBalancerStatus represents the status of a load-balancer +type LoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer; + // traffic intended for the service should be sent to these ingress points. + // +optional + Ingress []LoadBalancerIngress +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +type LoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + IP string + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + Hostname string +} + +// ServiceSpec describes the attributes that a user creates on a service +type ServiceSpec struct { + // Type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ + // +optional + Type ServiceType + + // Required: The list of ports that are exposed by this service. + Ports []ServicePort + + // Route service traffic to pods with label keys and values matching this + // selector. If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ + Selector map[string]string + + // ClusterIP is the IP address of the service and is usually assigned + // randomly by the master. If an address is specified manually and is not in + // use by others, it will be allocated to the service; otherwise, creation + // of the service will fail. This field can not be changed through updates. + // Valid values are "None", empty string (""), or a valid IP address. "None" + // can be specified for headless services when proxying is not required. + // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if + // type is ExternalName. 
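+ // For illustration (not upstream text): a headless service, e.g. one backing
+ // a StatefulSet, sets this field to "None" so that DNS returns the pod IPs
+ // directly instead of a virtual IP.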
+ // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+ // +optional
+ ClusterIP string
+
+ // ExternalName is the external reference that kubedns or equivalent will
+ // return as a CNAME record for this service. No proxying will be involved.
+ // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
+ // and requires Type to be ExternalName.
+ ExternalName string
+
+ // ExternalIPs are used by external load balancers, or can be set by
+ // users to handle external traffic that arrives at a node.
+ // +optional
+ ExternalIPs []string
+
+ // Only applies to Service Type: LoadBalancer
+ // LoadBalancer will get created with the IP specified in this field.
+ // This feature depends on whether the underlying cloud-provider supports specifying
+ // the loadBalancerIP when a load balancer is created.
+ // This field will be ignored if the cloud-provider does not support the feature.
+ // +optional
+ LoadBalancerIP string
+
+ // Optional: Supports "ClientIP" and "None". Used to maintain session affinity.
+ // +optional
+ SessionAffinity ServiceAffinity
+
+ // sessionAffinityConfig contains the configurations of session affinity.
+ // +optional
+ SessionAffinityConfig *SessionAffinityConfig
+
+ // Optional: If specified and supported by the platform, traffic through the cloud-provider
+ // load-balancer will be restricted to the specified client IPs. This field will be ignored if the
+ // cloud-provider does not support the feature.
+ // +optional
+ LoadBalancerSourceRanges []string
+
+ // externalTrafficPolicy denotes if this Service desires to route external
+ // traffic to node-local or cluster-wide endpoints. "Local" preserves the
+ // client source IP and avoids a second hop for LoadBalancer and NodePort
+ // type services, but risks potentially imbalanced traffic spreading.
+ // "Cluster" obscures the client source IP and may cause a second hop to
+ // another node, but should have good overall load-spreading.
+ // +optional
+ ExternalTrafficPolicy ServiceExternalTrafficPolicyType
+
+ // healthCheckNodePort specifies the healthcheck nodePort for the service.
+ // If not specified, HealthCheckNodePort is created by the service api
+ // backend with the allocated nodePort. Will use user-specified nodePort value
+ // if specified by the client. Only takes effect when Type is set to LoadBalancer
+ // and ExternalTrafficPolicy is set to Local.
+ // +optional
+ HealthCheckNodePort int32
+
+ // publishNotReadyAddresses, when set to true, indicates that DNS implementations
+ // must publish the notReadyAddresses of subsets for the Endpoints associated with
+ // the Service. The default value is false.
+ // The primary use case for setting this field is to use a StatefulSet's Headless Service
+ // to propagate SRV records for its Pods without respect to their readiness for purpose
+ // of peer discovery.
+ // +optional
+ PublishNotReadyAddresses bool
+}
+
+type ServicePort struct {
+ // Optional if only one ServicePort is defined on this service: The
+ // name of this port within the service. This must be a DNS_LABEL.
+ // All ports within a ServiceSpec must have unique names. This maps to
+ // the 'Name' field in EndpointPort objects.
+ Name string
+
+ // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
+ Protocol Protocol
+
+ // The port that will be exposed on the service.
+ Port int32
+
+ // Optional: The target port on pods selected by this service. If this
+ // is a string, it will be looked up as a named port in the target
+ // Pod's container ports. If this is not specified, the value
+ // of the 'port' field is used (an identity map).
+ // This field is ignored for services with clusterIP=None, and should be
+ // omitted or set equal to the 'port' field.
+ TargetPort intstr.IntOrString
+
+ // The port on each node on which this service is exposed.
+ // Default is to auto-allocate a port if the ServiceType of this Service requires one.
+ NodePort int32
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Service is a named abstraction of software service (for example, mysql) consisting of local port
+// (for example 3306) that the proxy listens on, and the selector that determines which pods
+// will answer requests sent through the proxy.
+type Service struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Spec defines the behavior of a service.
+ // +optional
+ Spec ServiceSpec
+
+ // Status represents the current status of a service.
+ // +optional
+ Status ServiceStatus
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceAccount binds together:
+// * a name, understood by users, and perhaps by peripheral systems, for an identity
+// * a principal that can be authenticated and authorized
+// * a set of secrets
+type ServiceAccount struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount
+ Secrets []ObjectReference
+
+ // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
+ // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
+ // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
+ // +optional
+ ImagePullSecrets []LocalObjectReference
+
+ // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
+ // Can be overridden at the pod level.
+ // +optional
+ AutomountServiceAccountToken *bool
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ServiceAccountList is a list of ServiceAccount objects
+type ServiceAccountList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []ServiceAccount
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Endpoints is a collection of endpoints that implement the actual service. Example:
+//   Name: "mysvc",
+//   Subsets: [
+//     {
+//       Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//       Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//     },
+//     {
+//       Addresses: [{"ip": "10.10.3.3"}],
+//       Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
+//     },
+//  ]
+type Endpoints struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // The set of all endpoints is the union of all subsets.
+ Subsets []EndpointSubset
+}
+
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+//   {
+//     Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+//     Ports:     [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+//   }
+// The resulting set of endpoints can be viewed as:
+//     a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+//     b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+type EndpointSubset struct {
+ Addresses []EndpointAddress
+ NotReadyAddresses []EndpointAddress
+ Ports []EndpointPort
+}
+
+// EndpointAddress is a tuple that describes a single IP address.
+type EndpointAddress struct {
+ // The IP of this endpoint.
+ // IPv6 is also accepted but not fully supported on all platforms. Also, certain
+ // kubernetes components, like kube-proxy, are not IPv6 ready.
+ // TODO: This should allow hostname or IP, see #4447.
+ IP string
+ // Optional: Hostname of this endpoint
+ // Meant to be used by DNS servers etc.
+ // +optional
+ Hostname string
+ // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
+ // +optional
+ NodeName *string
+ // Optional: The kubernetes object related to the entry point.
+ TargetRef *ObjectReference
+}
+
+// EndpointPort is a tuple that describes a single port.
+type EndpointPort struct {
+ // The name of this port (corresponds to ServicePort.Name). Optional
+ // if only one port is defined. Must be a DNS_LABEL.
+ Name string
+
+ // The port number.
+ Port int32
+
+ // The IP protocol for this port.
+ Protocol Protocol
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EndpointsList is a list of endpoints.
+type EndpointsList struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ListMeta
+
+ Items []Endpoints
+}
+
+// NodeSpec describes the attributes that a node is created with.
+type NodeSpec struct {
+ // PodCIDR represents the pod IP range assigned to the node
+ // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs.
+ // +optional
+ PodCIDR string
+
+ // ID of the node assigned by the cloud provider
+ // Note: format is "<ProviderName>://<ProviderSpecificNodeID>"
+ // +optional
+ ProviderID string
+
+ // Unschedulable controls node schedulability of new pods. By default node is schedulable.
+ // +optional
+ Unschedulable bool
+
+ // If specified, the node's taints.
+ // +optional
+ Taints []Taint
+
+ // If specified, the source to get node configuration from
+ // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
+ // +optional
+ ConfigSource *NodeConfigSource
+
+ // Deprecated. Not all kubelets will set this field. Remove field after 1.13.
+ // see: https://issues.k8s.io/61966
+ // +optional
+ DoNotUse_ExternalID string
+}
+
+// NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil.
+type NodeConfigSource struct {
+ ConfigMap *ConfigMapNodeConfigSource
+}
+
+type ConfigMapNodeConfigSource struct {
+ // Namespace is the metadata.namespace of the referenced ConfigMap.
+ // This field is required in all cases.
+ Namespace string
+
+ // Name is the metadata.name of the referenced ConfigMap.
+ // This field is required in all cases.
+ Name string
+
+ // UID is the metadata.UID of the referenced ConfigMap.
+ // This field is forbidden in Node.Spec, and required in Node.Status.
+ // +optional
+ UID types.UID
+
+ // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
+ // This field is forbidden in Node.Spec, and required in Node.Status.
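+ // For illustration (hypothetical names; not upstream text), a spec-level
+ // source therefore names only the ConfigMap and key:
+ //
+ //   configSource:
+ //     configMap:
+ //       namespace: kube-system
+ //       name: my-node-config
+ //       kubeletConfigKey: kubelet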
+ // +optional + ResourceVersion string + + // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure + // This field is required in all cases. + KubeletConfigKey string +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercase for backwards compatibility (since it was falling back to var name of + 'Port'). + */ + + // Port number of the given endpoint. + Port int32 +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +type NodeDaemonEndpoints struct { + // Endpoint on which Kubelet is listening. + // +optional + KubeletEndpoint DaemonEndpoint +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +type NodeSystemInfo struct { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is preferred. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + MachineID string + // SystemUUID reported by the node. For unique machine identification + // MachineID is preferred. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + SystemUUID string + // Boot ID reported by the node. + BootID string + // Kernel Version reported by the node. + KernelVersion string + // OS Image reported by the node. + OSImage string + // ContainerRuntime Version reported by the node. + ContainerRuntimeVersion string + // Kubelet Version reported by the node. + KubeletVersion string + // KubeProxy Version reported by the node. + KubeProxyVersion string + // The Operating System reported by the node + OperatingSystem string + // The Architecture reported by the node + Architecture string +} + +// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource. +type NodeConfigStatus struct { + // Assigned reports the checkpointed config the node will try to use. + // When Node.Spec.ConfigSource is updated, the node checkpoints the associated + // config payload to local disk, along with a record indicating intended + // config. The node refers to this record to choose its config checkpoint, and + // reports this record in Assigned. Assigned only updates in the status after + // the record has been checkpointed to disk. When the Kubelet is restarted, + // it tries to make the Assigned config the Active config by loading and + // validating the checkpointed payload identified by Assigned. + // +optional + Assigned *NodeConfigSource + // Active reports the checkpointed config the node is actively using. + // Active will represent either the current version of the Assigned config, + // or the current LastKnownGood config, depending on whether attempting to use the + // Assigned config results in an error. + // +optional + Active *NodeConfigSource + // LastKnownGood reports the checkpointed config the node will fall back to + // when it encounters an error attempting to use the Assigned config. + // The Assigned config becomes the LastKnownGood config when the node determines + // that the Assigned config is stable and correct. + // This is currently implemented as a 10-minute soak period starting when the local + // record of Assigned config is updated. If the Assigned config is Active at the end + // of this period, it becomes the LastKnownGood. 
Note that if Spec.ConfigSource is + // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, + // because the local default config is always assumed good. + // You should not make assumptions about the node's method of determining config stability + // and correctness, as this may change or become configurable in the future. + // +optional + LastKnownGood *NodeConfigSource + // Error describes any problems reconciling the Spec.ConfigSource to the Active config. + // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned + // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting + // to load or validate the Assigned config, etc. + // Errors may occur at different points while syncing config. Earlier errors (e.g. download or + // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across + // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in + // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error + // by fixing the config assigned in Spec.ConfigSource. + // You can find additional information for debugging by searching the error message in the Kubelet log. + // Error is a human-readable description of the error state; machines can check whether or not Error + // is empty, but should not rely on the stability of the Error text across Kubelet versions. + // +optional + Error string +} + +// NodeStatus is information about the current status of a node. +type NodeStatus struct { + // Capacity represents the total resources of a node. + // +optional + Capacity ResourceList + // Allocatable represents the resources of a node that are available for scheduling. + // +optional + Allocatable ResourceList + // NodePhase is the current lifecycle phase of the node. + // +optional + Phase NodePhase + // Conditions is an array of current node conditions. + // +optional + Conditions []NodeCondition + // Queried from cloud provider, if available. + // +optional + Addresses []NodeAddress + // Endpoints of daemons running on the Node. + // +optional + DaemonEndpoints NodeDaemonEndpoints + // Set of ids/uuids to uniquely identify the node. + // +optional + NodeInfo NodeSystemInfo + // List of container images on this node + // +optional + Images []ContainerImage + // List of attachable volumes in use (mounted) by the node. + // +optional + VolumesInUse []UniqueVolumeName + // List of volumes that are attached to the node. + // +optional + VolumesAttached []AttachedVolume + // Status of the config assigned to the node via the dynamic Kubelet config feature. + // +optional + Config *NodeConfigStatus +} + +type UniqueVolumeName string + +// AttachedVolume describes a volume attached to a node +type AttachedVolume struct { + // Name of the attached volume + Name UniqueVolumeName + + // DevicePath represents the device path where the volume should be available + DevicePath string +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +type AvoidPods struct { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. 
+ // +optional + PreferAvoidPods []PreferAvoidPodsEntry +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time + // (brief) reason why this entry was added to the list. + // +optional + Reason string + // Human readable message indicating why this entry was added to the list. + // +optional + Message string +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + Names []string + // The size of the image in bytes. + // +optional + SizeBytes int64 +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReady, NodeReachable +const ( + // NodeReady means kubelet is healthy and ready to accept pods. + NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. + NodeDiskPressure NodeConditionType = "DiskPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" +) + +type NodeCondition struct { + Type NodeConditionType + Status ConditionStatus + // +optional + LastHeartbeatTime metav1.Time + // +optional + LastTransitionTime metav1.Time + // +optional + Reason string + // +optional + Message string +} + +type NodeAddressType string + +const ( + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" +) + +type NodeAddress struct { + Type NodeAddressType + Address string +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. +type NodeResources struct { + // Capacity represents the available resources of a node + // +optional + Capacity ResourceList +} + +// ResourceName is the name identifying various resources in a ResourceList. +type ResourceName string + +// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . 
characters allowed anywhere, except the first or last character. +// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than +// camel case, separating compound words. +// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. +const ( + // CPU, in cores. (500m = .5 cores) + ResourceCPU ResourceName = "cpu" + // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceMemory ResourceName = "memory" + // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) + ResourceStorage ResourceName = "storage" + // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // The resource name for ResourceEphemeralStorage is alpha and it can change across releases. + ResourceEphemeralStorage ResourceName = "ephemeral-storage" +) + +const ( + // Default namespace prefix. + ResourceDefaultNamespacePrefix = "kubernetes.io/" + // Name prefix for huge page resources (alpha). + ResourceHugePagesPrefix = "hugepages-" + // Name prefix for storage resource limits + ResourceAttachableVolumesPrefix = "attachable-volumes-" +) + +// ResourceList is a set of (resource name, quantity) pairs. +type ResourceList map[ResourceName]resource.Quantity + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Node is a worker node in Kubernetes +// The name of the node according to etcd is in ObjectMeta.Name. +type Node struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of a node. + // +optional + Spec NodeSpec + + // Status describes the current status of a Node + // +optional + Status NodeStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeList is a list of nodes. +type NodeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Node +} + +// NamespaceSpec describes the attributes on a Namespace +type NamespaceSpec struct { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage + Finalizers []FinalizerName +} + +// FinalizerName is the name identifying a finalizer during namespace lifecycle. +type FinalizerName string + +// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or +// in metav1. +const ( + FinalizerKubernetes FinalizerName = "kubernetes" +) + +// NamespaceStatus is information about the current status of a Namespace. +type NamespaceStatus struct { + // Phase is the current lifecycle phase of the namespace. + // +optional + Phase NamespacePhase +} + +type NamespacePhase string + +// These are the valid phases of a namespace. +const ( + // NamespaceActive means the namespace is available for use in the system + NamespaceActive NamespacePhase = "Active" + // NamespaceTerminating means the namespace is undergoing graceful termination + NamespaceTerminating NamespacePhase = "Terminating" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// A namespace provides a scope for Names. +// Use of multiple namespaces is optional +type Namespace struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the behavior of the Namespace. 
+ // +optional + Spec NamespaceSpec + + // Status describes the current status of a Namespace + // +optional + Status NamespaceStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NamespaceList is a list of Namespaces. +type NamespaceList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Namespace +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. +type Binding struct { + metav1.TypeMeta + // ObjectMeta describes the object that is being bound. + // +optional + metav1.ObjectMeta + + // Target is the object to bind to. + Target ObjectReference +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodLogOptions is the query options for a Pod's logs REST call +type PodLogOptions struct { + metav1.TypeMeta + + // Container for which to return logs + Container string + // If true, follow the logs for the pod + Follow bool + // If true, return previous terminated container logs + Previous bool + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. + Timestamps bool + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + LimitBytes *int64 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodAttachOptions is the query options to a Pod's remote attach call +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +type PodAttachOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the attach call + // +optional + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the attach call + // +optional + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the attach call + // +optional + Stderr bool + + // TTY if true indicates that a tty will be allocated for the attach call + // +optional + TTY bool + + // Container to attach to. 
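+ // For illustration (hypothetical names; not upstream text):
+ // `kubectl attach my-pod -c sidecar` fills this field through the
+ // `container` query parameter of the attach request.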
+ // +optional + Container string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodExecOptions is the query options to a Pod's remote exec call +type PodExecOptions struct { + metav1.TypeMeta + + // Stdin if true indicates that stdin is to be redirected for the exec call + Stdin bool + + // Stdout if true indicates that stdout is to be redirected for the exec call + Stdout bool + + // Stderr if true indicates that stderr is to be redirected for the exec call + Stderr bool + + // TTY if true indicates that a tty will be allocated for the exec call + TTY bool + + // Container in which to execute the command. + Container string + + // Command is the remote command to execute; argv array; not executed within a shell. + Command []string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodPortForwardOptions is the query options to a Pod's port forward call +type PodPortForwardOptions struct { + metav1.TypeMeta + + // The list of ports to forward + // +optional + Ports []int32 +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodProxyOptions is the query options to a Pod's proxy call +type PodProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeProxyOptions is the query options to a Node's proxy call +type NodeProxyOptions struct { + metav1.TypeMeta + + // Path is the URL path to use for the current proxy request + Path string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + Path string +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ObjectReference struct { + // +optional + Kind string + // +optional + Namespace string + // +optional + Name string + // +optional + UID types.UID + // +optional + APIVersion string + // +optional + ResourceVersion string + + // Optional. If referring to a piece of an object instead of an entire object, this string + // should contain information to identify the sub-object. For example, if the object + // reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + FieldPath string +} + +// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. +type LocalObjectReference struct { + //TODO: Add other useful fields. apiVersion, kind, uid? 
+ Name string
+}
+
+// TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.
+type TypedLocalObjectReference struct {
+ // APIGroup is the group for the resource being referenced.
+ // If APIGroup is not specified, the specified Kind must be in the core API group.
+ // For any other third-party types, APIGroup is required.
+ // +optional
+ APIGroup *string
+ // Kind is the type of resource being referenced
+ Kind string
+ // Name is the name of the resource being referenced
+ Name string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type SerializedReference struct {
+ metav1.TypeMeta
+ // +optional
+ Reference ObjectReference
+}
+
+type EventSource struct {
+ // Component from which the event is generated.
+ // +optional
+ Component string
+ // Node name on which the event is generated.
+ // +optional
+ Host string
+}
+
+// Valid values for event types (new types could be added in the future)
+const (
+ // Information only and will not cause any problems
+ EventTypeNormal string = "Normal"
+ // These events are to warn that something might go wrong
+ EventTypeWarning string = "Warning"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Event is a report of an event somewhere in the cluster.
+// TODO: Decide whether to store these separately or with the object they apply to.
+type Event struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+
+ // Required. The object that this event is about. Mapped to events.Event.regarding
+ // +optional
+ InvolvedObject ObjectReference
+
+ // Optional; this should be a short, machine understandable string that gives the reason
+ // for this event being generated. For example, if the event is reporting that a container
+ // can't start, the Reason might be "ImageNotFound".
+ // TODO: provide exact specification for format.
+ // +optional
+ Reason string
+
+ // Optional. A human-readable description of the status of this operation.
+ // TODO: decide on maximum length. Mapped to events.Event.note
+ // +optional
+ Message string
+
+ // Optional. The component reporting this event. Should be a short machine understandable string.
+ // +optional
+ Source EventSource
+
+ // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
+ // +optional
+ FirstTimestamp metav1.Time
+
+ // The time at which the most recent occurrence of this event was recorded.
+ // +optional
+ LastTimestamp metav1.Time
+
+ // The number of times this event has occurred.
+ // +optional
+ Count int32
+
+ // Type of this event (Normal, Warning), new types could be added in the future.
+ // +optional
+ Type string
+
+ // Time when this Event was first observed.
+ // +optional
+ EventTime metav1.MicroTime
+
+ // Data about the Event series this event represents or nil if it's a singleton Event.
+ // +optional
+ Series *EventSeries
+
+ // What action was taken/failed regarding the Regarding object.
+ // +optional
+ Action string
+
+ // Optional secondary object for more complex actions.
+ // +optional
+ Related *ObjectReference
+
+ // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+ // +optional
+ ReportingController string
+
+ // ID of the controller instance, e.g. `kubelet-xyzf`. 
+ // +optional + ReportingInstance string +} + +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 + // Time of the last occurrence observed + LastObservedTime metav1.MicroTime + // State of this Series: Ongoing or Finished + State EventSeriesState +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Event +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List metainternalversion.List + +// A type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind +type LimitRangeItem struct { + // Type of resource that this limit applies to + // +optional + Type LimitType + // Max usage constraints on this kind by resource name + // +optional + Max ResourceList + // Min usage constraints on this kind by resource name + // +optional + Min ResourceList + // Default resource requirement limit value by resource name. + // +optional + Default ResourceList + // DefaultRequest resource requirement request value by resource name. + // +optional + DefaultRequest ResourceList + // MaxLimitRequestRatio represents the max burst value for the named resource + // +optional + MaxLimitRequestRatio ResourceList +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced + Limits []LimitRangeItem +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRange sets resource usage limits for each kind of resource in a Namespace +type LimitRange struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the limits enforced + // +optional + Spec LimitRangeSpec +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRangeList is a list of LimitRange items. 
+type LimitRangeList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of LimitRange objects + Items []LimitRange +} + +// The following identify resource constants for Kubernetes object types +const ( + // Pods, number + ResourcePods ResourceName = "pods" + // Services, number + ResourceServices ResourceName = "services" + // ReplicationControllers, number + ResourceReplicationControllers ResourceName = "replicationcontrollers" + // ResourceQuotas, number + ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourceConfigMaps, number + ResourceConfigMaps ResourceName = "configmaps" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" + // CPU request, in cores. (500m = .5 cores) + ResourceRequestsCPU ResourceName = "requests.cpu" + // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsMemory ResourceName = "requests.memory" + // Storage request, in bytes + ResourceRequestsStorage ResourceName = "requests.storage" + // Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" + // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsMemory ResourceName = "limits.memory" + // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" +) + +// The following identify resource prefix for Kubernetes object types +const ( + // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. + ResourceRequestsHugePagesPrefix = "requests.hugepages-" + // Default resource requests prefix + DefaultResourceRequestsPrefix = "requests." +) + +// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +type ResourceQuotaScope string + +const ( + // Match all pod objects where spec.activeDeadlineSeconds + ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" + // Match all pod objects where !spec.activeDeadlineSeconds + ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" + // Match all pod objects that have best effort quality of service + ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" + // Match all pod objects that do not have best effort quality of service + ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" + // Match all pod objects that have priority class mentioned + ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass" +) + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota +type ResourceQuotaSpec struct { + // Hard is the set of desired hard limits for each named resource + // +optional + Hard ResourceList + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. 
+ // +optional + Scopes []ResourceQuotaScope + // ScopeSelector is also a collection of filters like Scopes that must match each object tracked by a quota + // but expressed using ScopeSelectorOperator in combination with possible values. + // +optional + ScopeSelector *ScopeSelector +} + +// A scope selector represents the AND of the selectors represented +// by the scoped-resource selector terms. +type ScopeSelector struct { + // A list of scope selector requirements by scope of the resources. + // +optional + MatchExpressions []ScopedResourceSelectorRequirement +} + +// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator +// that relates the scope name and values. +type ScopedResourceSelectorRequirement struct { + // The name of the scope that the selector applies to. + ScopeName ResourceQuotaScope + // Represents a scope's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + Operator ScopeSelectorOperator + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. + // This array is replaced during a strategic merge patch. + // +optional + Values []string +} + +// A scope selector operator is the set of operators that can be used in +// a scope selector requirement. +type ScopeSelectorOperator string + +const ( + ScopeSelectorOpIn ScopeSelectorOperator = "In" + ScopeSelectorOpNotIn ScopeSelectorOperator = "NotIn" + ScopeSelectorOpExists ScopeSelectorOperator = "Exists" + ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist" +) + +// ResourceQuotaStatus defines the enforced hard limits and observed use +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource + // +optional + Hard ResourceList + // Used is the current observed total usage of the resource in the namespace + // +optional + Used ResourceList +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Spec defines the desired quota + // +optional + Spec ResourceQuotaSpec + + // Status defines the actual enforced quota and its current usage + // +optional + Status ResourceQuotaStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuotaList is a list of ResourceQuota items +type ResourceQuotaList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is a list of ResourceQuota objects + Items []ResourceQuota +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. + // +optional + Data map[string][]byte + + // Used to facilitate programmatic handling of secret data. 
+ // +optional
+ Type SecretType
+}
+
+const MaxSecretSize = 1 * 1024 * 1024
+
+type SecretType string
+
+const (
+ // SecretTypeOpaque is the default; arbitrary user-defined data
+ SecretTypeOpaque SecretType = "Opaque"
+
+ // SecretTypeServiceAccountToken contains a token that identifies a service account to the API
+ //
+ // Required fields:
+ // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
+ // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
+ // - Secret.Data["token"] - a token that identifies the service account to the API
+ SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
+
+ // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
+ ServiceAccountNameKey = "kubernetes.io/service-account.name"
+ // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
+ ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
+ // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
+ ServiceAccountTokenKey = "token"
+ // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
+ ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
+ // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
+ ServiceAccountRootCAKey = "ca.crt"
+ // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
+ ServiceAccountNamespaceKey = "namespace"
+
+ // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
+ //
+ // Required fields:
+ // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
+ SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
+
+ // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
+ DockerConfigKey = ".dockercfg"
+
+ // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
+ //
+ // Required fields:
+ // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
+ SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
+
+ // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
+ DockerConfigJsonKey = ".dockerconfigjson"
+
+ // SecretTypeBasicAuth contains data needed for basic authentication.
+ //
+ // At least one of the following fields is required:
+ // - Secret.Data["username"] - username used for authentication
+ // - Secret.Data["password"] - password or token needed for authentication
+ SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
+
+ // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
+ BasicAuthUsernameKey = "username"
+ // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
+ BasicAuthPasswordKey = "password"
+
+ // SecretTypeSSHAuth contains data needed for SSH authentication. 
+ // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" + + // SecretTypeTLS contains information about a TLS client or server secret. It + // is primarily used with TLS termination of the Ingress resource, but may be + // used in other types. + // + // Required fields: + // - Secret.Data["tls.key"] - TLS private key. + // Secret.Data["tls.crt"] - TLS certificate. + // TODO: Consider supporting different formats, specifying CA/destinationCA. + SecretTypeTLS SecretType = "kubernetes.io/tls" + + // TLSCertKey is the key for tls certificates in a TLS secret. + TLSCertKey = "tls.crt" + // TLSPrivateKeyKey is the key for the private key field in a TLS secret. + TLSPrivateKeyKey = "tls.key" + // SecretTypeBootstrapToken is used during the automated bootstrap process (first + // implemented by kubeadm). It stores tokens that are used to sign well known + // ConfigMaps. They are used for authn. + SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type SecretList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []Secret +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigMap holds configuration data for components or applications to consume. +type ConfigMap struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // Data contains the configuration data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // Values with non-UTF-8 byte sequences must use the BinaryData field. + // The keys stored in Data must not overlap with the keys in + // the BinaryData field, this is enforced during validation process. + // +optional + Data map[string]string + + // BinaryData contains the binary data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // BinaryData can contain byte sequences that are not in the UTF-8 range. + // The keys stored in BinaryData must not overlap with the ones in + // the Data field, this is enforced during validation process. + // Using this field will require 1.10+ apiserver and + // kubelet. + // +optional + BinaryData map[string][]byte +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigMapList is a resource containing a list of ConfigMap objects. +type ConfigMapList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + // Items is the list of ConfigMaps. + Items []ConfigMap +} + +// These constants are for remote command execution and port forwarding and are +// used by both the client side and server side components. +// +// This is probably not the ideal place for them, but it didn't seem worth it +// to create pkg/exec and pkg/portforward just to contain a single file with +// constants in it. Suggestions for more appropriate alternatives are +// definitely welcome! 
+const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParam = "command" + + // Name of header that specifies stream type + StreamType = "streamType" + // Value for streamType header for stdin stream + StreamTypeStdin = "stdin" + // Value for streamType header for stdout stream + StreamTypeStdout = "stdout" + // Value for streamType header for stderr stream + StreamTypeStderr = "stderr" + // Value for streamType header for data stream + StreamTypeData = "data" + // Value for streamType header for error stream + StreamTypeError = "error" + // Value for streamType header for terminal resize stream + StreamTypeResize = "resize" + + // Name of header that specifies the port being forwarded + PortHeader = "port" + // Name of header that specifies a request ID used to associate the error + // and data streams for a single forwarded connection + PortForwardRequestIDHeader = "requestID" +) + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType + Status ConditionStatus + // +optional + Message string + // +optional + Error string +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta + // +optional + metav1.ObjectMeta + + // +optional + Conditions []ComponentCondition +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ComponentStatusList struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []ComponentStatus +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + SELinuxOptions *SELinuxOptions + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *int64 + // The GID to run the entrypoint of the container process. + // Uses runtime default if unset. 
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ // +optional
+ RunAsGroup *int64
+ // Indicates that the container must run as a non-root user.
+ // If true, the Kubelet will validate the image at runtime to ensure that it
+ // does not run as UID 0 (root) and fail to start the container if it does.
+ // If unset or false, no such validation will be performed.
+ // May also be set in PodSecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ // +optional
+ RunAsNonRoot *bool
+ // The read-only root filesystem allows you to restrict the locations that an application can write
+ // files to, ensuring the persistent data can only be written to mounts.
+ // +optional
+ ReadOnlyRootFilesystem *bool
+ // AllowPrivilegeEscalation controls whether a process can gain more
+ // privileges than its parent process. This bool directly controls if
+ // the no_new_privs flag will be set on the container process.
+ // +optional
+ AllowPrivilegeEscalation *bool
+ // ProcMount denotes the type of proc mount to use for the containers.
+ // The default is DefaultProcMount which uses the container runtime defaults for
+ // readonly paths and masked paths.
+ // +optional
+ ProcMount *ProcMountType
+}
+
+type ProcMountType string
+
+const (
+ // DefaultProcMount uses the container runtime defaults for readonly and masked
+ // paths for /proc. Most container runtimes mask certain paths in /proc to avoid
+ // accidental security exposure of special devices or information.
+ DefaultProcMount ProcMountType = "Default"
+
+ // UnmaskedProcMount bypasses the default masking behavior of the container
+ // runtime and ensures the newly created /proc of the container stays intact with
+ // no modifications.
+ UnmaskedProcMount ProcMountType = "Unmasked"
+)
+
+// SELinuxOptions are the labels to be applied to the container.
+type SELinuxOptions struct {
+ // SELinux user label
+ // +optional
+ User string
+ // SELinux role label
+ // +optional
+ Role string
+ // SELinux type label
+ // +optional
+ Type string
+ // SELinux level label.
+ // +optional
+ Level string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record
+// the global allocation state of the cluster. The schema of Range and Data is generic, in that Range
+// should be a string representation of the inputs to a range (for instance, for IP allocation it
+// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a
+// binary range. Consumers should use annotations to record additional information (schema version,
+// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation
+// of the cluster, thus the object is less strongly typed than most.
+type RangeAllocation struct {
+ metav1.TypeMeta
+ // +optional
+ metav1.ObjectMeta
+ // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or
+ // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define
+ // a start and end unless there is an implicit end.
+ Range string
+ // A byte array representing the serialized state of a range allocation. Additional clarifiers on
+ // the type or format of data should be represented with annotations. 
For IP allocations, this is + // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing + // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). + Data []byte +} + +const ( + // "default-scheduler" is the name of default scheduler. + DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int32 = 1 +) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go new file mode 100644 index 000000000..926a39789 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go @@ -0,0 +1,547 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "reflect" + + "k8s.io/api/core/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/apps" + "k8s.io/kubernetes/pkg/apis/core" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddConversionFuncs( + Convert_core_Pod_To_v1_Pod, + Convert_core_PodSpec_To_v1_PodSpec, + Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_core_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_Pod_To_core_Pod, + Convert_v1_PodSpec_To_core_PodSpec, + Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec, + Convert_v1_Secret_To_core_Secret, + Convert_v1_ServiceSpec_To_core_ServiceSpec, + Convert_v1_ResourceList_To_core_ResourceList, + Convert_v1_ReplicationController_To_apps_ReplicaSet, + Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec, + Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus, + Convert_apps_ReplicaSet_To_v1_ReplicationController, + Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec, + Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus, + ) + if err != nil { + return err + } + + // Add field conversion funcs. 
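+ // (Field label conversion keeps old field selectors working against the
+ // current schema; for example, the deprecated "spec.host" pod selector
+ // below is rewritten to "spec.nodeName".)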
+ err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Pod"), + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", + "metadata.namespace", + "spec.nodeName", + "spec.restartPolicy", + "spec.schedulerName", + "spec.serviceAccountName", + "status.phase", + "status.podIP", + "status.nominatedNodeName": + return label, value, nil + // This is for backwards compatibility with old v1 clients which send spec.host + case "spec.host": + return "spec.nodeName", value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Node"), + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + case "spec.unschedulable": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("ReplicationController"), + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", + "metadata.namespace", + "status.replicas": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + if err != nil { + return err + } + if err := AddFieldLabelConversionsForEvent(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForNamespace(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForSecret(scheme); err != nil { + return err + } + return nil +} + +func Convert_v1_ReplicationController_To_apps_ReplicaSet(in *v1.ReplicationController, out *apps.ReplicaSet, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(in *v1.ReplicationControllerSpec, out *apps.ReplicaSetSpec, s conversion.Scope) error { + out.Replicas = *in.Replicas + out.MinReadySeconds = in.MinReadySeconds + if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&in.Selector, out.Selector, s) + } + if in.Template != nil { + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, &out.Template, s); err != nil { + return err + } + } + return nil +} + +func Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(in *v1.ReplicationControllerStatus, out *apps.ReplicaSetStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + for _, cond := range in.Conditions { + out.Conditions = append(out.Conditions, apps.ReplicaSetCondition{ + Type: apps.ReplicaSetConditionType(cond.Type), + Status: core.ConditionStatus(cond.Status), + LastTransitionTime: cond.LastTransitionTime, + Reason: cond.Reason, + Message: cond.Message, + }) + } + return nil +} + +func Convert_apps_ReplicaSet_To_v1_ReplicationController(in *apps.ReplicaSet, out *v1.ReplicationController, s conversion.Scope) error { + 
out.ObjectMeta = in.ObjectMeta + if err := Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + fieldErr, ok := err.(*field.Error) + if !ok { + return err + } + if out.Annotations == nil { + out.Annotations = make(map[string]string) + } + out.Annotations[v1.NonConvertibleAnnotationPrefix+"/"+fieldErr.Field] = reflect.ValueOf(fieldErr.BadValue).String() + } + if err := Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(in *apps.ReplicaSetSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { + out.Replicas = new(int32) + *out.Replicas = in.Replicas + out.MinReadySeconds = in.MinReadySeconds + var invalidErr error + if in.Selector != nil { + invalidErr = metav1.Convert_v1_LabelSelector_To_Map_string_To_string(in.Selector, &out.Selector, s) + } + out.Template = new(v1.PodTemplateSpec) + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil { + return err + } + return invalidErr +} + +func Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(in *apps.ReplicaSetStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + for _, cond := range in.Conditions { + out.Conditions = append(out.Conditions, v1.ReplicationControllerCondition{ + Type: v1.ReplicationControllerConditionType(cond.Type), + Status: v1.ConditionStatus(cond.Status), + LastTransitionTime: cond.LastTransitionTime, + Reason: cond.Reason, + Message: cond.Message, + }) + } + return nil +} + +func Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { + out.Replicas = &in.Replicas + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if in.Template != nil { + out.Template = new(v1.PodTemplateSpec) + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error { + if in.Replicas != nil { + out.Replicas = *in.Replicas + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = in.Selector + if in.Template != nil { + out.Template = new(core.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, out.Template, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { + return err + } + + // drop init container annotations so they don't take effect on legacy kubelets. + // remove this once the oldest supported kubelet no longer honors the annotations over the field. 
+ out.Annotations = dropInitContainerAnnotations(out.Annotations) + + return nil +} + +func Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in, out, s); err != nil { + return err + } + + // drop init container annotations so they don't show up as differences when receiving requests from old clients + out.Annotations = dropInitContainerAnnotations(out.Annotations) + + return nil +} + +// The following two v1.PodSpec conversions are done here to support v1.ServiceAccount +// as an alias for ServiceAccountName. +func Convert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error { + if err := autoConvert_core_PodSpec_To_v1_PodSpec(in, out, s); err != nil { + return err + } + + // DeprecatedServiceAccount is an alias for ServiceAccountName. + out.DeprecatedServiceAccount = in.ServiceAccountName + + if in.SecurityContext != nil { + // the host namespace fields have to be handled here for backward compatibility + // with v1.0.0 + out.HostPID = in.SecurityContext.HostPID + out.HostNetwork = in.SecurityContext.HostNetwork + out.HostIPC = in.SecurityContext.HostIPC + out.ShareProcessNamespace = in.SecurityContext.ShareProcessNamespace + } + + return nil +} + +func Convert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error { + if err := autoConvert_v1_PodSpec_To_core_PodSpec(in, out, s); err != nil { + return err + } + + // We support DeprecatedServiceAccount as an alias for ServiceAccountName. + // If both are specified, ServiceAccountName (the new field) wins. + if in.ServiceAccountName == "" { + out.ServiceAccountName = in.DeprecatedServiceAccount + } + + // the host namespace fields have to be handled specially for backward compatibility + // with v1.0.0 + if out.SecurityContext == nil { + out.SecurityContext = new(core.PodSecurityContext) + } + out.SecurityContext.HostNetwork = in.HostNetwork + out.SecurityContext.HostPID = in.HostPID + out.SecurityContext.HostIPC = in.HostIPC + out.SecurityContext.ShareProcessNamespace = in.ShareProcessNamespace + + return nil +} + +func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error { + if err := autoConvert_v1_Pod_To_core_Pod(in, out, s); err != nil { + return err + } + + // drop init container annotations so they don't show up as differences when receiving requests from old clients + out.Annotations = dropInitContainerAnnotations(out.Annotations) + + return nil +} + +func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error { + if err := autoConvert_core_Pod_To_v1_Pod(in, out, s); err != nil { + return err + } + + // drop init container annotations so they don't take effect on legacy kubelets. + // remove this once the oldest supported kubelet no longer honors the annotations over the field. 
+ out.Annotations = dropInitContainerAnnotations(out.Annotations) + + return nil +} + +func Convert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error { + if err := autoConvert_v1_Secret_To_core_Secret(in, out, s); err != nil { + return err + } + + // StringData overwrites Data + if len(in.StringData) > 0 { + if out.Data == nil { + out.Data = map[string][]byte{} + } + for k, v := range in.StringData { + out.Data[k] = []byte(v) + } + } + + return nil +} + +func Convert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { + if in.Capabilities != nil { + out.Capabilities = new(v1.Capabilities) + if err := Convert_core_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil { + return err + } + } else { + out.Capabilities = nil + } + out.Privileged = in.Privileged + if in.SELinuxOptions != nil { + out.SELinuxOptions = new(v1.SELinuxOptions) + if err := Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + out.RunAsUser = in.RunAsUser + out.RunAsGroup = in.RunAsGroup + out.RunAsNonRoot = in.RunAsNonRoot + out.ReadOnlyRootFilesystem = in.ReadOnlyRootFilesystem + out.AllowPrivilegeEscalation = in.AllowPrivilegeEscalation + if in.ProcMount != nil { + pm := string(*in.ProcMount) + pmt := v1.ProcMountType(pm) + out.ProcMount = &pmt + } + return nil +} + +func Convert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { + out.SupplementalGroups = in.SupplementalGroups + if in.SELinuxOptions != nil { + out.SELinuxOptions = new(v1.SELinuxOptions) + if err := Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + out.RunAsUser = in.RunAsUser + out.RunAsGroup = in.RunAsGroup + out.RunAsNonRoot = in.RunAsNonRoot + out.FSGroup = in.FSGroup + if in.Sysctls != nil { + out.Sysctls = make([]v1.Sysctl, len(in.Sysctls)) + for i, sysctl := range in.Sysctls { + if err := Convert_core_Sysctl_To_v1_Sysctl(&sysctl, &out.Sysctls[i], s); err != nil { + return err + } + } + } + return nil +} + +func Convert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error { + out.SupplementalGroups = in.SupplementalGroups + if in.SELinuxOptions != nil { + out.SELinuxOptions = new(core.SELinuxOptions) + if err := Convert_v1_SELinuxOptions_To_core_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + return err + } + } else { + out.SELinuxOptions = nil + } + out.RunAsUser = in.RunAsUser + out.RunAsGroup = in.RunAsGroup + out.RunAsNonRoot = in.RunAsNonRoot + out.FSGroup = in.FSGroup + if in.Sysctls != nil { + out.Sysctls = make([]core.Sysctl, len(in.Sysctls)) + for i, sysctl := range in.Sysctls { + if err := Convert_v1_Sysctl_To_core_Sysctl(&sysctl, &out.Sysctls[i], s); err != nil { + return err + } + } + } + + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_v1_ResourceList_To_core_ResourceList(in *v1.ResourceList, out *core.ResourceList, s conversion.Scope) error { + if *in == nil { + return nil + } + if *out == nil { + *out = make(core.ResourceList, len(*in)) + } + for key, val := range *in { + // Moved to defaults + // TODO(#18538): We round up resource values to milli scale to maintain API 
compatibility. + // In the future, we should instead reject values that need rounding. + // const milliScale = -3 + // val.RoundUp(milliScale) + + (*out)[core.ResourceName(key)] = val + } + return nil +} + +func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Event"), + func(label, value string) (string, string, error) { + switch label { + case "involvedObject.kind", + "involvedObject.namespace", + "involvedObject.name", + "involvedObject.uid", + "involvedObject.apiVersion", + "involvedObject.resourceVersion", + "involvedObject.fieldPath", + "reason", + "source", + "type", + "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +func AddFieldLabelConversionsForNamespace(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Namespace"), + func(label, value string) (string, string, error) { + switch label { + case "status.phase", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Secret"), + func(label, value string) (string, string, error) { + switch label { + case "type", + "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +var initContainerAnnotations = map[string]bool{ + "pod.beta.kubernetes.io/init-containers": true, + "pod.alpha.kubernetes.io/init-containers": true, + "pod.beta.kubernetes.io/init-container-statuses": true, + "pod.alpha.kubernetes.io/init-container-statuses": true, +} + +// dropInitContainerAnnotations returns a copy of the annotations with init container annotations removed, +// or the original annotations if no init container annotations were present. +// +// this can be removed once no clients prior to 1.8 are supported, and no kubelets prior to 1.8 can be run +// (we don't support kubelets older than 2 versions skewed from the apiserver, but we don't prevent them, either) +func dropInitContainerAnnotations(oldAnnotations map[string]string) map[string]string { + if len(oldAnnotations) == 0 { + return oldAnnotations + } + + found := false + for k := range initContainerAnnotations { + if _, ok := oldAnnotations[k]; ok { + found = true + break + } + } + if !found { + return oldAnnotations + } + + newAnnotations := make(map[string]string, len(oldAnnotations)) + for k, v := range oldAnnotations { + if !initContainerAnnotations[k] { + newAnnotations[k] = v + } + } + return newAnnotations +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go new file mode 100644 index 000000000..172d3797b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go @@ -0,0 +1,425 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/util/parsers" + utilpointer "k8s.io/utils/pointer" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_ResourceList(obj *v1.ResourceList) { + for key, val := range *obj { + // TODO(#18538): We round up resource values to milli scale to maintain API compatibility. + // In the future, we should instead reject values that need rounding. + const milliScale = -3 + val.RoundUp(milliScale) + + (*obj)[v1.ResourceName(key)] = val + } +} + +func SetDefaults_ReplicationController(obj *v1.ReplicationController) { + var labels map[string]string + if obj.Spec.Template != nil { + labels = obj.Spec.Template.Labels + } + // TODO: support templates defined elsewhere when we support them in the API + if labels != nil { + if len(obj.Spec.Selector) == 0 { + obj.Spec.Selector = labels + } + if len(obj.Labels) == 0 { + obj.Labels = labels + } + } + if obj.Spec.Replicas == nil { + obj.Spec.Replicas = new(int32) + *obj.Spec.Replicas = 1 + } +} +func SetDefaults_Volume(obj *v1.Volume) { + if utilpointer.AllPtrFieldsNil(&obj.VolumeSource) { + obj.VolumeSource = v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + } + } +} +func SetDefaults_ContainerPort(obj *v1.ContainerPort) { + if obj.Protocol == "" { + obj.Protocol = v1.ProtocolTCP + } +} +func SetDefaults_Container(obj *v1.Container) { + if obj.ImagePullPolicy == "" { + // Ignore error and assume it has been validated elsewhere + _, tag, _, _ := parsers.ParseImageName(obj.Image) + + // Check image tag + if tag == "latest" { + obj.ImagePullPolicy = v1.PullAlways + } else { + obj.ImagePullPolicy = v1.PullIfNotPresent + } + } + if obj.TerminationMessagePath == "" { + obj.TerminationMessagePath = v1.TerminationMessagePathDefault + } + if obj.TerminationMessagePolicy == "" { + obj.TerminationMessagePolicy = v1.TerminationMessageReadFile + } +} + +func SetDefaults_Service(obj *v1.Service) { + if obj.Spec.SessionAffinity == "" { + obj.Spec.SessionAffinity = v1.ServiceAffinityNone + } + if obj.Spec.SessionAffinity == v1.ServiceAffinityNone { + obj.Spec.SessionAffinityConfig = nil + } + if obj.Spec.SessionAffinity == v1.ServiceAffinityClientIP { + if obj.Spec.SessionAffinityConfig == nil || obj.Spec.SessionAffinityConfig.ClientIP == nil || obj.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds == nil { + timeoutSeconds := v1.DefaultClientIPServiceAffinitySeconds + obj.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{ + ClientIP: &v1.ClientIPConfig{ + TimeoutSeconds: &timeoutSeconds, + }, + } + } + } + if obj.Spec.Type == "" { + obj.Spec.Type = v1.ServiceTypeClusterIP + } + for i := range obj.Spec.Ports { + sp := &obj.Spec.Ports[i] + if sp.Protocol == "" { + sp.Protocol = v1.ProtocolTCP + } + if sp.TargetPort == intstr.FromInt(0) || sp.TargetPort == intstr.FromString("") { + sp.TargetPort = intstr.FromInt(int(sp.Port)) + } + } + // Defaults 
ExternalTrafficPolicy field for NodePort / LoadBalancer service
+ // to Cluster for consistency.
+ if (obj.Spec.Type == v1.ServiceTypeNodePort ||
+ obj.Spec.Type == v1.ServiceTypeLoadBalancer) &&
+ obj.Spec.ExternalTrafficPolicy == "" {
+ obj.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
+ }
+}
+func SetDefaults_Pod(obj *v1.Pod) {
+ // If limits are specified, but requests are not, default requests to limits
+ // This is done here rather than a more specific defaulting pass on v1.ResourceRequirements
+ // because we only want this defaulting semantic to take place on a v1.Pod and not a v1.PodTemplate
+ for i := range obj.Spec.Containers {
+ // set requests to limits if requests are not specified, but limits are
+ if obj.Spec.Containers[i].Resources.Limits != nil {
+ if obj.Spec.Containers[i].Resources.Requests == nil {
+ obj.Spec.Containers[i].Resources.Requests = make(v1.ResourceList)
+ }
+ for key, value := range obj.Spec.Containers[i].Resources.Limits {
+ if _, exists := obj.Spec.Containers[i].Resources.Requests[key]; !exists {
+ obj.Spec.Containers[i].Resources.Requests[key] = *(value.Copy())
+ }
+ }
+ }
+ }
+ for i := range obj.Spec.InitContainers {
+ if obj.Spec.InitContainers[i].Resources.Limits != nil {
+ if obj.Spec.InitContainers[i].Resources.Requests == nil {
+ obj.Spec.InitContainers[i].Resources.Requests = make(v1.ResourceList)
+ }
+ for key, value := range obj.Spec.InitContainers[i].Resources.Limits {
+ if _, exists := obj.Spec.InitContainers[i].Resources.Requests[key]; !exists {
+ obj.Spec.InitContainers[i].Resources.Requests[key] = *(value.Copy())
+ }
+ }
+ }
+ }
+ if obj.Spec.EnableServiceLinks == nil {
+ enableServiceLinks := v1.DefaultEnableServiceLinks
+ obj.Spec.EnableServiceLinks = &enableServiceLinks
+ }
+}
+func SetDefaults_PodSpec(obj *v1.PodSpec) {
+ // New fields added here will break upgrade tests:
+ // https://github.com/kubernetes/kubernetes/issues/69445
+ // In most cases the new defaulted field can be added to SetDefaults_Pod instead of here, so
+ // that it only materializes in the Pod object and not all objects with a PodSpec field. 
+ if obj.DNSPolicy == "" { + obj.DNSPolicy = v1.DNSClusterFirst + } + if obj.RestartPolicy == "" { + obj.RestartPolicy = v1.RestartPolicyAlways + } + if obj.HostNetwork { + defaultHostNetworkPorts(&obj.Containers) + defaultHostNetworkPorts(&obj.InitContainers) + } + if obj.SecurityContext == nil { + obj.SecurityContext = &v1.PodSecurityContext{} + } + if obj.TerminationGracePeriodSeconds == nil { + period := int64(v1.DefaultTerminationGracePeriodSeconds) + obj.TerminationGracePeriodSeconds = &period + } + if obj.SchedulerName == "" { + obj.SchedulerName = v1.DefaultSchedulerName + } +} +func SetDefaults_Probe(obj *v1.Probe) { + if obj.TimeoutSeconds == 0 { + obj.TimeoutSeconds = 1 + } + if obj.PeriodSeconds == 0 { + obj.PeriodSeconds = 10 + } + if obj.SuccessThreshold == 0 { + obj.SuccessThreshold = 1 + } + if obj.FailureThreshold == 0 { + obj.FailureThreshold = 3 + } +} +func SetDefaults_SecretVolumeSource(obj *v1.SecretVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(v1.SecretVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_ConfigMapVolumeSource(obj *v1.ConfigMapVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(v1.ConfigMapVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_DownwardAPIVolumeSource(obj *v1.DownwardAPIVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(v1.DownwardAPIVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_Secret(obj *v1.Secret) { + if obj.Type == "" { + obj.Type = v1.SecretTypeOpaque + } +} +func SetDefaults_ProjectedVolumeSource(obj *v1.ProjectedVolumeSource) { + if obj.DefaultMode == nil { + perm := int32(v1.ProjectedVolumeSourceDefaultMode) + obj.DefaultMode = &perm + } +} +func SetDefaults_ServiceAccountTokenProjection(obj *v1.ServiceAccountTokenProjection) { + hour := int64(time.Hour.Seconds()) + if obj.ExpirationSeconds == nil { + obj.ExpirationSeconds = &hour + } +} +func SetDefaults_PersistentVolume(obj *v1.PersistentVolume) { + if obj.Status.Phase == "" { + obj.Status.Phase = v1.VolumePending + } + if obj.Spec.PersistentVolumeReclaimPolicy == "" { + obj.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimRetain + } + if obj.Spec.VolumeMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + obj.Spec.VolumeMode = new(v1.PersistentVolumeMode) + *obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem + } +} +func SetDefaults_PersistentVolumeClaim(obj *v1.PersistentVolumeClaim) { + if obj.Status.Phase == "" { + obj.Status.Phase = v1.ClaimPending + } + if obj.Spec.VolumeMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + obj.Spec.VolumeMode = new(v1.PersistentVolumeMode) + *obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem + } +} +func SetDefaults_ISCSIVolumeSource(obj *v1.ISCSIVolumeSource) { + if obj.ISCSIInterface == "" { + obj.ISCSIInterface = "default" + } +} +func SetDefaults_ISCSIPersistentVolumeSource(obj *v1.ISCSIPersistentVolumeSource) { + if obj.ISCSIInterface == "" { + obj.ISCSIInterface = "default" + } +} +func SetDefaults_AzureDiskVolumeSource(obj *v1.AzureDiskVolumeSource) { + if obj.CachingMode == nil { + obj.CachingMode = new(v1.AzureDataDiskCachingMode) + *obj.CachingMode = v1.AzureDataDiskCachingReadWrite + } + if obj.Kind == nil { + obj.Kind = new(v1.AzureDataDiskKind) + *obj.Kind = v1.AzureSharedBlobDisk + } + if obj.FSType == nil { + obj.FSType = new(string) + *obj.FSType = "ext4" + } + if obj.ReadOnly == nil { + obj.ReadOnly = new(bool) + *obj.ReadOnly = false + } +} 
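+// Usage sketch (illustrative only; not part of the upstream file): these
+// defaulters are normally registered with a runtime.Scheme via RegisterDefaults
+// (see addDefaultingFuncs above) and applied by the codec machinery. Calling
+// two of them directly shows their effect on a minimal pod:
+//
+//   pod := &v1.Pod{Spec: v1.PodSpec{
+//       Containers: []v1.Container{{Name: "web", Image: "nginx:latest"}},
+//   }}
+//   SetDefaults_PodSpec(&pod.Spec)
+//   // pod.Spec.DNSPolicy == v1.DNSClusterFirst
+//   // pod.Spec.RestartPolicy == v1.RestartPolicyAlways
+//   // pod.Spec.SchedulerName == v1.DefaultSchedulerName
+//   SetDefaults_Container(&pod.Spec.Containers[0])
+//   // ":latest" image tag => ImagePullPolicy == v1.PullAlways;
+//   // any other tag would default to v1.PullIfNotPresent.
+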
+func SetDefaults_Endpoints(obj *v1.Endpoints) { + for i := range obj.Subsets { + ss := &obj.Subsets[i] + for i := range ss.Ports { + ep := &ss.Ports[i] + if ep.Protocol == "" { + ep.Protocol = v1.ProtocolTCP + } + } + } +} +func SetDefaults_HTTPGetAction(obj *v1.HTTPGetAction) { + if obj.Path == "" { + obj.Path = "/" + } + if obj.Scheme == "" { + obj.Scheme = v1.URISchemeHTTP + } +} +func SetDefaults_NamespaceStatus(obj *v1.NamespaceStatus) { + if obj.Phase == "" { + obj.Phase = v1.NamespaceActive + } +} +func SetDefaults_NodeStatus(obj *v1.NodeStatus) { + if obj.Allocatable == nil && obj.Capacity != nil { + obj.Allocatable = make(v1.ResourceList, len(obj.Capacity)) + for key, value := range obj.Capacity { + obj.Allocatable[key] = *(value.Copy()) + } + obj.Allocatable = obj.Capacity + } +} +func SetDefaults_ObjectFieldSelector(obj *v1.ObjectFieldSelector) { + if obj.APIVersion == "" { + obj.APIVersion = "v1" + } +} +func SetDefaults_LimitRangeItem(obj *v1.LimitRangeItem) { + // for container limits, we apply default values + if obj.Type == v1.LimitTypeContainer { + + if obj.Default == nil { + obj.Default = make(v1.ResourceList) + } + if obj.DefaultRequest == nil { + obj.DefaultRequest = make(v1.ResourceList) + } + + // If a default limit is unspecified, but the max is specified, default the limit to the max + for key, value := range obj.Max { + if _, exists := obj.Default[key]; !exists { + obj.Default[key] = *(value.Copy()) + } + } + // If a default limit is specified, but the default request is not, default request to limit + for key, value := range obj.Default { + if _, exists := obj.DefaultRequest[key]; !exists { + obj.DefaultRequest[key] = *(value.Copy()) + } + } + // If a default request is not specified, but the min is provided, default request to the min + for key, value := range obj.Min { + if _, exists := obj.DefaultRequest[key]; !exists { + obj.DefaultRequest[key] = *(value.Copy()) + } + } + } +} +func SetDefaults_ConfigMap(obj *v1.ConfigMap) { + if obj.Data == nil { + obj.Data = make(map[string]string) + } +} + +// With host networking default all container ports to host ports. 
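+// (A container port whose HostPort is 0 inherits its ContainerPort as the
+// host port, so host-network pods listen on the ports their containers expose.)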
+func defaultHostNetworkPorts(containers *[]v1.Container) { + for i := range *containers { + for j := range (*containers)[i].Ports { + if (*containers)[i].Ports[j].HostPort == 0 { + (*containers)[i].Ports[j].HostPort = (*containers)[i].Ports[j].ContainerPort + } + } + } +} + +func SetDefaults_RBDVolumeSource(obj *v1.RBDVolumeSource) { + if obj.RBDPool == "" { + obj.RBDPool = "rbd" + } + if obj.RadosUser == "" { + obj.RadosUser = "admin" + } + if obj.Keyring == "" { + obj.Keyring = "/etc/ceph/keyring" + } +} + +func SetDefaults_RBDPersistentVolumeSource(obj *v1.RBDPersistentVolumeSource) { + if obj.RBDPool == "" { + obj.RBDPool = "rbd" + } + if obj.RadosUser == "" { + obj.RadosUser = "admin" + } + if obj.Keyring == "" { + obj.Keyring = "/etc/ceph/keyring" + } +} + +func SetDefaults_ScaleIOVolumeSource(obj *v1.ScaleIOVolumeSource) { + if obj.StorageMode == "" { + obj.StorageMode = "ThinProvisioned" + } + if obj.FSType == "" { + obj.FSType = "xfs" + } +} + +func SetDefaults_ScaleIOPersistentVolumeSource(obj *v1.ScaleIOPersistentVolumeSource) { + if obj.StorageMode == "" { + obj.StorageMode = "ThinProvisioned" + } + if obj.FSType == "" { + obj.FSType = "xfs" + } +} + +func SetDefaults_HostPathVolumeSource(obj *v1.HostPathVolumeSource) { + typeVol := v1.HostPathUnset + if obj.Type == nil { + obj.Type = &typeVol + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go new file mode 100644 index 000000000..454e30183 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/core +// +k8s:conversion-gen-external-types=k8s.io/api/core/v1 +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/core/v1 + +// Package v1 is the v1 version of the API. +package v1 // import "k8s.io/kubernetes/pkg/apis/core/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go new file mode 100644 index 000000000..fa11a6b36 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go @@ -0,0 +1,527 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package helper + +import ( + "encoding/json" + "fmt" + "strings" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/kubernetes/pkg/apis/core/helper" +) + +// IsExtendedResourceName returns true if: +// 1. the resource name is not in the default namespace; +// 2. resource name does not have "requests." prefix, +// to avoid confusion with the convention in quota +// 3. it satisfies the rules in IsQualifiedName() after converted into quota resource name +func IsExtendedResourceName(name v1.ResourceName) bool { + if IsNativeResource(name) || strings.HasPrefix(string(name), v1.DefaultResourceRequestsPrefix) { + return false + } + // Ensure it satisfies the rules in IsQualifiedName() after converted into quota resource name + nameForQuota := fmt.Sprintf("%s%s", v1.DefaultResourceRequestsPrefix, string(name)) + if errs := validation.IsQualifiedName(string(nameForQuota)); len(errs) != 0 { + return false + } + return true +} + +// IsPrefixedNativeResource returns true if the resource name is in the +// *kubernetes.io/ namespace. +func IsPrefixedNativeResource(name v1.ResourceName) bool { + return strings.Contains(string(name), v1.ResourceDefaultNamespacePrefix) +} + +// IsNativeResource returns true if the resource name is in the +// *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are +// implicitly in the kubernetes.io/ namespace. +func IsNativeResource(name v1.ResourceName) bool { + return !strings.Contains(string(name), "/") || + IsPrefixedNativeResource(name) +} + +// IsHugePageResourceName returns true if the resource name has the huge page +// resource prefix. +func IsHugePageResourceName(name v1.ResourceName) bool { + return strings.HasPrefix(string(name), v1.ResourceHugePagesPrefix) +} + +// HugePageResourceName returns a ResourceName with the canonical hugepage +// prefix prepended for the specified page size. The page size is converted +// to its canonical representation. +func HugePageResourceName(pageSize resource.Quantity) v1.ResourceName { + return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceHugePagesPrefix, pageSize.String())) +} + +// HugePageSizeFromResourceName returns the page size for the specified huge page +// resource name. If the specified input is not a valid huge page resource name +// an error is returned. +func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, error) { + if !IsHugePageResourceName(name) { + return resource.Quantity{}, fmt.Errorf("resource name: %s is an invalid hugepage name", name) + } + pageSize := strings.TrimPrefix(string(name), v1.ResourceHugePagesPrefix) + return resource.ParseQuantity(pageSize) +} + +// IsOvercommitAllowed returns true if the resource is in the default +// namespace and is not hugepages. 
+func IsOvercommitAllowed(name v1.ResourceName) bool { + return IsNativeResource(name) && + !IsHugePageResourceName(name) +} + +// IsAttachableVolumeResourceName returns true if the resource name has the +// attachable volumes prefix. +func IsAttachableVolumeResourceName(name v1.ResourceName) bool { + return strings.HasPrefix(string(name), v1.ResourceAttachableVolumesPrefix) +} + +// IsScalarResourceName returns true for extended, hugepage, prefixed-native, +// and attachable-volume resource names. +func IsScalarResourceName(name v1.ResourceName) bool { + return IsExtendedResourceName(name) || IsHugePageResourceName(name) || + IsPrefixedNativeResource(name) || IsAttachableVolumeResourceName(name) +} + +// IsServiceIPSet checks whether the service's ClusterIP is set; +// it does not perform any validation. +func IsServiceIPSet(service *v1.Service) bool { + return service.Spec.ClusterIP != v1.ClusterIPNone && service.Spec.ClusterIP != "" +} + +// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, +// only if they do not already exist +func AddToNodeAddresses(addresses *[]v1.NodeAddress, addAddresses ...v1.NodeAddress) { + for _, add := range addAddresses { + exists := false + for _, existing := range *addresses { + if existing.Address == add.Address && existing.Type == add.Type { + exists = true + break + } + } + if !exists { + *addresses = append(*addresses, add) + } + } +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusEqual(l, r *v1.LoadBalancerStatus) bool { + return ingressSliceEqual(l.Ingress, r.Ingress) +} + +func ingressSliceEqual(lhs, rhs []v1.LoadBalancerIngress) bool { + if len(lhs) != len(rhs) { + return false + } + for i := range lhs { + if !ingressEqual(&lhs[i], &rhs[i]) { + return false + } + } + return true +} + +func ingressEqual(lhs, rhs *v1.LoadBalancerIngress) bool { + if lhs.IP != rhs.IP { + return false + } + if lhs.Hostname != rhs.Hostname { + return false + } + return true +} + +// TODO: make method on LoadBalancerStatus? +func LoadBalancerStatusDeepCopy(lb *v1.LoadBalancerStatus) *v1.LoadBalancerStatus { + c := &v1.LoadBalancerStatus{} + c.Ingress = make([]v1.LoadBalancerIngress, len(lb.Ingress)) + for i := range lb.Ingress { + c.Ingress[i] = lb.Ingress[i] + } + return c +} + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX.
+func GetAccessModesAsString(modes []v1.PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, v1.ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, v1.ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, v1.ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString +func GetAccessModesFromString(modes string) []v1.PersistentVolumeAccessMode { + strmodes := strings.Split(modes, ",") + accessModes := []v1.PersistentVolumeAccessMode{} + for _, s := range strmodes { + s = strings.Trim(s, " ") + switch { + case s == "RWO": + accessModes = append(accessModes, v1.ReadWriteOnce) + case s == "ROX": + accessModes = append(accessModes, v1.ReadOnlyMany) + case s == "RWX": + accessModes = append(accessModes, v1.ReadWriteMany) + } + } + return accessModes +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []v1.PersistentVolumeAccessMode) []v1.PersistentVolumeAccessMode { + accessModes := []v1.PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements +// labels.Selector. +func NodeSelectorRequirementsAsSelector(nsm []v1.NodeSelectorRequirement) (labels.Selector, error) { + if len(nsm) == 0 { + return labels.Nothing(), nil + } + selector := labels.NewSelector() + for _, expr := range nsm { + var op selection.Operator + switch expr.Operator { + case v1.NodeSelectorOpIn: + op = selection.In + case v1.NodeSelectorOpNotIn: + op = selection.NotIn + case v1.NodeSelectorOpExists: + op = selection.Exists + case v1.NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case v1.NodeSelectorOpGt: + op = selection.GreaterThan + case v1.NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +// NodeSelectorRequirementsAsFieldSelector converts the []NodeSelectorRequirement core type into a struct that implements +// fields.Selector.
+func NodeSelectorRequirementsAsFieldSelector(nsm []v1.NodeSelectorRequirement) (fields.Selector, error) { + if len(nsm) == 0 { + return fields.Nothing(), nil + } + + selectors := []fields.Selector{} + for _, expr := range nsm { + switch expr.Operator { + case v1.NodeSelectorOpIn: + if len(expr.Values) != 1 { + return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q", + len(expr.Values), expr.Operator) + } + selectors = append(selectors, fields.OneTermEqualSelector(expr.Key, expr.Values[0])) + + case v1.NodeSelectorOpNotIn: + if len(expr.Values) != 1 { + return nil, fmt.Errorf("unexpected number of value (%d) for node field selector operator %q", + len(expr.Values), expr.Operator) + } + selectors = append(selectors, fields.OneTermNotEqualSelector(expr.Key, expr.Values[0])) + + default: + return nil, fmt.Errorf("%q is not a valid node field selector operator", expr.Operator) + } + } + + return fields.AndSelectors(selectors...), nil +} + +// NodeSelectorRequirementKeysExistInNodeSelectorTerms checks if a NodeSelectorTerm with key is already specified in terms +func NodeSelectorRequirementKeysExistInNodeSelectorTerms(reqs []v1.NodeSelectorRequirement, terms []v1.NodeSelectorTerm) bool { + for _, req := range reqs { + for _, term := range terms { + for _, r := range term.MatchExpressions { + if r.Key == req.Key { + return true + } + } + } + } + return false +} + +// MatchNodeSelectorTerms checks whether the node labels and fields match node selector terms in ORed; +// nil or empty term matches no objects. +func MatchNodeSelectorTerms( + nodeSelectorTerms []v1.NodeSelectorTerm, + nodeLabels labels.Set, + nodeFields fields.Set, +) bool { + for _, req := range nodeSelectorTerms { + // nil or empty term selects no objects + if len(req.MatchExpressions) == 0 && len(req.MatchFields) == 0 { + continue + } + + if len(req.MatchExpressions) != 0 { + labelSelector, err := NodeSelectorRequirementsAsSelector(req.MatchExpressions) + if err != nil || !labelSelector.Matches(nodeLabels) { + continue + } + } + + if len(req.MatchFields) != 0 { + fieldSelector, err := NodeSelectorRequirementsAsFieldSelector(req.MatchFields) + if err != nil || !fieldSelector.Matches(nodeFields) { + continue + } + } + + return true + } + + return false +} + +// TopologySelectorRequirementsAsSelector converts the []TopologySelectorLabelRequirement api type into a struct +// that implements labels.Selector. +func TopologySelectorRequirementsAsSelector(tsm []v1.TopologySelectorLabelRequirement) (labels.Selector, error) { + if len(tsm) == 0 { + return labels.Nothing(), nil + } + + selector := labels.NewSelector() + for _, expr := range tsm { + r, err := labels.NewRequirement(expr.Key, selection.In, expr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + + return selector, nil +} + +// MatchTopologySelectorTerms checks whether given labels match topology selector terms in ORed; +// nil or empty term matches no objects; while empty term list matches all objects. 
+func MatchTopologySelectorTerms(topologySelectorTerms []v1.TopologySelectorTerm, lbls labels.Set) bool { + if len(topologySelectorTerms) == 0 { + // empty term list matches all objects + return true + } + + for _, req := range topologySelectorTerms { + // nil or empty term selects no objects + if len(req.MatchLabelExpressions) == 0 { + continue + } + + labelSelector, err := TopologySelectorRequirementsAsSelector(req.MatchLabelExpressions) + if err != nil || !labelSelector.Matches(lbls) { + continue + } + + return true + } + + return false +} + +// AddOrUpdateTolerationInPodSpec tries to add a toleration to the toleration list in PodSpec. +// Returns true if something was updated, false otherwise. +func AddOrUpdateTolerationInPodSpec(spec *v1.PodSpec, toleration *v1.Toleration) bool { + podTolerations := spec.Tolerations + + var newTolerations []v1.Toleration + updated := false + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + if helper.Semantic.DeepEqual(toleration, podTolerations[i]) { + return false + } + newTolerations = append(newTolerations, *toleration) + updated = true + continue + } + + newTolerations = append(newTolerations, podTolerations[i]) + } + + if !updated { + newTolerations = append(newTolerations, *toleration) + } + + spec.Tolerations = newTolerations + return true +} + +// AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. +// Returns true if something was updated, false otherwise. +func AddOrUpdateTolerationInPod(pod *v1.Pod, toleration *v1.Toleration) bool { + return AddOrUpdateTolerationInPodSpec(&pod.Spec, toleration) +} + +// TolerationsTolerateTaint checks if taint is tolerated by any of the tolerations. +func TolerationsTolerateTaint(tolerations []v1.Toleration, taint *v1.Taint) bool { + for i := range tolerations { + if tolerations[i].ToleratesTaint(taint) { + return true + } + } + return false +} + +type taintsFilterFunc func(*v1.Taint) bool + +// TolerationsTolerateTaintsWithFilter checks if given tolerations tolerates +// all the taints that apply to the filter in given taint list. +func TolerationsTolerateTaintsWithFilter(tolerations []v1.Toleration, taints []v1.Taint, applyFilter taintsFilterFunc) bool { + if len(taints) == 0 { + return true + } + + for i := range taints { + if applyFilter != nil && !applyFilter(&taints[i]) { + continue + } + + if !TolerationsTolerateTaint(tolerations, &taints[i]) { + return false + } + } + + return true +} + +// Returns true and list of Tolerations matching all Taints if all are tolerated, or false otherwise. 
+func GetMatchingTolerations(taints []v1.Taint, tolerations []v1.Toleration) (bool, []v1.Toleration) { + if len(taints) == 0 { + return true, []v1.Toleration{} + } + if len(tolerations) == 0 && len(taints) > 0 { + return false, []v1.Toleration{} + } + result := []v1.Toleration{} + for i := range taints { + tolerated := false + for j := range tolerations { + if tolerations[j].ToleratesTaint(&taints[i]) { + result = append(result, tolerations[j]) + tolerated = true + break + } + } + if !tolerated { + return false, []v1.Toleration{} + } + } + return true, result +} + +func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (v1.AvoidPods, error) { + var avoidPods v1.AvoidPods + if len(annotations) > 0 && annotations[v1.PreferAvoidPodsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[v1.PreferAvoidPodsAnnotationKey]), &avoidPods) + if err != nil { + return avoidPods, err + } + } + return avoidPods, nil +} + +// GetPersistentVolumeClass returns StorageClassName. +func GetPersistentVolumeClass(volume *v1.PersistentVolume) string { + // Use beta annotation first + if class, found := volume.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + return volume.Spec.StorageClassName +} + +// GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was +// requested, it returns "". +func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { + // Use beta annotation first + if class, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { + return class + } + + if claim.Spec.StorageClassName != nil { + return *claim.Spec.StorageClassName + } + + return "" +} + +// ScopedResourceSelectorRequirementsAsSelector converts the ScopedResourceSelectorRequirement api type into a struct that implements +// labels.Selector. +func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorRequirement) (labels.Selector, error) { + selector := labels.NewSelector() + var op selection.Operator + switch ssr.Operator { + case v1.ScopeSelectorOpIn: + op = selection.In + case v1.ScopeSelectorOpNotIn: + op = selection.NotIn + case v1.ScopeSelectorOpExists: + op = selection.Exists + case v1.ScopeSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid scope selector operator", ssr.Operator) + } + r, err := labels.NewRequirement(string(ssr.ScopeName), op, ssr.Values) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + return selector, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/register.go new file mode 100644 index 000000000..b446b7ea5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/register.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + localSchemeBuilder = &v1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs) +} + +// TODO: remove these global variables +// GroupName is the group name used in this package +const GroupName = "" + +// SchemeGroupVersion is the group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go new file mode 100644 index 000000000..439401425 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go @@ -0,0 +1,7595 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + apps "k8s.io/kubernetes/pkg/apis/apps" + core "k8s.io/kubernetes/pkg/apis/core" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1.AWSElasticBlockStoreVolumeSource)(nil), (*core.AWSElasticBlockStoreVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(a.(*v1.AWSElasticBlockStoreVolumeSource), b.(*core.AWSElasticBlockStoreVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AWSElasticBlockStoreVolumeSource)(nil), (*v1.AWSElasticBlockStoreVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(a.(*core.AWSElasticBlockStoreVolumeSource), b.(*v1.AWSElasticBlockStoreVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.Affinity)(nil), (*core.Affinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Affinity_To_core_Affinity(a.(*v1.Affinity), b.(*core.Affinity), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Affinity)(nil), (*v1.Affinity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Affinity_To_v1_Affinity(a.(*core.Affinity), b.(*v1.Affinity), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.AttachedVolume)(nil), (*core.AttachedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AttachedVolume_To_core_AttachedVolume(a.(*v1.AttachedVolume), b.(*core.AttachedVolume), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AttachedVolume)(nil), (*v1.AttachedVolume)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AttachedVolume_To_v1_AttachedVolume(a.(*core.AttachedVolume), b.(*v1.AttachedVolume), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.AvoidPods)(nil), (*core.AvoidPods)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AvoidPods_To_core_AvoidPods(a.(*v1.AvoidPods), b.(*core.AvoidPods), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AvoidPods)(nil), (*v1.AvoidPods)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AvoidPods_To_v1_AvoidPods(a.(*core.AvoidPods), b.(*v1.AvoidPods), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.AzureDiskVolumeSource)(nil), (*core.AzureDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(a.(*v1.AzureDiskVolumeSource), b.(*core.AzureDiskVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AzureDiskVolumeSource)(nil), (*v1.AzureDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(a.(*core.AzureDiskVolumeSource), b.(*v1.AzureDiskVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.AzureFilePersistentVolumeSource)(nil), (*core.AzureFilePersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(a.(*v1.AzureFilePersistentVolumeSource), b.(*core.AzureFilePersistentVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AzureFilePersistentVolumeSource)(nil), (*v1.AzureFilePersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(a.(*core.AzureFilePersistentVolumeSource), b.(*v1.AzureFilePersistentVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.AzureFileVolumeSource)(nil), (*core.AzureFileVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(a.(*v1.AzureFileVolumeSource), b.(*core.AzureFileVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.AzureFileVolumeSource)(nil), (*v1.AzureFileVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(a.(*core.AzureFileVolumeSource), b.(*v1.AzureFileVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.Binding)(nil), (*core.Binding)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Binding_To_core_Binding(a.(*v1.Binding), b.(*core.Binding), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Binding)(nil), (*v1.Binding)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Binding_To_v1_Binding(a.(*core.Binding), b.(*v1.Binding), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.CSIPersistentVolumeSource)(nil), (*core.CSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(a.(*v1.CSIPersistentVolumeSource), b.(*core.CSIPersistentVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.CSIPersistentVolumeSource)(nil), (*v1.CSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(a.(*core.CSIPersistentVolumeSource), b.(*v1.CSIPersistentVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.Capabilities)(nil), (*core.Capabilities)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Capabilities_To_core_Capabilities(a.(*v1.Capabilities), b.(*core.Capabilities), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Capabilities)(nil), (*v1.Capabilities)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Capabilities_To_v1_Capabilities(a.(*core.Capabilities), b.(*v1.Capabilities), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.CephFSPersistentVolumeSource)(nil), (*core.CephFSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(a.(*v1.CephFSPersistentVolumeSource), b.(*core.CephFSPersistentVolumeSource), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*core.CephFSPersistentVolumeSource)(nil), (*v1.CephFSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(a.(*core.CephFSPersistentVolumeSource), b.(*v1.CephFSPersistentVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.CephFSVolumeSource)(nil), (*core.CephFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(a.(*v1.CephFSVolumeSource), b.(*core.CephFSVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.CephFSVolumeSource)(nil), (*v1.CephFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(a.(*core.CephFSVolumeSource), b.(*v1.CephFSVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.CinderPersistentVolumeSource)(nil), (*core.CinderPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(a.(*v1.CinderPersistentVolumeSource), b.(*core.CinderPersistentVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.CinderPersistentVolumeSource)(nil), (*v1.CinderPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(a.(*core.CinderPersistentVolumeSource), b.(*v1.CinderPersistentVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.CinderVolumeSource)(nil), (*core.CinderVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource(a.(*v1.CinderVolumeSource), b.(*core.CinderVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.CinderVolumeSource)(nil), (*v1.CinderVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(a.(*core.CinderVolumeSource), b.(*v1.CinderVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ClientIPConfig)(nil), (*core.ClientIPConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ClientIPConfig_To_core_ClientIPConfig(a.(*v1.ClientIPConfig), b.(*core.ClientIPConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ClientIPConfig)(nil), (*v1.ClientIPConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ClientIPConfig_To_v1_ClientIPConfig(a.(*core.ClientIPConfig), b.(*v1.ClientIPConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ComponentCondition)(nil), (*core.ComponentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ComponentCondition_To_core_ComponentCondition(a.(*v1.ComponentCondition), b.(*core.ComponentCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ComponentCondition)(nil), (*v1.ComponentCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + 
return Convert_core_ComponentCondition_To_v1_ComponentCondition(a.(*core.ComponentCondition), b.(*v1.ComponentCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ComponentStatus)(nil), (*core.ComponentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ComponentStatus_To_core_ComponentStatus(a.(*v1.ComponentStatus), b.(*core.ComponentStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ComponentStatus)(nil), (*v1.ComponentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ComponentStatus_To_v1_ComponentStatus(a.(*core.ComponentStatus), b.(*v1.ComponentStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ComponentStatusList)(nil), (*core.ComponentStatusList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ComponentStatusList_To_core_ComponentStatusList(a.(*v1.ComponentStatusList), b.(*core.ComponentStatusList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ComponentStatusList)(nil), (*v1.ComponentStatusList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ComponentStatusList_To_v1_ComponentStatusList(a.(*core.ComponentStatusList), b.(*v1.ComponentStatusList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ConfigMap)(nil), (*core.ConfigMap)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMap_To_core_ConfigMap(a.(*v1.ConfigMap), b.(*core.ConfigMap), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ConfigMap)(nil), (*v1.ConfigMap)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMap_To_v1_ConfigMap(a.(*core.ConfigMap), b.(*v1.ConfigMap), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ConfigMapEnvSource)(nil), (*core.ConfigMapEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(a.(*v1.ConfigMapEnvSource), b.(*core.ConfigMapEnvSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ConfigMapEnvSource)(nil), (*v1.ConfigMapEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(a.(*core.ConfigMapEnvSource), b.(*v1.ConfigMapEnvSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ConfigMapKeySelector)(nil), (*core.ConfigMapKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(a.(*v1.ConfigMapKeySelector), b.(*core.ConfigMapKeySelector), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ConfigMapKeySelector)(nil), (*v1.ConfigMapKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(a.(*core.ConfigMapKeySelector), b.(*v1.ConfigMapKeySelector), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ConfigMapList)(nil), (*core.ConfigMapList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_ConfigMapList_To_core_ConfigMapList(a.(*v1.ConfigMapList), b.(*core.ConfigMapList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ConfigMapList)(nil), (*v1.ConfigMapList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMapList_To_v1_ConfigMapList(a.(*core.ConfigMapList), b.(*v1.ConfigMapList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ConfigMapNodeConfigSource)(nil), (*core.ConfigMapNodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(a.(*v1.ConfigMapNodeConfigSource), b.(*core.ConfigMapNodeConfigSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ConfigMapNodeConfigSource)(nil), (*v1.ConfigMapNodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(a.(*core.ConfigMapNodeConfigSource), b.(*v1.ConfigMapNodeConfigSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ConfigMapProjection)(nil), (*core.ConfigMapProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection(a.(*v1.ConfigMapProjection), b.(*core.ConfigMapProjection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ConfigMapProjection)(nil), (*v1.ConfigMapProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection(a.(*core.ConfigMapProjection), b.(*v1.ConfigMapProjection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ConfigMapVolumeSource)(nil), (*core.ConfigMapVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(a.(*v1.ConfigMapVolumeSource), b.(*core.ConfigMapVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ConfigMapVolumeSource)(nil), (*v1.ConfigMapVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(a.(*core.ConfigMapVolumeSource), b.(*v1.ConfigMapVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.Container)(nil), (*core.Container)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Container_To_core_Container(a.(*v1.Container), b.(*core.Container), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Container)(nil), (*v1.Container)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Container_To_v1_Container(a.(*core.Container), b.(*v1.Container), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ContainerImage)(nil), (*core.ContainerImage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerImage_To_core_ContainerImage(a.(*v1.ContainerImage), b.(*core.ContainerImage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ContainerImage)(nil), (*v1.ContainerImage)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_core_ContainerImage_To_v1_ContainerImage(a.(*core.ContainerImage), b.(*v1.ContainerImage), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ContainerPort)(nil), (*core.ContainerPort)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerPort_To_core_ContainerPort(a.(*v1.ContainerPort), b.(*core.ContainerPort), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ContainerPort)(nil), (*v1.ContainerPort)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerPort_To_v1_ContainerPort(a.(*core.ContainerPort), b.(*v1.ContainerPort), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ContainerState)(nil), (*core.ContainerState)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerState_To_core_ContainerState(a.(*v1.ContainerState), b.(*core.ContainerState), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ContainerState)(nil), (*v1.ContainerState)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerState_To_v1_ContainerState(a.(*core.ContainerState), b.(*v1.ContainerState), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ContainerStateRunning)(nil), (*core.ContainerStateRunning)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning(a.(*v1.ContainerStateRunning), b.(*core.ContainerStateRunning), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ContainerStateRunning)(nil), (*v1.ContainerStateRunning)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning(a.(*core.ContainerStateRunning), b.(*v1.ContainerStateRunning), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ContainerStateTerminated)(nil), (*core.ContainerStateTerminated)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(a.(*v1.ContainerStateTerminated), b.(*core.ContainerStateTerminated), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ContainerStateTerminated)(nil), (*v1.ContainerStateTerminated)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(a.(*core.ContainerStateTerminated), b.(*v1.ContainerStateTerminated), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ContainerStateWaiting)(nil), (*core.ContainerStateWaiting)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(a.(*v1.ContainerStateWaiting), b.(*core.ContainerStateWaiting), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ContainerStateWaiting)(nil), (*v1.ContainerStateWaiting)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(a.(*core.ContainerStateWaiting), b.(*v1.ContainerStateWaiting), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ContainerStatus)(nil), (*core.ContainerStatus)(nil), func(a, 
b interface{}, scope conversion.Scope) error { + return Convert_v1_ContainerStatus_To_core_ContainerStatus(a.(*v1.ContainerStatus), b.(*core.ContainerStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ContainerStatus)(nil), (*v1.ContainerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ContainerStatus_To_v1_ContainerStatus(a.(*core.ContainerStatus), b.(*v1.ContainerStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.DaemonEndpoint)(nil), (*core.DaemonEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(a.(*v1.DaemonEndpoint), b.(*core.DaemonEndpoint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.DaemonEndpoint)(nil), (*v1.DaemonEndpoint)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(a.(*core.DaemonEndpoint), b.(*v1.DaemonEndpoint), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.DownwardAPIProjection)(nil), (*core.DownwardAPIProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(a.(*v1.DownwardAPIProjection), b.(*core.DownwardAPIProjection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.DownwardAPIProjection)(nil), (*v1.DownwardAPIProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(a.(*core.DownwardAPIProjection), b.(*v1.DownwardAPIProjection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.DownwardAPIVolumeFile)(nil), (*core.DownwardAPIVolumeFile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(a.(*v1.DownwardAPIVolumeFile), b.(*core.DownwardAPIVolumeFile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.DownwardAPIVolumeFile)(nil), (*v1.DownwardAPIVolumeFile)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(a.(*core.DownwardAPIVolumeFile), b.(*v1.DownwardAPIVolumeFile), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.DownwardAPIVolumeSource)(nil), (*core.DownwardAPIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(a.(*v1.DownwardAPIVolumeSource), b.(*core.DownwardAPIVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.DownwardAPIVolumeSource)(nil), (*v1.DownwardAPIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(a.(*core.DownwardAPIVolumeSource), b.(*v1.DownwardAPIVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.EmptyDirVolumeSource)(nil), (*core.EmptyDirVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(a.(*v1.EmptyDirVolumeSource), b.(*core.EmptyDirVolumeSource), scope) + }); err != nil { + return err + } + if 
err := s.AddGeneratedConversionFunc((*core.EmptyDirVolumeSource)(nil), (*v1.EmptyDirVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(a.(*core.EmptyDirVolumeSource), b.(*v1.EmptyDirVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.EndpointAddress)(nil), (*core.EndpointAddress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EndpointAddress_To_core_EndpointAddress(a.(*v1.EndpointAddress), b.(*core.EndpointAddress), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.EndpointAddress)(nil), (*v1.EndpointAddress)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EndpointAddress_To_v1_EndpointAddress(a.(*core.EndpointAddress), b.(*v1.EndpointAddress), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.EndpointPort)(nil), (*core.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EndpointPort_To_core_EndpointPort(a.(*v1.EndpointPort), b.(*core.EndpointPort), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.EndpointPort)(nil), (*v1.EndpointPort)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EndpointPort_To_v1_EndpointPort(a.(*core.EndpointPort), b.(*v1.EndpointPort), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.EndpointSubset)(nil), (*core.EndpointSubset)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EndpointSubset_To_core_EndpointSubset(a.(*v1.EndpointSubset), b.(*core.EndpointSubset), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.EndpointSubset)(nil), (*v1.EndpointSubset)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EndpointSubset_To_v1_EndpointSubset(a.(*core.EndpointSubset), b.(*v1.EndpointSubset), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.Endpoints)(nil), (*core.Endpoints)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Endpoints_To_core_Endpoints(a.(*v1.Endpoints), b.(*core.Endpoints), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Endpoints)(nil), (*v1.Endpoints)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Endpoints_To_v1_Endpoints(a.(*core.Endpoints), b.(*v1.Endpoints), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.EndpointsList)(nil), (*core.EndpointsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EndpointsList_To_core_EndpointsList(a.(*v1.EndpointsList), b.(*core.EndpointsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.EndpointsList)(nil), (*v1.EndpointsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_EndpointsList_To_v1_EndpointsList(a.(*core.EndpointsList), b.(*v1.EndpointsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.EnvFromSource)(nil), (*core.EnvFromSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EnvFromSource_To_core_EnvFromSource(a.(*v1.EnvFromSource), b.(*core.EnvFromSource), scope) + }); err != 
nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.EnvFromSource)(nil), (*v1.EnvFromSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_EnvFromSource_To_v1_EnvFromSource(a.(*core.EnvFromSource), b.(*v1.EnvFromSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.EnvVar)(nil), (*core.EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_EnvVar_To_core_EnvVar(a.(*v1.EnvVar), b.(*core.EnvVar), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.EnvVar)(nil), (*v1.EnvVar)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_EnvVar_To_v1_EnvVar(a.(*core.EnvVar), b.(*v1.EnvVar), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.EnvVarSource)(nil), (*core.EnvVarSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_EnvVarSource_To_core_EnvVarSource(a.(*v1.EnvVarSource), b.(*core.EnvVarSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.EnvVarSource)(nil), (*v1.EnvVarSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_EnvVarSource_To_v1_EnvVarSource(a.(*core.EnvVarSource), b.(*v1.EnvVarSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Event)(nil), (*core.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Event_To_core_Event(a.(*v1.Event), b.(*core.Event), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Event)(nil), (*v1.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Event_To_v1_Event(a.(*core.Event), b.(*v1.Event), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.EventList)(nil), (*core.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_EventList_To_core_EventList(a.(*v1.EventList), b.(*core.EventList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.EventList)(nil), (*v1.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_EventList_To_v1_EventList(a.(*core.EventList), b.(*v1.EventList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.EventSeries)(nil), (*core.EventSeries)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_EventSeries_To_core_EventSeries(a.(*v1.EventSeries), b.(*core.EventSeries), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.EventSeries)(nil), (*v1.EventSeries)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_EventSeries_To_v1_EventSeries(a.(*core.EventSeries), b.(*v1.EventSeries), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.EventSource)(nil), (*core.EventSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_EventSource_To_core_EventSource(a.(*v1.EventSource), b.(*core.EventSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.EventSource)(nil), (*v1.EventSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_EventSource_To_v1_EventSource(a.(*core.EventSource), b.(*v1.EventSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ExecAction)(nil), (*core.ExecAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ExecAction_To_core_ExecAction(a.(*v1.ExecAction), b.(*core.ExecAction), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ExecAction)(nil), (*v1.ExecAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ExecAction_To_v1_ExecAction(a.(*core.ExecAction), b.(*v1.ExecAction), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.FCVolumeSource)(nil), (*core.FCVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_FCVolumeSource_To_core_FCVolumeSource(a.(*v1.FCVolumeSource), b.(*core.FCVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.FCVolumeSource)(nil), (*v1.FCVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_FCVolumeSource_To_v1_FCVolumeSource(a.(*core.FCVolumeSource), b.(*v1.FCVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.FlexPersistentVolumeSource)(nil), (*core.FlexPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(a.(*v1.FlexPersistentVolumeSource), b.(*core.FlexPersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.FlexPersistentVolumeSource)(nil), (*v1.FlexPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(a.(*core.FlexPersistentVolumeSource), b.(*v1.FlexPersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.FlexVolumeSource)(nil), (*core.FlexVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource(a.(*v1.FlexVolumeSource), b.(*core.FlexVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.FlexVolumeSource)(nil), (*v1.FlexVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource(a.(*core.FlexVolumeSource), b.(*v1.FlexVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.FlockerVolumeSource)(nil), (*core.FlockerVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(a.(*v1.FlockerVolumeSource), b.(*core.FlockerVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.FlockerVolumeSource)(nil), (*v1.FlockerVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(a.(*core.FlockerVolumeSource), b.(*v1.FlockerVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.GCEPersistentDiskVolumeSource)(nil), (*core.GCEPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(a.(*v1.GCEPersistentDiskVolumeSource), b.(*core.GCEPersistentDiskVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.GCEPersistentDiskVolumeSource)(nil), (*v1.GCEPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(a.(*core.GCEPersistentDiskVolumeSource), b.(*v1.GCEPersistentDiskVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.GitRepoVolumeSource)(nil), (*core.GitRepoVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(a.(*v1.GitRepoVolumeSource), b.(*core.GitRepoVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.GitRepoVolumeSource)(nil), (*v1.GitRepoVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(a.(*core.GitRepoVolumeSource), b.(*v1.GitRepoVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.GlusterfsPersistentVolumeSource)(nil), (*core.GlusterfsPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(a.(*v1.GlusterfsPersistentVolumeSource), b.(*core.GlusterfsPersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.GlusterfsPersistentVolumeSource)(nil), (*v1.GlusterfsPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(a.(*core.GlusterfsPersistentVolumeSource), b.(*v1.GlusterfsPersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.GlusterfsVolumeSource)(nil), (*core.GlusterfsVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(a.(*v1.GlusterfsVolumeSource), b.(*core.GlusterfsVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.GlusterfsVolumeSource)(nil), (*v1.GlusterfsVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(a.(*core.GlusterfsVolumeSource), b.(*v1.GlusterfsVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.HTTPGetAction)(nil), (*core.HTTPGetAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_HTTPGetAction_To_core_HTTPGetAction(a.(*v1.HTTPGetAction), b.(*core.HTTPGetAction), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.HTTPGetAction)(nil), (*v1.HTTPGetAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_HTTPGetAction_To_v1_HTTPGetAction(a.(*core.HTTPGetAction), b.(*v1.HTTPGetAction), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.HTTPHeader)(nil), (*core.HTTPHeader)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_HTTPHeader_To_core_HTTPHeader(a.(*v1.HTTPHeader), b.(*core.HTTPHeader), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.HTTPHeader)(nil), (*v1.HTTPHeader)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_HTTPHeader_To_v1_HTTPHeader(a.(*core.HTTPHeader), b.(*v1.HTTPHeader), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Handler)(nil), (*core.Handler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Handler_To_core_Handler(a.(*v1.Handler), b.(*core.Handler), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Handler)(nil), (*v1.Handler)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Handler_To_v1_Handler(a.(*core.Handler), b.(*v1.Handler), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.HostAlias)(nil), (*core.HostAlias)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_HostAlias_To_core_HostAlias(a.(*v1.HostAlias), b.(*core.HostAlias), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.HostAlias)(nil), (*v1.HostAlias)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_HostAlias_To_v1_HostAlias(a.(*core.HostAlias), b.(*v1.HostAlias), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.HostPathVolumeSource)(nil), (*core.HostPathVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(a.(*v1.HostPathVolumeSource), b.(*core.HostPathVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.HostPathVolumeSource)(nil), (*v1.HostPathVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(a.(*core.HostPathVolumeSource), b.(*v1.HostPathVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ISCSIPersistentVolumeSource)(nil), (*core.ISCSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(a.(*v1.ISCSIPersistentVolumeSource), b.(*core.ISCSIPersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ISCSIPersistentVolumeSource)(nil), (*v1.ISCSIPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(a.(*core.ISCSIPersistentVolumeSource), b.(*v1.ISCSIPersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ISCSIVolumeSource)(nil), (*core.ISCSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(a.(*v1.ISCSIVolumeSource), b.(*core.ISCSIVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ISCSIVolumeSource)(nil), (*v1.ISCSIVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(a.(*core.ISCSIVolumeSource), b.(*v1.ISCSIVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.KeyToPath)(nil), (*core.KeyToPath)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_KeyToPath_To_core_KeyToPath(a.(*v1.KeyToPath), b.(*core.KeyToPath), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.KeyToPath)(nil), (*v1.KeyToPath)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_KeyToPath_To_v1_KeyToPath(a.(*core.KeyToPath), b.(*v1.KeyToPath), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Lifecycle)(nil), (*core.Lifecycle)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Lifecycle_To_core_Lifecycle(a.(*v1.Lifecycle), b.(*core.Lifecycle), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Lifecycle)(nil), (*v1.Lifecycle)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Lifecycle_To_v1_Lifecycle(a.(*core.Lifecycle), b.(*v1.Lifecycle), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.LimitRange)(nil), (*core.LimitRange)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_LimitRange_To_core_LimitRange(a.(*v1.LimitRange), b.(*core.LimitRange), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.LimitRange)(nil), (*v1.LimitRange)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_LimitRange_To_v1_LimitRange(a.(*core.LimitRange), b.(*v1.LimitRange), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.LimitRangeItem)(nil), (*core.LimitRangeItem)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_LimitRangeItem_To_core_LimitRangeItem(a.(*v1.LimitRangeItem), b.(*core.LimitRangeItem), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.LimitRangeItem)(nil), (*v1.LimitRangeItem)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_LimitRangeItem_To_v1_LimitRangeItem(a.(*core.LimitRangeItem), b.(*v1.LimitRangeItem), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.LimitRangeList)(nil), (*core.LimitRangeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_LimitRangeList_To_core_LimitRangeList(a.(*v1.LimitRangeList), b.(*core.LimitRangeList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.LimitRangeList)(nil), (*v1.LimitRangeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_LimitRangeList_To_v1_LimitRangeList(a.(*core.LimitRangeList), b.(*v1.LimitRangeList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.LimitRangeSpec)(nil), (*core.LimitRangeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(a.(*v1.LimitRangeSpec), b.(*core.LimitRangeSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.LimitRangeSpec)(nil), (*v1.LimitRangeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(a.(*core.LimitRangeSpec), b.(*v1.LimitRangeSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.List)(nil), (*core.List)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_List_To_core_List(a.(*v1.List), b.(*core.List), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.List)(nil), (*v1.List)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_List_To_v1_List(a.(*core.List), b.(*v1.List), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.LoadBalancerIngress)(nil), (*core.LoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(a.(*v1.LoadBalancerIngress), b.(*core.LoadBalancerIngress), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.LoadBalancerIngress)(nil), (*v1.LoadBalancerIngress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(a.(*core.LoadBalancerIngress), b.(*v1.LoadBalancerIngress), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.LoadBalancerStatus)(nil), (*core.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(a.(*v1.LoadBalancerStatus), b.(*core.LoadBalancerStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.LoadBalancerStatus)(nil), (*v1.LoadBalancerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(a.(*core.LoadBalancerStatus), b.(*v1.LoadBalancerStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.LocalObjectReference)(nil), (*core.LocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_LocalObjectReference_To_core_LocalObjectReference(a.(*v1.LocalObjectReference), b.(*core.LocalObjectReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.LocalObjectReference)(nil), (*v1.LocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_LocalObjectReference_To_v1_LocalObjectReference(a.(*core.LocalObjectReference), b.(*v1.LocalObjectReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.LocalVolumeSource)(nil), (*core.LocalVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource(a.(*v1.LocalVolumeSource), b.(*core.LocalVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.LocalVolumeSource)(nil), (*v1.LocalVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(a.(*core.LocalVolumeSource), b.(*v1.LocalVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NFSVolumeSource)(nil), (*core.NFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(a.(*v1.NFSVolumeSource), b.(*core.NFSVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NFSVolumeSource)(nil), (*v1.NFSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource(a.(*core.NFSVolumeSource), b.(*v1.NFSVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Namespace)(nil), (*core.Namespace)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Namespace_To_core_Namespace(a.(*v1.Namespace), b.(*core.Namespace), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Namespace)(nil), (*v1.Namespace)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Namespace_To_v1_Namespace(a.(*core.Namespace), b.(*v1.Namespace), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NamespaceList)(nil), (*core.NamespaceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NamespaceList_To_core_NamespaceList(a.(*v1.NamespaceList), b.(*core.NamespaceList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NamespaceList)(nil), (*v1.NamespaceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NamespaceList_To_v1_NamespaceList(a.(*core.NamespaceList), b.(*v1.NamespaceList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NamespaceSpec)(nil), (*core.NamespaceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NamespaceSpec_To_core_NamespaceSpec(a.(*v1.NamespaceSpec), b.(*core.NamespaceSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NamespaceSpec)(nil), (*v1.NamespaceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NamespaceSpec_To_v1_NamespaceSpec(a.(*core.NamespaceSpec), b.(*v1.NamespaceSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NamespaceStatus)(nil), (*core.NamespaceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NamespaceStatus_To_core_NamespaceStatus(a.(*v1.NamespaceStatus), b.(*core.NamespaceStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NamespaceStatus)(nil), (*v1.NamespaceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NamespaceStatus_To_v1_NamespaceStatus(a.(*core.NamespaceStatus), b.(*v1.NamespaceStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Node)(nil), (*core.Node)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Node_To_core_Node(a.(*v1.Node), b.(*core.Node), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Node)(nil), (*v1.Node)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Node_To_v1_Node(a.(*core.Node), b.(*v1.Node), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeAddress)(nil), (*core.NodeAddress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeAddress_To_core_NodeAddress(a.(*v1.NodeAddress), b.(*core.NodeAddress), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeAddress)(nil), (*v1.NodeAddress)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeAddress_To_v1_NodeAddress(a.(*core.NodeAddress), b.(*v1.NodeAddress), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeAffinity)(nil), (*core.NodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeAffinity_To_core_NodeAffinity(a.(*v1.NodeAffinity), b.(*core.NodeAffinity), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeAffinity)(nil), (*v1.NodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeAffinity_To_v1_NodeAffinity(a.(*core.NodeAffinity), b.(*v1.NodeAffinity), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeCondition)(nil), (*core.NodeCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeCondition_To_core_NodeCondition(a.(*v1.NodeCondition), b.(*core.NodeCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeCondition)(nil), (*v1.NodeCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeCondition_To_v1_NodeCondition(a.(*core.NodeCondition), b.(*v1.NodeCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeConfigSource)(nil), (*core.NodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeConfigSource_To_core_NodeConfigSource(a.(*v1.NodeConfigSource), b.(*core.NodeConfigSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeConfigSource)(nil), (*v1.NodeConfigSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeConfigSource_To_v1_NodeConfigSource(a.(*core.NodeConfigSource), b.(*v1.NodeConfigSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeConfigStatus)(nil), (*core.NodeConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus(a.(*v1.NodeConfigStatus), b.(*core.NodeConfigStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeConfigStatus)(nil), (*v1.NodeConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus(a.(*core.NodeConfigStatus), b.(*v1.NodeConfigStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeDaemonEndpoints)(nil), (*core.NodeDaemonEndpoints)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(a.(*v1.NodeDaemonEndpoints), b.(*core.NodeDaemonEndpoints), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeDaemonEndpoints)(nil), (*v1.NodeDaemonEndpoints)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(a.(*core.NodeDaemonEndpoints), b.(*v1.NodeDaemonEndpoints), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeList)(nil), (*core.NodeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeList_To_core_NodeList(a.(*v1.NodeList), b.(*core.NodeList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeList)(nil), (*v1.NodeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeList_To_v1_NodeList(a.(*core.NodeList), b.(*v1.NodeList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeProxyOptions)(nil), (*core.NodeProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions(a.(*v1.NodeProxyOptions), b.(*core.NodeProxyOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeProxyOptions)(nil), (*v1.NodeProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions(a.(*core.NodeProxyOptions), b.(*v1.NodeProxyOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeResources)(nil), (*core.NodeResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeResources_To_core_NodeResources(a.(*v1.NodeResources), b.(*core.NodeResources), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeResources)(nil), (*v1.NodeResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeResources_To_v1_NodeResources(a.(*core.NodeResources), b.(*v1.NodeResources), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeSelector)(nil), (*core.NodeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeSelector_To_core_NodeSelector(a.(*v1.NodeSelector), b.(*core.NodeSelector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeSelector)(nil), (*v1.NodeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeSelector_To_v1_NodeSelector(a.(*core.NodeSelector), b.(*v1.NodeSelector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeSelectorRequirement)(nil), (*core.NodeSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(a.(*v1.NodeSelectorRequirement), b.(*core.NodeSelectorRequirement), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeSelectorRequirement)(nil), (*v1.NodeSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(a.(*core.NodeSelectorRequirement), b.(*v1.NodeSelectorRequirement), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeSelectorTerm)(nil), (*core.NodeSelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(a.(*v1.NodeSelectorTerm), b.(*core.NodeSelectorTerm), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeSelectorTerm)(nil), (*v1.NodeSelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(a.(*core.NodeSelectorTerm), b.(*v1.NodeSelectorTerm), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeSpec)(nil), (*core.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeSpec_To_core_NodeSpec(a.(*v1.NodeSpec), b.(*core.NodeSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeSpec)(nil), (*v1.NodeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeSpec_To_v1_NodeSpec(a.(*core.NodeSpec), b.(*v1.NodeSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeStatus)(nil), (*core.NodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeStatus_To_core_NodeStatus(a.(*v1.NodeStatus), b.(*core.NodeStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeStatus)(nil), (*v1.NodeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeStatus_To_v1_NodeStatus(a.(*core.NodeStatus), b.(*v1.NodeStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.NodeSystemInfo)(nil), (*core.NodeSystemInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(a.(*v1.NodeSystemInfo), b.(*core.NodeSystemInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.NodeSystemInfo)(nil), (*v1.NodeSystemInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(a.(*core.NodeSystemInfo), b.(*v1.NodeSystemInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ObjectFieldSelector)(nil), (*core.ObjectFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(a.(*v1.ObjectFieldSelector), b.(*core.ObjectFieldSelector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ObjectFieldSelector)(nil), (*v1.ObjectFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(a.(*core.ObjectFieldSelector), b.(*v1.ObjectFieldSelector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ObjectReference)(nil), (*core.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ObjectReference_To_core_ObjectReference(a.(*v1.ObjectReference), b.(*core.ObjectReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ObjectReference)(nil), (*v1.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ObjectReference_To_v1_ObjectReference(a.(*core.ObjectReference), b.(*v1.ObjectReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PersistentVolume)(nil), (*core.PersistentVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PersistentVolume_To_core_PersistentVolume(a.(*v1.PersistentVolume), b.(*core.PersistentVolume), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PersistentVolume)(nil), (*v1.PersistentVolume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PersistentVolume_To_v1_PersistentVolume(a.(*core.PersistentVolume), b.(*v1.PersistentVolume), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaim)(nil), (*core.PersistentVolumeClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(a.(*v1.PersistentVolumeClaim), b.(*core.PersistentVolumeClaim), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaim)(nil), (*v1.PersistentVolumeClaim)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(a.(*core.PersistentVolumeClaim), b.(*v1.PersistentVolumeClaim), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimCondition)(nil), (*core.PersistentVolumeClaimCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(a.(*v1.PersistentVolumeClaimCondition), b.(*core.PersistentVolumeClaimCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimCondition)(nil), (*v1.PersistentVolumeClaimCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(a.(*core.PersistentVolumeClaimCondition), b.(*v1.PersistentVolumeClaimCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimList)(nil), (*core.PersistentVolumeClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(a.(*v1.PersistentVolumeClaimList), b.(*core.PersistentVolumeClaimList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimList)(nil), (*v1.PersistentVolumeClaimList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(a.(*core.PersistentVolumeClaimList), b.(*v1.PersistentVolumeClaimList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimSpec)(nil), (*core.PersistentVolumeClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(a.(*v1.PersistentVolumeClaimSpec), b.(*core.PersistentVolumeClaimSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimSpec)(nil), (*v1.PersistentVolumeClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(a.(*core.PersistentVolumeClaimSpec), b.(*v1.PersistentVolumeClaimSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimStatus)(nil), (*core.PersistentVolumeClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(a.(*v1.PersistentVolumeClaimStatus), b.(*core.PersistentVolumeClaimStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimStatus)(nil), (*v1.PersistentVolumeClaimStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(a.(*core.PersistentVolumeClaimStatus), b.(*v1.PersistentVolumeClaimStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimVolumeSource)(nil), (*core.PersistentVolumeClaimVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(a.(*v1.PersistentVolumeClaimVolumeSource), b.(*core.PersistentVolumeClaimVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimVolumeSource)(nil), (*v1.PersistentVolumeClaimVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(a.(*core.PersistentVolumeClaimVolumeSource), b.(*v1.PersistentVolumeClaimVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeList)(nil), (*core.PersistentVolumeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList(a.(*v1.PersistentVolumeList), b.(*core.PersistentVolumeList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeList)(nil), (*v1.PersistentVolumeList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList(a.(*core.PersistentVolumeList), b.(*v1.PersistentVolumeList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeSource)(nil), (*core.PersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(a.(*v1.PersistentVolumeSource), b.(*core.PersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeSource)(nil), (*v1.PersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(a.(*core.PersistentVolumeSource), b.(*v1.PersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeSpec)(nil), (*core.PersistentVolumeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(a.(*v1.PersistentVolumeSpec), b.(*core.PersistentVolumeSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeSpec)(nil), (*v1.PersistentVolumeSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(a.(*core.PersistentVolumeSpec), b.(*v1.PersistentVolumeSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeStatus)(nil), (*core.PersistentVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(a.(*v1.PersistentVolumeStatus), b.(*core.PersistentVolumeStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeStatus)(nil), (*v1.PersistentVolumeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(a.(*core.PersistentVolumeStatus), b.(*v1.PersistentVolumeStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PhotonPersistentDiskVolumeSource)(nil), (*core.PhotonPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(a.(*v1.PhotonPersistentDiskVolumeSource), b.(*core.PhotonPersistentDiskVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PhotonPersistentDiskVolumeSource)(nil), (*v1.PhotonPersistentDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(a.(*core.PhotonPersistentDiskVolumeSource), b.(*v1.PhotonPersistentDiskVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Pod)(nil), (*core.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Pod_To_core_Pod(a.(*v1.Pod), b.(*core.Pod), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Pod)(nil), (*v1.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Pod_To_v1_Pod(a.(*core.Pod), b.(*v1.Pod), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodAffinity)(nil), (*core.PodAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodAffinity_To_core_PodAffinity(a.(*v1.PodAffinity), b.(*core.PodAffinity), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodAffinity)(nil), (*v1.PodAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodAffinity_To_v1_PodAffinity(a.(*core.PodAffinity), b.(*v1.PodAffinity), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodAffinityTerm)(nil), (*core.PodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(a.(*v1.PodAffinityTerm), b.(*core.PodAffinityTerm), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodAffinityTerm)(nil), (*v1.PodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(a.(*core.PodAffinityTerm), b.(*v1.PodAffinityTerm), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodAntiAffinity)(nil), (*core.PodAntiAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity(a.(*v1.PodAntiAffinity), b.(*core.PodAntiAffinity), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodAntiAffinity)(nil), (*v1.PodAntiAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity(a.(*core.PodAntiAffinity), b.(*v1.PodAntiAffinity), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodAttachOptions)(nil), (*core.PodAttachOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodAttachOptions_To_core_PodAttachOptions(a.(*v1.PodAttachOptions), b.(*core.PodAttachOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodAttachOptions)(nil), (*v1.PodAttachOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodAttachOptions_To_v1_PodAttachOptions(a.(*core.PodAttachOptions), b.(*v1.PodAttachOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodCondition)(nil), (*core.PodCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodCondition_To_core_PodCondition(a.(*v1.PodCondition), b.(*core.PodCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodCondition)(nil), (*v1.PodCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodCondition_To_v1_PodCondition(a.(*core.PodCondition), b.(*v1.PodCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodDNSConfig)(nil), (*core.PodDNSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodDNSConfig_To_core_PodDNSConfig(a.(*v1.PodDNSConfig), b.(*core.PodDNSConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodDNSConfig)(nil), (*v1.PodDNSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodDNSConfig_To_v1_PodDNSConfig(a.(*core.PodDNSConfig), b.(*v1.PodDNSConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodDNSConfigOption)(nil), (*core.PodDNSConfigOption)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(a.(*v1.PodDNSConfigOption), b.(*core.PodDNSConfigOption), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodDNSConfigOption)(nil), (*v1.PodDNSConfigOption)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(a.(*core.PodDNSConfigOption), b.(*v1.PodDNSConfigOption), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodExecOptions)(nil), (*core.PodExecOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodExecOptions_To_core_PodExecOptions(a.(*v1.PodExecOptions), b.(*core.PodExecOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodExecOptions)(nil), (*v1.PodExecOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodExecOptions_To_v1_PodExecOptions(a.(*core.PodExecOptions), b.(*v1.PodExecOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodList)(nil), (*core.PodList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodList_To_core_PodList(a.(*v1.PodList), b.(*core.PodList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodList)(nil), (*v1.PodList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodList_To_v1_PodList(a.(*core.PodList), b.(*v1.PodList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodLogOptions)(nil), (*core.PodLogOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodLogOptions_To_core_PodLogOptions(a.(*v1.PodLogOptions), b.(*core.PodLogOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodLogOptions)(nil), (*v1.PodLogOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodLogOptions_To_v1_PodLogOptions(a.(*core.PodLogOptions), b.(*v1.PodLogOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodPortForwardOptions)(nil), (*core.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(a.(*v1.PodPortForwardOptions), b.(*core.PodPortForwardOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodPortForwardOptions)(nil), (*v1.PodPortForwardOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(a.(*core.PodPortForwardOptions), b.(*v1.PodPortForwardOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodProxyOptions)(nil), (*core.PodProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodProxyOptions_To_core_PodProxyOptions(a.(*v1.PodProxyOptions), b.(*core.PodProxyOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodProxyOptions)(nil), (*v1.PodProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodProxyOptions_To_v1_PodProxyOptions(a.(*core.PodProxyOptions), b.(*v1.PodProxyOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodReadinessGate)(nil), (*core.PodReadinessGate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodReadinessGate_To_core_PodReadinessGate(a.(*v1.PodReadinessGate), b.(*core.PodReadinessGate), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodReadinessGate)(nil), (*v1.PodReadinessGate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodReadinessGate_To_v1_PodReadinessGate(a.(*core.PodReadinessGate), b.(*v1.PodReadinessGate), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodSecurityContext)(nil), (*core.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodSecurityContext_To_core_PodSecurityContext(a.(*v1.PodSecurityContext), b.(*core.PodSecurityContext), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodSecurityContext)(nil), (*v1.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodSecurityContext_To_v1_PodSecurityContext(a.(*core.PodSecurityContext), b.(*v1.PodSecurityContext), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodSignature)(nil), (*core.PodSignature)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodSignature_To_core_PodSignature(a.(*v1.PodSignature), b.(*core.PodSignature), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodSignature)(nil), (*v1.PodSignature)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodSignature_To_v1_PodSignature(a.(*core.PodSignature), b.(*v1.PodSignature), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodSpec)(nil), (*core.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodSpec_To_core_PodSpec(a.(*v1.PodSpec), b.(*core.PodSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodSpec)(nil), (*v1.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodSpec_To_v1_PodSpec(a.(*core.PodSpec), b.(*v1.PodSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodStatus)(nil), (*core.PodStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodStatus_To_core_PodStatus(a.(*v1.PodStatus), b.(*core.PodStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodStatus)(nil), (*v1.PodStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodStatus_To_v1_PodStatus(a.(*core.PodStatus), b.(*v1.PodStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodStatusResult)(nil), (*core.PodStatusResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodStatusResult_To_core_PodStatusResult(a.(*v1.PodStatusResult), b.(*core.PodStatusResult), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodStatusResult)(nil), (*v1.PodStatusResult)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodStatusResult_To_v1_PodStatusResult(a.(*core.PodStatusResult), b.(*v1.PodStatusResult), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodTemplate)(nil), (*core.PodTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodTemplate_To_core_PodTemplate(a.(*v1.PodTemplate), b.(*core.PodTemplate), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodTemplate)(nil), (*v1.PodTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodTemplate_To_v1_PodTemplate(a.(*core.PodTemplate), b.(*v1.PodTemplate), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodTemplateList)(nil), (*core.PodTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodTemplateList_To_core_PodTemplateList(a.(*v1.PodTemplateList), b.(*core.PodTemplateList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodTemplateList)(nil), (*v1.PodTemplateList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodTemplateList_To_v1_PodTemplateList(a.(*core.PodTemplateList), b.(*v1.PodTemplateList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PodTemplateSpec)(nil), (*core.PodTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(a.(*v1.PodTemplateSpec), b.(*core.PodTemplateSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PodTemplateSpec)(nil), (*v1.PodTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(a.(*core.PodTemplateSpec), b.(*v1.PodTemplateSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PortworxVolumeSource)(nil), (*core.PortworxVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(a.(*v1.PortworxVolumeSource), b.(*core.PortworxVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PortworxVolumeSource)(nil), (*v1.PortworxVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(a.(*core.PortworxVolumeSource), b.(*v1.PortworxVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Preconditions)(nil), (*core.Preconditions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Preconditions_To_core_Preconditions(a.(*v1.Preconditions), b.(*core.Preconditions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Preconditions)(nil), (*v1.Preconditions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Preconditions_To_v1_Preconditions(a.(*core.Preconditions), b.(*v1.Preconditions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PreferAvoidPodsEntry)(nil), (*core.PreferAvoidPodsEntry)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(a.(*v1.PreferAvoidPodsEntry), b.(*core.PreferAvoidPodsEntry), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PreferAvoidPodsEntry)(nil), (*v1.PreferAvoidPodsEntry)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(a.(*core.PreferAvoidPodsEntry), b.(*v1.PreferAvoidPodsEntry), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.PreferredSchedulingTerm)(nil), (*core.PreferredSchedulingTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(a.(*v1.PreferredSchedulingTerm), b.(*core.PreferredSchedulingTerm), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.PreferredSchedulingTerm)(nil), (*v1.PreferredSchedulingTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(a.(*core.PreferredSchedulingTerm), b.(*v1.PreferredSchedulingTerm), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Probe)(nil), (*core.Probe)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Probe_To_core_Probe(a.(*v1.Probe), b.(*core.Probe), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Probe)(nil), (*v1.Probe)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Probe_To_v1_Probe(a.(*core.Probe), b.(*v1.Probe), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ProjectedVolumeSource)(nil), (*core.ProjectedVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(a.(*v1.ProjectedVolumeSource), b.(*core.ProjectedVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ProjectedVolumeSource)(nil), (*v1.ProjectedVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(a.(*core.ProjectedVolumeSource), b.(*v1.ProjectedVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.QuobyteVolumeSource)(nil), (*core.QuobyteVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(a.(*v1.QuobyteVolumeSource), b.(*core.QuobyteVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.QuobyteVolumeSource)(nil), (*v1.QuobyteVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(a.(*core.QuobyteVolumeSource), b.(*v1.QuobyteVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.RBDPersistentVolumeSource)(nil), (*core.RBDPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(a.(*v1.RBDPersistentVolumeSource), b.(*core.RBDPersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.RBDPersistentVolumeSource)(nil), (*v1.RBDPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(a.(*core.RBDPersistentVolumeSource), b.(*v1.RBDPersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.RBDVolumeSource)(nil), (*core.RBDVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource(a.(*v1.RBDVolumeSource), b.(*core.RBDVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.RBDVolumeSource)(nil), (*v1.RBDVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource(a.(*core.RBDVolumeSource), b.(*v1.RBDVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.RangeAllocation)(nil), (*core.RangeAllocation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_RangeAllocation_To_core_RangeAllocation(a.(*v1.RangeAllocation), b.(*core.RangeAllocation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.RangeAllocation)(nil), (*v1.RangeAllocation)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_RangeAllocation_To_v1_RangeAllocation(a.(*core.RangeAllocation), b.(*v1.RangeAllocation), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ReplicationController)(nil), (*core.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ReplicationController_To_core_ReplicationController(a.(*v1.ReplicationController), b.(*core.ReplicationController), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ReplicationController)(nil), (*v1.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ReplicationController_To_v1_ReplicationController(a.(*core.ReplicationController), b.(*v1.ReplicationController), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ReplicationControllerCondition)(nil), (*core.ReplicationControllerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(a.(*v1.ReplicationControllerCondition), b.(*core.ReplicationControllerCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerCondition)(nil), (*v1.ReplicationControllerCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(a.(*core.ReplicationControllerCondition), b.(*v1.ReplicationControllerCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ReplicationControllerList)(nil), (*core.ReplicationControllerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList(a.(*v1.ReplicationControllerList), b.(*core.ReplicationControllerList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerList)(nil), (*v1.ReplicationControllerList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList(a.(*core.ReplicationControllerList), b.(*v1.ReplicationControllerList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ReplicationControllerSpec)(nil), (*core.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(a.(*v1.ReplicationControllerSpec), b.(*core.ReplicationControllerSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerSpec)(nil), (*v1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(a.(*core.ReplicationControllerSpec), b.(*v1.ReplicationControllerSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ReplicationControllerStatus)(nil), (*core.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(a.(*v1.ReplicationControllerStatus), b.(*core.ReplicationControllerStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ReplicationControllerStatus)(nil), (*v1.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(a.(*core.ReplicationControllerStatus), b.(*v1.ReplicationControllerStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ResourceFieldSelector)(nil), (*core.ResourceFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(a.(*v1.ResourceFieldSelector), b.(*core.ResourceFieldSelector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ResourceFieldSelector)(nil), (*v1.ResourceFieldSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(a.(*core.ResourceFieldSelector), b.(*v1.ResourceFieldSelector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ResourceQuota)(nil), (*core.ResourceQuota)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ResourceQuota_To_core_ResourceQuota(a.(*v1.ResourceQuota), b.(*core.ResourceQuota), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ResourceQuota)(nil), (*v1.ResourceQuota)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ResourceQuota_To_v1_ResourceQuota(a.(*core.ResourceQuota), b.(*v1.ResourceQuota), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ResourceQuotaList)(nil), (*core.ResourceQuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList(a.(*v1.ResourceQuotaList), b.(*core.ResourceQuotaList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ResourceQuotaList)(nil), (*v1.ResourceQuotaList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList(a.(*core.ResourceQuotaList), b.(*v1.ResourceQuotaList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ResourceQuotaSpec)(nil), (*core.ResourceQuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(a.(*v1.ResourceQuotaSpec), b.(*core.ResourceQuotaSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ResourceQuotaSpec)(nil), (*v1.ResourceQuotaSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(a.(*core.ResourceQuotaSpec), b.(*v1.ResourceQuotaSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ResourceQuotaStatus)(nil), (*core.ResourceQuotaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(a.(*v1.ResourceQuotaStatus), b.(*core.ResourceQuotaStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ResourceQuotaStatus)(nil), (*v1.ResourceQuotaStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(a.(*core.ResourceQuotaStatus), b.(*v1.ResourceQuotaStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ResourceRequirements)(nil), (*core.ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ResourceRequirements_To_core_ResourceRequirements(a.(*v1.ResourceRequirements), b.(*core.ResourceRequirements), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ResourceRequirements)(nil), (*v1.ResourceRequirements)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ResourceRequirements_To_v1_ResourceRequirements(a.(*core.ResourceRequirements), b.(*v1.ResourceRequirements), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.SELinuxOptions)(nil), (*core.SELinuxOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_SELinuxOptions_To_core_SELinuxOptions(a.(*v1.SELinuxOptions), b.(*core.SELinuxOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SELinuxOptions)(nil), (*v1.SELinuxOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SELinuxOptions_To_v1_SELinuxOptions(a.(*core.SELinuxOptions), b.(*v1.SELinuxOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ScaleIOPersistentVolumeSource)(nil), (*core.ScaleIOPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(a.(*v1.ScaleIOPersistentVolumeSource), b.(*core.ScaleIOPersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ScaleIOPersistentVolumeSource)(nil), (*v1.ScaleIOPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(a.(*core.ScaleIOPersistentVolumeSource), b.(*v1.ScaleIOPersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ScaleIOVolumeSource)(nil), (*core.ScaleIOVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(a.(*v1.ScaleIOVolumeSource), b.(*core.ScaleIOVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ScaleIOVolumeSource)(nil), (*v1.ScaleIOVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(a.(*core.ScaleIOVolumeSource), b.(*v1.ScaleIOVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ScopeSelector)(nil), (*core.ScopeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ScopeSelector_To_core_ScopeSelector(a.(*v1.ScopeSelector), b.(*core.ScopeSelector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ScopeSelector)(nil), (*v1.ScopeSelector)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ScopeSelector_To_v1_ScopeSelector(a.(*core.ScopeSelector), b.(*v1.ScopeSelector), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ScopedResourceSelectorRequirement)(nil), (*core.ScopedResourceSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(a.(*v1.ScopedResourceSelectorRequirement), b.(*core.ScopedResourceSelectorRequirement), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ScopedResourceSelectorRequirement)(nil), (*v1.ScopedResourceSelectorRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(a.(*core.ScopedResourceSelectorRequirement), b.(*v1.ScopedResourceSelectorRequirement), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Secret)(nil), (*core.Secret)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Secret_To_core_Secret(a.(*v1.Secret), b.(*core.Secret), scope)
+	}); err
!= nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Secret)(nil), (*v1.Secret)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Secret_To_v1_Secret(a.(*core.Secret), b.(*v1.Secret), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.SecretEnvSource)(nil), (*core.SecretEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretEnvSource_To_core_SecretEnvSource(a.(*v1.SecretEnvSource), b.(*core.SecretEnvSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SecretEnvSource)(nil), (*v1.SecretEnvSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretEnvSource_To_v1_SecretEnvSource(a.(*core.SecretEnvSource), b.(*v1.SecretEnvSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.SecretKeySelector)(nil), (*core.SecretKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretKeySelector_To_core_SecretKeySelector(a.(*v1.SecretKeySelector), b.(*core.SecretKeySelector), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SecretKeySelector)(nil), (*v1.SecretKeySelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretKeySelector_To_v1_SecretKeySelector(a.(*core.SecretKeySelector), b.(*v1.SecretKeySelector), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.SecretList)(nil), (*core.SecretList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretList_To_core_SecretList(a.(*v1.SecretList), b.(*core.SecretList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SecretList)(nil), (*v1.SecretList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretList_To_v1_SecretList(a.(*core.SecretList), b.(*v1.SecretList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.SecretProjection)(nil), (*core.SecretProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretProjection_To_core_SecretProjection(a.(*v1.SecretProjection), b.(*core.SecretProjection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SecretProjection)(nil), (*v1.SecretProjection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretProjection_To_v1_SecretProjection(a.(*core.SecretProjection), b.(*v1.SecretProjection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.SecretReference)(nil), (*core.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretReference_To_core_SecretReference(a.(*v1.SecretReference), b.(*core.SecretReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SecretReference)(nil), (*v1.SecretReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretReference_To_v1_SecretReference(a.(*core.SecretReference), b.(*v1.SecretReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.SecretVolumeSource)(nil), (*core.SecretVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource(a.(*v1.SecretVolumeSource), b.(*core.SecretVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SecretVolumeSource)(nil), (*v1.SecretVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource(a.(*core.SecretVolumeSource), b.(*v1.SecretVolumeSource), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.SecurityContext)(nil), (*core.SecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecurityContext_To_core_SecurityContext(a.(*v1.SecurityContext), b.(*core.SecurityContext), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SecurityContext)(nil), (*v1.SecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SecurityContext_To_v1_SecurityContext(a.(*core.SecurityContext), b.(*v1.SecurityContext), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.SerializedReference)(nil), (*core.SerializedReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SerializedReference_To_core_SerializedReference(a.(*v1.SerializedReference), b.(*core.SerializedReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.SerializedReference)(nil), (*v1.SerializedReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_SerializedReference_To_v1_SerializedReference(a.(*core.SerializedReference), b.(*v1.SerializedReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.Service)(nil), (*core.Service)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Service_To_core_Service(a.(*v1.Service), b.(*core.Service), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.Service)(nil), (*v1.Service)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_Service_To_v1_Service(a.(*core.Service), b.(*v1.Service), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ServiceAccount)(nil), (*core.ServiceAccount)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceAccount_To_core_ServiceAccount(a.(*v1.ServiceAccount), b.(*core.ServiceAccount), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ServiceAccount)(nil), (*v1.ServiceAccount)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ServiceAccount_To_v1_ServiceAccount(a.(*core.ServiceAccount), b.(*v1.ServiceAccount), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ServiceAccountList)(nil), (*core.ServiceAccountList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ServiceAccountList_To_core_ServiceAccountList(a.(*v1.ServiceAccountList), b.(*core.ServiceAccountList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*core.ServiceAccountList)(nil), (*v1.ServiceAccountList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_core_ServiceAccountList_To_v1_ServiceAccountList(a.(*core.ServiceAccountList), b.(*v1.ServiceAccountList), scope) + }); err != nil { + return err + } + 
+	if err := s.AddGeneratedConversionFunc((*v1.ServiceAccountTokenProjection)(nil), (*core.ServiceAccountTokenProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(a.(*v1.ServiceAccountTokenProjection), b.(*core.ServiceAccountTokenProjection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ServiceAccountTokenProjection)(nil), (*v1.ServiceAccountTokenProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(a.(*core.ServiceAccountTokenProjection), b.(*v1.ServiceAccountTokenProjection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ServiceList)(nil), (*core.ServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ServiceList_To_core_ServiceList(a.(*v1.ServiceList), b.(*core.ServiceList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ServiceList)(nil), (*v1.ServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ServiceList_To_v1_ServiceList(a.(*core.ServiceList), b.(*v1.ServiceList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ServicePort)(nil), (*core.ServicePort)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ServicePort_To_core_ServicePort(a.(*v1.ServicePort), b.(*core.ServicePort), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ServicePort)(nil), (*v1.ServicePort)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ServicePort_To_v1_ServicePort(a.(*core.ServicePort), b.(*v1.ServicePort), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ServiceProxyOptions)(nil), (*core.ServiceProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(a.(*v1.ServiceProxyOptions), b.(*core.ServiceProxyOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ServiceProxyOptions)(nil), (*v1.ServiceProxyOptions)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(a.(*core.ServiceProxyOptions), b.(*v1.ServiceProxyOptions), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ServiceSpec)(nil), (*core.ServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ServiceSpec_To_core_ServiceSpec(a.(*v1.ServiceSpec), b.(*core.ServiceSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ServiceSpec)(nil), (*v1.ServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ServiceSpec_To_v1_ServiceSpec(a.(*core.ServiceSpec), b.(*v1.ServiceSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ServiceStatus)(nil), (*core.ServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ServiceStatus_To_core_ServiceStatus(a.(*v1.ServiceStatus), b.(*core.ServiceStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.ServiceStatus)(nil), (*v1.ServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ServiceStatus_To_v1_ServiceStatus(a.(*core.ServiceStatus), b.(*v1.ServiceStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.SessionAffinityConfig)(nil), (*core.SessionAffinityConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(a.(*v1.SessionAffinityConfig), b.(*core.SessionAffinityConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.SessionAffinityConfig)(nil), (*v1.SessionAffinityConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(a.(*core.SessionAffinityConfig), b.(*v1.SessionAffinityConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.StorageOSPersistentVolumeSource)(nil), (*core.StorageOSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(a.(*v1.StorageOSPersistentVolumeSource), b.(*core.StorageOSPersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.StorageOSPersistentVolumeSource)(nil), (*v1.StorageOSPersistentVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(a.(*core.StorageOSPersistentVolumeSource), b.(*v1.StorageOSPersistentVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.StorageOSVolumeSource)(nil), (*core.StorageOSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(a.(*v1.StorageOSVolumeSource), b.(*core.StorageOSVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.StorageOSVolumeSource)(nil), (*v1.StorageOSVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(a.(*core.StorageOSVolumeSource), b.(*v1.StorageOSVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Sysctl)(nil), (*core.Sysctl)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Sysctl_To_core_Sysctl(a.(*v1.Sysctl), b.(*core.Sysctl), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Sysctl)(nil), (*v1.Sysctl)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Sysctl_To_v1_Sysctl(a.(*core.Sysctl), b.(*v1.Sysctl), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.TCPSocketAction)(nil), (*core.TCPSocketAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_TCPSocketAction_To_core_TCPSocketAction(a.(*v1.TCPSocketAction), b.(*core.TCPSocketAction), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.TCPSocketAction)(nil), (*v1.TCPSocketAction)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_TCPSocketAction_To_v1_TCPSocketAction(a.(*core.TCPSocketAction), b.(*v1.TCPSocketAction), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Taint)(nil), (*core.Taint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Taint_To_core_Taint(a.(*v1.Taint), b.(*core.Taint), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Taint)(nil), (*v1.Taint)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Taint_To_v1_Taint(a.(*core.Taint), b.(*v1.Taint), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Toleration)(nil), (*core.Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Toleration_To_core_Toleration(a.(*v1.Toleration), b.(*core.Toleration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Toleration)(nil), (*v1.Toleration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Toleration_To_v1_Toleration(a.(*core.Toleration), b.(*v1.Toleration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.TopologySelectorLabelRequirement)(nil), (*core.TopologySelectorLabelRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(a.(*v1.TopologySelectorLabelRequirement), b.(*core.TopologySelectorLabelRequirement), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.TopologySelectorLabelRequirement)(nil), (*v1.TopologySelectorLabelRequirement)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(a.(*core.TopologySelectorLabelRequirement), b.(*v1.TopologySelectorLabelRequirement), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.TopologySelectorTerm)(nil), (*core.TopologySelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(a.(*v1.TopologySelectorTerm), b.(*core.TopologySelectorTerm), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.TopologySelectorTerm)(nil), (*v1.TopologySelectorTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(a.(*core.TopologySelectorTerm), b.(*v1.TopologySelectorTerm), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.TypedLocalObjectReference)(nil), (*core.TypedLocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(a.(*v1.TypedLocalObjectReference), b.(*core.TypedLocalObjectReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.TypedLocalObjectReference)(nil), (*v1.TypedLocalObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(a.(*core.TypedLocalObjectReference), b.(*v1.TypedLocalObjectReference), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.Volume)(nil), (*core.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Volume_To_core_Volume(a.(*v1.Volume), b.(*core.Volume), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.Volume)(nil), (*v1.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Volume_To_v1_Volume(a.(*core.Volume), b.(*v1.Volume), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.VolumeDevice)(nil), (*core.VolumeDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_VolumeDevice_To_core_VolumeDevice(a.(*v1.VolumeDevice), b.(*core.VolumeDevice), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.VolumeDevice)(nil), (*v1.VolumeDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_VolumeDevice_To_v1_VolumeDevice(a.(*core.VolumeDevice), b.(*v1.VolumeDevice), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.VolumeMount)(nil), (*core.VolumeMount)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_VolumeMount_To_core_VolumeMount(a.(*v1.VolumeMount), b.(*core.VolumeMount), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.VolumeMount)(nil), (*v1.VolumeMount)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_VolumeMount_To_v1_VolumeMount(a.(*core.VolumeMount), b.(*v1.VolumeMount), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.VolumeNodeAffinity)(nil), (*core.VolumeNodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(a.(*v1.VolumeNodeAffinity), b.(*core.VolumeNodeAffinity), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.VolumeNodeAffinity)(nil), (*v1.VolumeNodeAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(a.(*core.VolumeNodeAffinity), b.(*v1.VolumeNodeAffinity), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.VolumeProjection)(nil), (*core.VolumeProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_VolumeProjection_To_core_VolumeProjection(a.(*v1.VolumeProjection), b.(*core.VolumeProjection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.VolumeProjection)(nil), (*v1.VolumeProjection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_VolumeProjection_To_v1_VolumeProjection(a.(*core.VolumeProjection), b.(*v1.VolumeProjection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.VolumeSource)(nil), (*core.VolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_VolumeSource_To_core_VolumeSource(a.(*v1.VolumeSource), b.(*core.VolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.VolumeSource)(nil), (*v1.VolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_VolumeSource_To_v1_VolumeSource(a.(*core.VolumeSource), b.(*v1.VolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.VsphereVirtualDiskVolumeSource)(nil), (*core.VsphereVirtualDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(a.(*v1.VsphereVirtualDiskVolumeSource), b.(*core.VsphereVirtualDiskVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.VsphereVirtualDiskVolumeSource)(nil), (*v1.VsphereVirtualDiskVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(a.(*core.VsphereVirtualDiskVolumeSource), b.(*v1.VsphereVirtualDiskVolumeSource), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.WeightedPodAffinityTerm)(nil), (*core.WeightedPodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(a.(*v1.WeightedPodAffinityTerm), b.(*core.WeightedPodAffinityTerm), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*core.WeightedPodAffinityTerm)(nil), (*v1.WeightedPodAffinityTerm)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(a.(*core.WeightedPodAffinityTerm), b.(*v1.WeightedPodAffinityTerm), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*apps.ReplicaSetSpec)(nil), (*v1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apps_ReplicaSetSpec_To_v1_ReplicationControllerSpec(a.(*apps.ReplicaSetSpec), b.(*v1.ReplicationControllerSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*apps.ReplicaSetStatus)(nil), (*v1.ReplicationControllerStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apps_ReplicaSetStatus_To_v1_ReplicationControllerStatus(a.(*apps.ReplicaSetStatus), b.(*v1.ReplicationControllerStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*apps.ReplicaSet)(nil), (*v1.ReplicationController)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apps_ReplicaSet_To_v1_ReplicationController(a.(*apps.ReplicaSet), b.(*v1.ReplicationController), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*core.PodSecurityContext)(nil), (*v1.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodSecurityContext_To_v1_PodSecurityContext(a.(*core.PodSecurityContext), b.(*v1.PodSecurityContext), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*core.PodSpec)(nil), (*v1.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodSpec_To_v1_PodSpec(a.(*core.PodSpec), b.(*v1.PodSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*core.PodTemplateSpec)(nil), (*v1.PodTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(a.(*core.PodTemplateSpec), b.(*v1.PodTemplateSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*core.Pod)(nil), (*v1.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_Pod_To_v1_Pod(a.(*core.Pod), b.(*v1.Pod), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*core.ReplicationControllerSpec)(nil), (*v1.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(a.(*core.ReplicationControllerSpec), b.(*v1.ReplicationControllerSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*core.SecurityContext)(nil), (*v1.SecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_core_SecurityContext_To_v1_SecurityContext(a.(*core.SecurityContext), b.(*v1.SecurityContext), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1.PodSecurityContext)(nil), (*core.PodSecurityContext)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodSecurityContext_To_core_PodSecurityContext(a.(*v1.PodSecurityContext), b.(*core.PodSecurityContext), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1.PodSpec)(nil), (*core.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodSpec_To_core_PodSpec(a.(*v1.PodSpec), b.(*core.PodSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1.PodTemplateSpec)(nil), (*core.PodTemplateSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(a.(*v1.PodTemplateSpec), b.(*core.PodTemplateSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1.Pod)(nil), (*core.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Pod_To_core_Pod(a.(*v1.Pod), b.(*core.Pod), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1.ReplicationControllerSpec)(nil), (*apps.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ReplicationControllerSpec_To_apps_ReplicaSetSpec(a.(*v1.ReplicationControllerSpec), b.(*apps.ReplicaSetSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1.ReplicationControllerSpec)(nil), (*core.ReplicationControllerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(a.(*v1.ReplicationControllerSpec), b.(*core.ReplicationControllerSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1.ReplicationControllerStatus)(nil), (*apps.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ReplicationControllerStatus_To_apps_ReplicaSetStatus(a.(*v1.ReplicationControllerStatus), b.(*apps.ReplicaSetStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1.ReplicationController)(nil), (*apps.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ReplicationController_To_apps_ReplicaSet(a.(*v1.ReplicationController), b.(*apps.ReplicaSet), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1.ResourceList)(nil), (*core.ResourceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ResourceList_To_core_ResourceList(a.(*v1.ResourceList), b.(*core.ResourceList), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1.Secret)(nil), (*core.Secret)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Secret_To_core_Secret(a.(*v1.Secret), b.(*core.Secret), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
+	out.VolumeID = in.VolumeID
+	out.FSType = in.FSType
+	out.Partition = in.Partition
+	out.ReadOnly = in.ReadOnly
+	return nil
+}
+
+// Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function.
+func Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
+	return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in, out, s)
+}
+
+func autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
+	out.VolumeID = in.VolumeID
+	out.FSType = in.FSType
+	out.Partition = in.Partition
+	out.ReadOnly = in.ReadOnly
+	return nil
+}
+
+// Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function.
+func Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
+	return autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_Affinity_To_core_Affinity(in *v1.Affinity, out *core.Affinity, s conversion.Scope) error {
+	out.NodeAffinity = (*core.NodeAffinity)(unsafe.Pointer(in.NodeAffinity))
+	out.PodAffinity = (*core.PodAffinity)(unsafe.Pointer(in.PodAffinity))
+	out.PodAntiAffinity = (*core.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity))
+	return nil
+}
+
+// Convert_v1_Affinity_To_core_Affinity is an autogenerated conversion function.
+func Convert_v1_Affinity_To_core_Affinity(in *v1.Affinity, out *core.Affinity, s conversion.Scope) error {
+	return autoConvert_v1_Affinity_To_core_Affinity(in, out, s)
+}
+
+func autoConvert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *v1.Affinity, s conversion.Scope) error {
+	out.NodeAffinity = (*v1.NodeAffinity)(unsafe.Pointer(in.NodeAffinity))
+	out.PodAffinity = (*v1.PodAffinity)(unsafe.Pointer(in.PodAffinity))
+	out.PodAntiAffinity = (*v1.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity))
+	return nil
+}
+
+// Convert_core_Affinity_To_v1_Affinity is an autogenerated conversion function.
+func Convert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *v1.Affinity, s conversion.Scope) error {
+	return autoConvert_core_Affinity_To_v1_Affinity(in, out, s)
+}
+
+func autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in *v1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error {
+	out.Name = core.UniqueVolumeName(in.Name)
+	out.DevicePath = in.DevicePath
+	return nil
+}
+
+// Convert_v1_AttachedVolume_To_core_AttachedVolume is an autogenerated conversion function.
+func Convert_v1_AttachedVolume_To_core_AttachedVolume(in *v1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error {
+	return autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in, out, s)
+}
+
+func autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error {
+	out.Name = v1.UniqueVolumeName(in.Name)
+	out.DevicePath = in.DevicePath
+	return nil
+}
+
+// Convert_core_AttachedVolume_To_v1_AttachedVolume is an autogenerated conversion function.
+func Convert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error {
+	return autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in, out, s)
+}
+
+func autoConvert_v1_AvoidPods_To_core_AvoidPods(in *v1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error {
+	out.PreferAvoidPods = *(*[]core.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods))
+	return nil
+}
+
+// Convert_v1_AvoidPods_To_core_AvoidPods is an autogenerated conversion function.
+func Convert_v1_AvoidPods_To_core_AvoidPods(in *v1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error {
+	return autoConvert_v1_AvoidPods_To_core_AvoidPods(in, out, s)
+}
+
+func autoConvert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error {
+	out.PreferAvoidPods = *(*[]v1.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods))
+	return nil
+}
+
+// Convert_core_AvoidPods_To_v1_AvoidPods is an autogenerated conversion function.
+func Convert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error {
+	return autoConvert_core_AvoidPods_To_v1_AvoidPods(in, out, s)
+}
+
+func autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error {
+	out.DiskName = in.DiskName
+	out.DataDiskURI = in.DataDiskURI
+	out.CachingMode = (*core.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode))
+	out.FSType = (*string)(unsafe.Pointer(in.FSType))
+	out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly))
+	out.Kind = (*core.AzureDataDiskKind)(unsafe.Pointer(in.Kind))
+	return nil
+}
+
+// Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource is an autogenerated conversion function.
+func Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error {
+	return autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in, out, s)
+}
+
+func autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error {
+	out.DiskName = in.DiskName
+	out.DataDiskURI = in.DataDiskURI
+	out.CachingMode = (*v1.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode))
+	out.FSType = (*string)(unsafe.Pointer(in.FSType))
+	out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly))
+	out.Kind = (*v1.AzureDataDiskKind)(unsafe.Pointer(in.Kind))
+	return nil
+}
+
+// Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource is an autogenerated conversion function.
+func Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error {
+	return autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error {
+	out.SecretName = in.SecretName
+	out.ShareName = in.ShareName
+	out.ReadOnly = in.ReadOnly
+	out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace))
+	return nil
+}
+
+// Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource is an autogenerated conversion function.
+func Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error {
+	return autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in, out, s)
+}
+
+func autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error {
+	out.SecretName = in.SecretName
+	out.ShareName = in.ShareName
+	out.ReadOnly = in.ReadOnly
+	out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace))
+	return nil
+}
+
+// Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource is an autogenerated conversion function.
+func Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error {
+	return autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error {
+	out.SecretName = in.SecretName
+	out.ShareName = in.ShareName
+	out.ReadOnly = in.ReadOnly
+	return nil
+}
+
+// Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource is an autogenerated conversion function.
+func Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error {
+	return autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in, out, s)
+}
+
+func autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error {
+	out.SecretName = in.SecretName
+	out.ShareName = in.ShareName
+	out.ReadOnly = in.ReadOnly
+	return nil
+}
+
+// Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource is an autogenerated conversion function.
+func Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error {
+	return autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_Binding_To_core_Binding(in *v1.Binding, out *core.Binding, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.Target, &out.Target, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1_Binding_To_core_Binding is an autogenerated conversion function.
+func Convert_v1_Binding_To_core_Binding(in *v1.Binding, out *core.Binding, s conversion.Scope) error {
+	return autoConvert_v1_Binding_To_core_Binding(in, out, s)
+}
+
+func autoConvert_core_Binding_To_v1_Binding(in *core.Binding, out *v1.Binding, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_core_Binding_To_v1_Binding is an autogenerated conversion function.
+func Convert_core_Binding_To_v1_Binding(in *core.Binding, out *v1.Binding, s conversion.Scope) error {
+	return autoConvert_core_Binding_To_v1_Binding(in, out, s)
+}
+
+func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *v1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error {
+	out.Driver = in.Driver
+	out.VolumeHandle = in.VolumeHandle
+	out.ReadOnly = in.ReadOnly
+	out.FSType = in.FSType
+	out.VolumeAttributes = *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes))
+	out.ControllerPublishSecretRef = (*core.SecretReference)(unsafe.Pointer(in.ControllerPublishSecretRef))
+	out.NodeStageSecretRef = (*core.SecretReference)(unsafe.Pointer(in.NodeStageSecretRef))
+	out.NodePublishSecretRef = (*core.SecretReference)(unsafe.Pointer(in.NodePublishSecretRef))
+	return nil
+}
+
+// Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource is an autogenerated conversion function.
+func Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *v1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error {
+	return autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in, out, s)
+}
+
+func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *v1.CSIPersistentVolumeSource, s conversion.Scope) error {
+	out.Driver = in.Driver
+	out.VolumeHandle = in.VolumeHandle
+	out.ReadOnly = in.ReadOnly
+	out.FSType = in.FSType
+	out.VolumeAttributes = *(*map[string]string)(unsafe.Pointer(&in.VolumeAttributes))
+	out.ControllerPublishSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.ControllerPublishSecretRef))
+	out.NodeStageSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.NodeStageSecretRef))
+	out.NodePublishSecretRef = (*v1.SecretReference)(unsafe.Pointer(in.NodePublishSecretRef))
+	return nil
+}
+
+// Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource is an autogenerated conversion function.
+func Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *v1.CSIPersistentVolumeSource, s conversion.Scope) error {
+	return autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_Capabilities_To_core_Capabilities(in *v1.Capabilities, out *core.Capabilities, s conversion.Scope) error {
+	out.Add = *(*[]core.Capability)(unsafe.Pointer(&in.Add))
+	out.Drop = *(*[]core.Capability)(unsafe.Pointer(&in.Drop))
+	return nil
+}
+
+// Convert_v1_Capabilities_To_core_Capabilities is an autogenerated conversion function.
+func Convert_v1_Capabilities_To_core_Capabilities(in *v1.Capabilities, out *core.Capabilities, s conversion.Scope) error {
+	return autoConvert_v1_Capabilities_To_core_Capabilities(in, out, s)
+}
+
+func autoConvert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *v1.Capabilities, s conversion.Scope) error {
+	out.Add = *(*[]v1.Capability)(unsafe.Pointer(&in.Add))
+	out.Drop = *(*[]v1.Capability)(unsafe.Pointer(&in.Drop))
+	return nil
+}
+
+// Convert_core_Capabilities_To_v1_Capabilities is an autogenerated conversion function.
+func Convert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *v1.Capabilities, s conversion.Scope) error {
+	return autoConvert_core_Capabilities_To_v1_Capabilities(in, out, s)
+}
+
+func autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error {
+	out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors))
+	out.Path = in.Path
+	out.User = in.User
+	out.SecretFile = in.SecretFile
+	out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef))
+	out.ReadOnly = in.ReadOnly
+	return nil
+}
+
+// Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource is an autogenerated conversion function.
+func Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error {
+	return autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in, out, s)
+}
+
+func autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error {
+	out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors))
+	out.Path = in.Path
+	out.User = in.User
+	out.SecretFile = in.SecretFile
+	out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef))
+	out.ReadOnly = in.ReadOnly
+	return nil
+}
+
+// Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource is an autogenerated conversion function.
+func Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error {
+	return autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error {
+	out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors))
+	out.Path = in.Path
+	out.User = in.User
+	out.SecretFile = in.SecretFile
+	out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
+	out.ReadOnly = in.ReadOnly
+	return nil
+}
+
+// Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource is an autogenerated conversion function.
+func Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error {
+	return autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in, out, s)
+}
+
+func autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error {
+	out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors))
+	out.Path = in.Path
+	out.User = in.User
+	out.SecretFile = in.SecretFile
+	out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
+	out.ReadOnly = in.ReadOnly
+	return nil
+}
+
+// Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource is an autogenerated conversion function.
+func Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error {
+	return autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(in *v1.CinderPersistentVolumeSource, out *core.CinderPersistentVolumeSource, s conversion.Scope) error {
+	out.VolumeID = in.VolumeID
+	out.FSType = in.FSType
+	out.ReadOnly = in.ReadOnly
+	out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef))
+	return nil
+}
+
+// Convert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource is an autogenerated conversion function.
+func Convert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(in *v1.CinderPersistentVolumeSource, out *core.CinderPersistentVolumeSource, s conversion.Scope) error {
+	return autoConvert_v1_CinderPersistentVolumeSource_To_core_CinderPersistentVolumeSource(in, out, s)
+}
+
+func autoConvert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(in *core.CinderPersistentVolumeSource, out *v1.CinderPersistentVolumeSource, s conversion.Scope) error {
+	out.VolumeID = in.VolumeID
+	out.FSType = in.FSType
+	out.ReadOnly = in.ReadOnly
+	out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef))
+	return nil
+}
+
+// Convert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource is an autogenerated conversion function.
+func Convert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(in *core.CinderPersistentVolumeSource, out *v1.CinderPersistentVolumeSource, s conversion.Scope) error {
+	return autoConvert_core_CinderPersistentVolumeSource_To_v1_CinderPersistentVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *v1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error {
+	out.VolumeID = in.VolumeID
+	out.FSType = in.FSType
+	out.ReadOnly = in.ReadOnly
+	out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
+	return nil
+}
+
+// Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource is an autogenerated conversion function.
+func Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *v1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error {
+	return autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in, out, s)
+}
+
+func autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error {
+	out.VolumeID = in.VolumeID
+	out.FSType = in.FSType
+	out.ReadOnly = in.ReadOnly
+	out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
+	return nil
+}
+
+// Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource is an autogenerated conversion function.
+func Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error {
+	return autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s)
+}
+
+func autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error {
+	out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
+	return nil
+}
+
+// Convert_v1_ClientIPConfig_To_core_ClientIPConfig is an autogenerated conversion function.
+func Convert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error {
+	return autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in, out, s)
+}
+
+func autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error {
+	out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds))
+	return nil
+}
+
+// Convert_core_ClientIPConfig_To_v1_ClientIPConfig is an autogenerated conversion function.
+func Convert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error {
+	return autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in, out, s)
+}
+
+func autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error {
+	out.Type = core.ComponentConditionType(in.Type)
+	out.Status = core.ConditionStatus(in.Status)
+	out.Message = in.Message
+	out.Error = in.Error
+	return nil
+}
+
+// Convert_v1_ComponentCondition_To_core_ComponentCondition is an autogenerated conversion function.
+func Convert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error {
+	return autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in, out, s)
+}
+
+func autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error {
+	out.Type = v1.ComponentConditionType(in.Type)
+	out.Status = v1.ConditionStatus(in.Status)
+	out.Message = in.Message
+	out.Error = in.Error
+	return nil
+}
+
+// Convert_core_ComponentCondition_To_v1_ComponentCondition is an autogenerated conversion function.
+func Convert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error {
+	return autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in, out, s)
+}
+
+func autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in *v1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	out.Conditions = *(*[]core.ComponentCondition)(unsafe.Pointer(&in.Conditions))
+	return nil
+}
+
+// Convert_v1_ComponentStatus_To_core_ComponentStatus is an autogenerated conversion function.
+func Convert_v1_ComponentStatus_To_core_ComponentStatus(in *v1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error {
+	return autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in, out, s)
+}
+
+func autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	out.Conditions = *(*[]v1.ComponentCondition)(unsafe.Pointer(&in.Conditions))
+	return nil
+}
+
+// Convert_core_ComponentStatus_To_v1_ComponentStatus is an autogenerated conversion function.
+func Convert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error {
+	return autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in, out, s)
+}
+
+func autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in *v1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]core.ComponentStatus)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+// Convert_v1_ComponentStatusList_To_core_ComponentStatusList is an autogenerated conversion function.
+func Convert_v1_ComponentStatusList_To_core_ComponentStatusList(in *v1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error {
+	return autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in, out, s)
+}
+
+func autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error {
+	out.ListMeta = in.ListMeta
+	out.Items = *(*[]v1.ComponentStatus)(unsafe.Pointer(&in.Items))
+	return nil
+}
+
+// Convert_core_ComponentStatusList_To_v1_ComponentStatusList is an autogenerated conversion function.
+func Convert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error {
+	return autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in, out, s)
+}
+
+func autoConvert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data))
+	out.BinaryData = *(*map[string][]byte)(unsafe.Pointer(&in.BinaryData))
+	return nil
+}
+
+// Convert_v1_ConfigMap_To_core_ConfigMap is an autogenerated conversion function.
+func Convert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error {
+	return autoConvert_v1_ConfigMap_To_core_ConfigMap(in, out, s)
+}
+
+func autoConvert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data))
+	out.BinaryData = *(*map[string][]byte)(unsafe.Pointer(&in.BinaryData))
+	return nil
+}
+
+// Convert_core_ConfigMap_To_v1_ConfigMap is an autogenerated conversion function.
+func Convert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error {
+	return autoConvert_core_ConfigMap_To_v1_ConfigMap(in, out, s)
+}
+
+func autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error {
+	if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
+		return err
+	}
+	out.Optional = (*bool)(unsafe.Pointer(in.Optional))
+	return nil
+}
+
+// Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource is an autogenerated conversion function.
+func Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error {
+	return autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in, out, s)
+}
+
+func autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error {
+	if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
+		return err
+	}
+	out.Optional = (*bool)(unsafe.Pointer(in.Optional))
+	return nil
+}
+
+// Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource is an autogenerated conversion function.
+func Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error {
+	return autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s)
+}
+
+func autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error {
+	if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
+		return err
+	}
+	out.Key = in.Key
+	out.Optional = (*bool)(unsafe.Pointer(in.Optional))
+	return nil
+}
+
+// Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector is an autogenerated conversion function.
+func Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error {
+	return autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in, out, s)
+}
+
+func autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error {
+	if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil {
+		return err
+	}
+	out.Key = in.Key
+	out.Optional = (*bool)(unsafe.Pointer(in.Optional))
+	return nil
+}
+
+// Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector is an autogenerated conversion function.
+func Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in *v1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ConfigMap)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ConfigMapList_To_core_ConfigMapList is an autogenerated conversion function. +func Convert_v1_ConfigMapList_To_core_ConfigMapList(in *v1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error { + return autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in, out, s) +} + +func autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ConfigMap)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ConfigMapList_To_v1_ConfigMapList is an autogenerated conversion function. +func Convert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { + return autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in, out, s) +} + +func autoConvert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(in *v1.ConfigMapNodeConfigSource, out *core.ConfigMapNodeConfigSource, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.KubeletConfigKey = in.KubeletConfigKey + return nil +} + +// Convert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource is an autogenerated conversion function. +func Convert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(in *v1.ConfigMapNodeConfigSource, out *core.ConfigMapNodeConfigSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapNodeConfigSource_To_core_ConfigMapNodeConfigSource(in, out, s) +} + +func autoConvert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(in *core.ConfigMapNodeConfigSource, out *v1.ConfigMapNodeConfigSource, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.KubeletConfigKey = in.KubeletConfigKey + return nil +} + +// Convert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource is an autogenerated conversion function. +func Convert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(in *core.ConfigMapNodeConfigSource, out *v1.ConfigMapNodeConfigSource, s conversion.Scope) error { + return autoConvert_core_ConfigMapNodeConfigSource_To_v1_ConfigMapNodeConfigSource(in, out, s) +} + +func autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *v1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection is an autogenerated conversion function. 
+func Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *v1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error { + return autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in, out, s) +} + +func autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection is an autogenerated conversion function. +func Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { + return autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) +} + +func autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource is an autogenerated conversion function. +func Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource is an autogenerated conversion function. 
+func Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_v1_Container_To_core_Container(in *v1.Container, out *core.Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]core.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]core.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]core.EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.VolumeDevices = *(*[]core.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices)) + out.LivenessProbe = (*core.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*core.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*core.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = core.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = core.PullPolicy(in.ImagePullPolicy) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(core.SecurityContext) + if err := Convert_v1_SecurityContext_To_core_SecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +// Convert_v1_Container_To_core_Container is an autogenerated conversion function. 
+func Convert_v1_Container_To_core_Container(in *v1.Container, out *core.Container, s conversion.Scope) error { + return autoConvert_v1_Container_To_core_Container(in, out, s) +} + +func autoConvert_core_Container_To_v1_Container(in *core.Container, out *v1.Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]v1.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.VolumeDevices = *(*[]v1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices)) + out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*v1.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*v1.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = v1.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + if err := Convert_core_SecurityContext_To_v1_SecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +// Convert_core_Container_To_v1_Container is an autogenerated conversion function. +func Convert_core_Container_To_v1_Container(in *core.Container, out *v1.Container, s conversion.Scope) error { + return autoConvert_core_Container_To_v1_Container(in, out, s) +} + +func autoConvert_v1_ContainerImage_To_core_ContainerImage(in *v1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + out.SizeBytes = in.SizeBytes + return nil +} + +// Convert_v1_ContainerImage_To_core_ContainerImage is an autogenerated conversion function. +func Convert_v1_ContainerImage_To_core_ContainerImage(in *v1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error { + return autoConvert_v1_ContainerImage_To_core_ContainerImage(in, out, s) +} + +func autoConvert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + out.SizeBytes = in.SizeBytes + return nil +} + +// Convert_core_ContainerImage_To_v1_ContainerImage is an autogenerated conversion function. +func Convert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { + return autoConvert_core_ContainerImage_To_v1_ContainerImage(in, out, s) +} + +func autoConvert_v1_ContainerPort_To_core_ContainerPort(in *v1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = core.Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +// Convert_v1_ContainerPort_To_core_ContainerPort is an autogenerated conversion function. 
+func Convert_v1_ContainerPort_To_core_ContainerPort(in *v1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error { + return autoConvert_v1_ContainerPort_To_core_ContainerPort(in, out, s) +} + +func autoConvert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = v1.Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +// Convert_core_ContainerPort_To_v1_ContainerPort is an autogenerated conversion function. +func Convert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { + return autoConvert_core_ContainerPort_To_v1_ContainerPort(in, out, s) +} + +func autoConvert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, out *core.ContainerState, s conversion.Scope) error { + out.Waiting = (*core.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*core.ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*core.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +// Convert_v1_ContainerState_To_core_ContainerState is an autogenerated conversion function. +func Convert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, out *core.ContainerState, s conversion.Scope) error { + return autoConvert_v1_ContainerState_To_core_ContainerState(in, out, s) +} + +func autoConvert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *v1.ContainerState, s conversion.Scope) error { + out.Waiting = (*v1.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*v1.ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*v1.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +// Convert_core_ContainerState_To_v1_ContainerState is an autogenerated conversion function. +func Convert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *v1.ContainerState, s conversion.Scope) error { + return autoConvert_core_ContainerState_To_v1_ContainerState(in, out, s) +} + +func autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *v1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +// Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning is an autogenerated conversion function. +func Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *v1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error { + return autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in, out, s) +} + +func autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +// Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning is an autogenerated conversion function. 
+func Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { + return autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) +} + +func autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +// Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated is an autogenerated conversion function. +func Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in, out, s) +} + +func autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +// Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated is an autogenerated conversion function. +func Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) +} + +func autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting is an autogenerated conversion function. +func Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in, out, s) +} + +func autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting is an autogenerated conversion function. 
+func Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) +} + +func autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_ContainerState_To_core_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_v1_ContainerState_To_core_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +// Convert_v1_ContainerStatus_To_core_ContainerStatus is an autogenerated conversion function. +func Convert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error { + return autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in, out, s) +} + +func autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_core_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_core_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +// Convert_core_ContainerStatus_To_v1_ContainerStatus is an autogenerated conversion function. +func Convert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { + return autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in, out, s) +} + +func autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *v1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +// Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint is an autogenerated conversion function. +func Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *v1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error { + return autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in, out, s) +} + +func autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +// Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint is an autogenerated conversion function. +func Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { + return autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) +} + +func autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]core.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection is an autogenerated conversion function. 
+func Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in, out, s) +} + +func autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection is an autogenerated conversion function. +func Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*core.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*core.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile is an autogenerated conversion function. +func Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile is an autogenerated conversion function. +func Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]core.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource is an autogenerated conversion function. +func Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource is an autogenerated conversion function. 
+func Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = core.StorageMedium(in.Medium) + out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) + return nil +} + +// Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource is an autogenerated conversion function. +func Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = v1.StorageMedium(in.Medium) + out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) + return nil +} + +// Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource is an autogenerated conversion function. +func Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in *v1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*core.ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +// Convert_v1_EndpointAddress_To_core_EndpointAddress is an autogenerated conversion function. +func Convert_v1_EndpointAddress_To_core_EndpointAddress(in *v1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error { + return autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in, out, s) +} + +func autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*v1.ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +// Convert_core_EndpointAddress_To_v1_EndpointAddress is an autogenerated conversion function. +func Convert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { + return autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in, out, s) +} + +func autoConvert_v1_EndpointPort_To_core_EndpointPort(in *v1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = core.Protocol(in.Protocol) + return nil +} + +// Convert_v1_EndpointPort_To_core_EndpointPort is an autogenerated conversion function. 
+func Convert_v1_EndpointPort_To_core_EndpointPort(in *v1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error { + return autoConvert_v1_EndpointPort_To_core_EndpointPort(in, out, s) +} + +func autoConvert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = v1.Protocol(in.Protocol) + return nil +} + +// Convert_core_EndpointPort_To_v1_EndpointPort is an autogenerated conversion function. +func Convert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { + return autoConvert_core_EndpointPort_To_v1_EndpointPort(in, out, s) +} + +func autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in *v1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]core.EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]core.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]core.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_v1_EndpointSubset_To_core_EndpointSubset is an autogenerated conversion function. +func Convert_v1_EndpointSubset_To_core_EndpointSubset(in *v1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error { + return autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in, out, s) +} + +func autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]v1.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_core_EndpointSubset_To_v1_EndpointSubset is an autogenerated conversion function. +func Convert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { + return autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in, out, s) +} + +func autoConvert_v1_Endpoints_To_core_Endpoints(in *v1.Endpoints, out *core.Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subsets = *(*[]core.EndpointSubset)(unsafe.Pointer(&in.Subsets)) + return nil +} + +// Convert_v1_Endpoints_To_core_Endpoints is an autogenerated conversion function. +func Convert_v1_Endpoints_To_core_Endpoints(in *v1.Endpoints, out *core.Endpoints, s conversion.Scope) error { + return autoConvert_v1_Endpoints_To_core_Endpoints(in, out, s) +} + +func autoConvert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *v1.Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subsets = *(*[]v1.EndpointSubset)(unsafe.Pointer(&in.Subsets)) + return nil +} + +// Convert_core_Endpoints_To_v1_Endpoints is an autogenerated conversion function. +func Convert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *v1.Endpoints, s conversion.Scope) error { + return autoConvert_core_Endpoints_To_v1_Endpoints(in, out, s) +} + +func autoConvert_v1_EndpointsList_To_core_EndpointsList(in *v1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Endpoints)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_EndpointsList_To_core_EndpointsList is an autogenerated conversion function. 
+func Convert_v1_EndpointsList_To_core_EndpointsList(in *v1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error { + return autoConvert_v1_EndpointsList_To_core_EndpointsList(in, out, s) +} + +func autoConvert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Endpoints)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_EndpointsList_To_v1_EndpointsList is an autogenerated conversion function. +func Convert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { + return autoConvert_core_EndpointsList_To_v1_EndpointsList(in, out, s) +} + +func autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in *v1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*core.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*core.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_EnvFromSource_To_core_EnvFromSource is an autogenerated conversion function. +func Convert_v1_EnvFromSource_To_core_EnvFromSource(in *v1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error { + return autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in, out, s) +} + +func autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*v1.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*v1.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_core_EnvFromSource_To_v1_EnvFromSource is an autogenerated conversion function. +func Convert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { + return autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in, out, s) +} + +func autoConvert_v1_EnvVar_To_core_EnvVar(in *v1.EnvVar, out *core.EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*core.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +// Convert_v1_EnvVar_To_core_EnvVar is an autogenerated conversion function. +func Convert_v1_EnvVar_To_core_EnvVar(in *v1.EnvVar, out *core.EnvVar, s conversion.Scope) error { + return autoConvert_v1_EnvVar_To_core_EnvVar(in, out, s) +} + +func autoConvert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *v1.EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*v1.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +// Convert_core_EnvVar_To_v1_EnvVar is an autogenerated conversion function. +func Convert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *v1.EnvVar, s conversion.Scope) error { + return autoConvert_core_EnvVar_To_v1_EnvVar(in, out, s) +} + +func autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in *v1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*core.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*core.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*core.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*core.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +// Convert_v1_EnvVarSource_To_core_EnvVarSource is an autogenerated conversion function. 
+func Convert_v1_EnvVarSource_To_core_EnvVarSource(in *v1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error { + return autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in, out, s) +} + +func autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*v1.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*v1.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +// Convert_core_EnvVarSource_To_v1_EnvVarSource is an autogenerated conversion function. +func Convert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { + return autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in, out, s) +} + +func autoConvert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_v1_EventSource_To_core_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + out.EventTime = in.EventTime + out.Series = (*core.EventSeries)(unsafe.Pointer(in.Series)) + out.Action = in.Action + out.Related = (*core.ObjectReference)(unsafe.Pointer(in.Related)) + out.ReportingController = in.ReportingController + out.ReportingInstance = in.ReportingInstance + return nil +} + +// Convert_v1_Event_To_core_Event is an autogenerated conversion function. +func Convert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s conversion.Scope) error { + return autoConvert_v1_Event_To_core_Event(in, out, s) +} + +func autoConvert_core_Event_To_v1_Event(in *core.Event, out *v1.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_core_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + out.EventTime = in.EventTime + out.Series = (*v1.EventSeries)(unsafe.Pointer(in.Series)) + out.Action = in.Action + out.Related = (*v1.ObjectReference)(unsafe.Pointer(in.Related)) + out.ReportingController = in.ReportingController + out.ReportingInstance = in.ReportingInstance + return nil +} + +// Convert_core_Event_To_v1_Event is an autogenerated conversion function. +func Convert_core_Event_To_v1_Event(in *core.Event, out *v1.Event, s conversion.Scope) error { + return autoConvert_core_Event_To_v1_Event(in, out, s) +} + +func autoConvert_v1_EventList_To_core_EventList(in *v1.EventList, out *core.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_EventList_To_core_EventList is an autogenerated conversion function. 
+func Convert_v1_EventList_To_core_EventList(in *v1.EventList, out *core.EventList, s conversion.Scope) error { + return autoConvert_v1_EventList_To_core_EventList(in, out, s) +} + +func autoConvert_core_EventList_To_v1_EventList(in *core.EventList, out *v1.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_EventList_To_v1_EventList is an autogenerated conversion function. +func Convert_core_EventList_To_v1_EventList(in *core.EventList, out *v1.EventList, s conversion.Scope) error { + return autoConvert_core_EventList_To_v1_EventList(in, out, s) +} + +func autoConvert_v1_EventSeries_To_core_EventSeries(in *v1.EventSeries, out *core.EventSeries, s conversion.Scope) error { + out.Count = in.Count + out.LastObservedTime = in.LastObservedTime + out.State = core.EventSeriesState(in.State) + return nil +} + +// Convert_v1_EventSeries_To_core_EventSeries is an autogenerated conversion function. +func Convert_v1_EventSeries_To_core_EventSeries(in *v1.EventSeries, out *core.EventSeries, s conversion.Scope) error { + return autoConvert_v1_EventSeries_To_core_EventSeries(in, out, s) +} + +func autoConvert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *v1.EventSeries, s conversion.Scope) error { + out.Count = in.Count + out.LastObservedTime = in.LastObservedTime + out.State = v1.EventSeriesState(in.State) + return nil +} + +// Convert_core_EventSeries_To_v1_EventSeries is an autogenerated conversion function. +func Convert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *v1.EventSeries, s conversion.Scope) error { + return autoConvert_core_EventSeries_To_v1_EventSeries(in, out, s) +} + +func autoConvert_v1_EventSource_To_core_EventSource(in *v1.EventSource, out *core.EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +// Convert_v1_EventSource_To_core_EventSource is an autogenerated conversion function. +func Convert_v1_EventSource_To_core_EventSource(in *v1.EventSource, out *core.EventSource, s conversion.Scope) error { + return autoConvert_v1_EventSource_To_core_EventSource(in, out, s) +} + +func autoConvert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *v1.EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +// Convert_core_EventSource_To_v1_EventSource is an autogenerated conversion function. +func Convert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *v1.EventSource, s conversion.Scope) error { + return autoConvert_core_EventSource_To_v1_EventSource(in, out, s) +} + +func autoConvert_v1_ExecAction_To_core_ExecAction(in *v1.ExecAction, out *core.ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_v1_ExecAction_To_core_ExecAction is an autogenerated conversion function. +func Convert_v1_ExecAction_To_core_ExecAction(in *v1.ExecAction, out *core.ExecAction, s conversion.Scope) error { + return autoConvert_v1_ExecAction_To_core_ExecAction(in, out, s) +} + +func autoConvert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *v1.ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_core_ExecAction_To_v1_ExecAction is an autogenerated conversion function. 
+func Convert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *v1.ExecAction, s conversion.Scope) error { + return autoConvert_core_ExecAction_To_v1_ExecAction(in, out, s) +} + +func autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in *v1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) + return nil +} + +// Convert_v1_FCVolumeSource_To_core_FCVolumeSource is an autogenerated conversion function. +func Convert_v1_FCVolumeSource_To_core_FCVolumeSource(in *v1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in, out, s) +} + +func autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) + return nil +} + +// Convert_core_FCVolumeSource_To_v1_FCVolumeSource is an autogenerated conversion function. +func Convert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { + return autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) +} + +func autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *v1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in *v1.FlexPersistentVolumeSource, out *core.FlexPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlexPersistentVolumeSource_To_core_FlexPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *v1.FlexPersistentVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource is an autogenerated conversion function. 
+func Convert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in *core.FlexPersistentVolumeSource, out *v1.FlexPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_FlexPersistentVolumeSource_To_v1_FlexPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource is an autogenerated conversion function. +func Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in, out, s) +} + +func autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource is an autogenerated conversion function. +func Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { + return autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) +} + +func autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +// Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource is an autogenerated conversion function. +func Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in, out, s) +} + +func autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +// Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource is an autogenerated conversion function. +func Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) +} + +func autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource is an autogenerated conversion function. 
+func Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +// Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource is an autogenerated conversion function. +func Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +// Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource is an autogenerated conversion function. +func Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in *v1.GlusterfsPersistentVolumeSource, out *core.GlusterfsPersistentVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + out.EndpointsNamespace = (*string)(unsafe.Pointer(in.EndpointsNamespace)) + return nil +} + +// Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource is an autogenerated conversion function. 
+func Convert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in *v1.GlusterfsPersistentVolumeSource, out *core.GlusterfsPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GlusterfsPersistentVolumeSource_To_core_GlusterfsPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in *core.GlusterfsPersistentVolumeSource, out *v1.GlusterfsPersistentVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + out.EndpointsNamespace = (*string)(unsafe.Pointer(in.EndpointsNamespace)) + return nil +} + +// Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in *core.GlusterfsPersistentVolumeSource, out *v1.GlusterfsPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_GlusterfsPersistentVolumeSource_To_v1_GlusterfsPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource is an autogenerated conversion function. +func Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource is an autogenerated conversion function. +func Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in *v1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = core.URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]core.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +// Convert_v1_HTTPGetAction_To_core_HTTPGetAction is an autogenerated conversion function. +func Convert_v1_HTTPGetAction_To_core_HTTPGetAction(in *v1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error { + return autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in, out, s) +} + +func autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = v1.URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]v1.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +// Convert_core_HTTPGetAction_To_v1_HTTPGetAction is an autogenerated conversion function. 
+func Convert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { + return autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) +} + +func autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in *v1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_v1_HTTPHeader_To_core_HTTPHeader is an autogenerated conversion function. +func Convert_v1_HTTPHeader_To_core_HTTPHeader(in *v1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error { + return autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in, out, s) +} + +func autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_core_HTTPHeader_To_v1_HTTPHeader is an autogenerated conversion function. +func Convert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { + return autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in, out, s) +} + +func autoConvert_v1_Handler_To_core_Handler(in *v1.Handler, out *core.Handler, s conversion.Scope) error { + out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +// Convert_v1_Handler_To_core_Handler is an autogenerated conversion function. +func Convert_v1_Handler_To_core_Handler(in *v1.Handler, out *core.Handler, s conversion.Scope) error { + return autoConvert_v1_Handler_To_core_Handler(in, out, s) +} + +func autoConvert_core_Handler_To_v1_Handler(in *core.Handler, out *v1.Handler, s conversion.Scope) error { + out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +// Convert_core_Handler_To_v1_Handler is an autogenerated conversion function. +func Convert_core_Handler_To_v1_Handler(in *core.Handler, out *v1.Handler, s conversion.Scope) error { + return autoConvert_core_Handler_To_v1_Handler(in, out, s) +} + +func autoConvert_v1_HostAlias_To_core_HostAlias(in *v1.HostAlias, out *core.HostAlias, s conversion.Scope) error { + out.IP = in.IP + out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) + return nil +} + +// Convert_v1_HostAlias_To_core_HostAlias is an autogenerated conversion function. +func Convert_v1_HostAlias_To_core_HostAlias(in *v1.HostAlias, out *core.HostAlias, s conversion.Scope) error { + return autoConvert_v1_HostAlias_To_core_HostAlias(in, out, s) +} + +func autoConvert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *v1.HostAlias, s conversion.Scope) error { + out.IP = in.IP + out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) + return nil +} + +// Convert_core_HostAlias_To_v1_HostAlias is an autogenerated conversion function. 
+func Convert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *v1.HostAlias, s conversion.Scope) error { + return autoConvert_core_HostAlias_To_v1_HostAlias(in, out, s) +} + +func autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + out.Type = (*core.HostPathType)(unsafe.Pointer(in.Type)) + return nil +} + +// Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource is an autogenerated conversion function. +func Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in, out, s) +} + +func autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + out.Type = (*v1.HostPathType)(unsafe.Pointer(in.Type)) + return nil +} + +// Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource is an autogenerated conversion function. +func Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) +} + +func autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *v1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *v1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *v1.ISCSIPersistentVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource is an autogenerated conversion function. 
+func Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *v1.ISCSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource is an autogenerated conversion function. +func Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource is an autogenerated conversion function. +func Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_v1_KeyToPath_To_core_KeyToPath(in *v1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_v1_KeyToPath_To_core_KeyToPath is an autogenerated conversion function. +func Convert_v1_KeyToPath_To_core_KeyToPath(in *v1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error { + return autoConvert_v1_KeyToPath_To_core_KeyToPath(in, out, s) +} + +func autoConvert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_core_KeyToPath_To_v1_KeyToPath is an autogenerated conversion function. +func Convert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { + return autoConvert_core_KeyToPath_To_v1_KeyToPath(in, out, s) +} + +func autoConvert_v1_Lifecycle_To_core_Lifecycle(in *v1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error { + out.PostStart = (*core.Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*core.Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +// Convert_v1_Lifecycle_To_core_Lifecycle is an autogenerated conversion function. 
+func Convert_v1_Lifecycle_To_core_Lifecycle(in *v1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error { + return autoConvert_v1_Lifecycle_To_core_Lifecycle(in, out, s) +} + +func autoConvert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { + out.PostStart = (*v1.Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*v1.Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +// Convert_core_Lifecycle_To_v1_Lifecycle is an autogenerated conversion function. +func Convert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { + return autoConvert_core_Lifecycle_To_v1_Lifecycle(in, out, s) +} + +func autoConvert_v1_LimitRange_To_core_LimitRange(in *v1.LimitRange, out *core.LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1_LimitRange_To_core_LimitRange is an autogenerated conversion function. +func Convert_v1_LimitRange_To_core_LimitRange(in *v1.LimitRange, out *core.LimitRange, s conversion.Scope) error { + return autoConvert_v1_LimitRange_To_core_LimitRange(in, out, s) +} + +func autoConvert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *v1.LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_core_LimitRange_To_v1_LimitRange is an autogenerated conversion function. +func Convert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *v1.LimitRange, s conversion.Scope) error { + return autoConvert_core_LimitRange_To_v1_LimitRange(in, out, s) +} + +func autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in *v1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error { + out.Type = core.LimitType(in.Type) + out.Max = *(*core.ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*core.ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*core.ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*core.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*core.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +// Convert_v1_LimitRangeItem_To_core_LimitRangeItem is an autogenerated conversion function. +func Convert_v1_LimitRangeItem_To_core_LimitRangeItem(in *v1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error { + return autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in, out, s) +} + +func autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { + out.Type = v1.LimitType(in.Type) + out.Max = *(*v1.ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*v1.ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*v1.ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*v1.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*v1.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +// Convert_core_LimitRangeItem_To_v1_LimitRangeItem is an autogenerated conversion function. 
+func Convert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { + return autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) +} + +func autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in *v1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.LimitRange)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_LimitRangeList_To_core_LimitRangeList is an autogenerated conversion function. +func Convert_v1_LimitRangeList_To_core_LimitRangeList(in *v1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error { + return autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in, out, s) +} + +func autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.LimitRange)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_LimitRangeList_To_v1_LimitRangeList is an autogenerated conversion function. +func Convert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { + return autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in, out, s) +} + +func autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *v1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]core.LimitRangeItem)(unsafe.Pointer(&in.Limits)) + return nil +} + +// Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec is an autogenerated conversion function. +func Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *v1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error { + return autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in, out, s) +} + +func autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]v1.LimitRangeItem)(unsafe.Pointer(&in.Limits)) + return nil +} + +// Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec is an autogenerated conversion function. +func Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { + return autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) +} + +func autoConvert_v1_List_To_core_List(in *v1.List, out *core.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_List_To_core_List is an autogenerated conversion function. +func Convert_v1_List_To_core_List(in *v1.List, out *core.List, s conversion.Scope) error { + return autoConvert_v1_List_To_core_List(in, out, s) +} + +func autoConvert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_List_To_v1_List is an autogenerated conversion function. 
+func Convert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scope) error { + return autoConvert_core_List_To_v1_List(in, out, s) +} + +func autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +// Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress is an autogenerated conversion function. +func Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in, out, s) +} + +func autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +// Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress is an autogenerated conversion function. +func Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) +} + +func autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]core.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +// Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus is an autogenerated conversion function. +func Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in, out, s) +} + +func autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]v1.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +// Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus is an autogenerated conversion function. +func Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) +} + +func autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in *v1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_v1_LocalObjectReference_To_core_LocalObjectReference is an autogenerated conversion function. +func Convert_v1_LocalObjectReference_To_core_LocalObjectReference(in *v1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { + return autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in, out, s) +} + +func autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_core_LocalObjectReference_To_v1_LocalObjectReference is an autogenerated conversion function. 
+func Convert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { + return autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) +} + +func autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *v1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error { + out.Path = in.Path + out.FSType = (*string)(unsafe.Pointer(in.FSType)) + return nil +} + +// Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource is an autogenerated conversion function. +func Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *v1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error { + return autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in, out, s) +} + +func autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { + out.Path = in.Path + out.FSType = (*string)(unsafe.Pointer(in.FSType)) + return nil +} + +// Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource is an autogenerated conversion function. +func Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { + return autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in, out, s) +} + +func autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource is an autogenerated conversion function. +func Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in, out, s) +} + +func autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource is an autogenerated conversion function. +func Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { + return autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) +} + +func autoConvert_v1_Namespace_To_core_Namespace(in *v1.Namespace, out *core.Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NamespaceSpec_To_core_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NamespaceStatus_To_core_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Namespace_To_core_Namespace is an autogenerated conversion function. 
+func Convert_v1_Namespace_To_core_Namespace(in *v1.Namespace, out *core.Namespace, s conversion.Scope) error { + return autoConvert_v1_Namespace_To_core_Namespace(in, out, s) +} + +func autoConvert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *v1.Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Namespace_To_v1_Namespace is an autogenerated conversion function. +func Convert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *v1.Namespace, s conversion.Scope) error { + return autoConvert_core_Namespace_To_v1_Namespace(in, out, s) +} + +func autoConvert_v1_NamespaceList_To_core_NamespaceList(in *v1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Namespace)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_NamespaceList_To_core_NamespaceList is an autogenerated conversion function. +func Convert_v1_NamespaceList_To_core_NamespaceList(in *v1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error { + return autoConvert_v1_NamespaceList_To_core_NamespaceList(in, out, s) +} + +func autoConvert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Namespace)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_NamespaceList_To_v1_NamespaceList is an autogenerated conversion function. +func Convert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { + return autoConvert_core_NamespaceList_To_v1_NamespaceList(in, out, s) +} + +func autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in *v1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]core.FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +// Convert_v1_NamespaceSpec_To_core_NamespaceSpec is an autogenerated conversion function. +func Convert_v1_NamespaceSpec_To_core_NamespaceSpec(in *v1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error { + return autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in, out, s) +} + +func autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]v1.FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +// Convert_core_NamespaceSpec_To_v1_NamespaceSpec is an autogenerated conversion function. +func Convert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { + return autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) +} + +func autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in *v1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error { + out.Phase = core.NamespacePhase(in.Phase) + return nil +} + +// Convert_v1_NamespaceStatus_To_core_NamespaceStatus is an autogenerated conversion function. 
+func Convert_v1_NamespaceStatus_To_core_NamespaceStatus(in *v1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error { + return autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in, out, s) +} + +func autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { + out.Phase = v1.NamespacePhase(in.Phase) + return nil +} + +// Convert_core_NamespaceStatus_To_v1_NamespaceStatus is an autogenerated conversion function. +func Convert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { + return autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) +} + +func autoConvert_v1_Node_To_core_Node(in *v1.Node, out *core.Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NodeSpec_To_core_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NodeStatus_To_core_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Node_To_core_Node is an autogenerated conversion function. +func Convert_v1_Node_To_core_Node(in *v1.Node, out *core.Node, s conversion.Scope) error { + return autoConvert_v1_Node_To_core_Node(in, out, s) +} + +func autoConvert_core_Node_To_v1_Node(in *core.Node, out *v1.Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Node_To_v1_Node is an autogenerated conversion function. +func Convert_core_Node_To_v1_Node(in *core.Node, out *v1.Node, s conversion.Scope) error { + return autoConvert_core_Node_To_v1_Node(in, out, s) +} + +func autoConvert_v1_NodeAddress_To_core_NodeAddress(in *v1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error { + out.Type = core.NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +// Convert_v1_NodeAddress_To_core_NodeAddress is an autogenerated conversion function. +func Convert_v1_NodeAddress_To_core_NodeAddress(in *v1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error { + return autoConvert_v1_NodeAddress_To_core_NodeAddress(in, out, s) +} + +func autoConvert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { + out.Type = v1.NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +// Convert_core_NodeAddress_To_v1_NodeAddress is an autogenerated conversion function. +func Convert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { + return autoConvert_core_NodeAddress_To_v1_NodeAddress(in, out, s) +} + +func autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in *v1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*core.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_NodeAffinity_To_core_NodeAffinity is an autogenerated conversion function. 
+func Convert_v1_NodeAffinity_To_core_NodeAffinity(in *v1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error { + return autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in, out, s) +} + +func autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*v1.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_core_NodeAffinity_To_v1_NodeAffinity is an autogenerated conversion function. +func Convert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { + return autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in, out, s) +} + +func autoConvert_v1_NodeCondition_To_core_NodeCondition(in *v1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error { + out.Type = core.NodeConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_NodeCondition_To_core_NodeCondition is an autogenerated conversion function. +func Convert_v1_NodeCondition_To_core_NodeCondition(in *v1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error { + return autoConvert_v1_NodeCondition_To_core_NodeCondition(in, out, s) +} + +func autoConvert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { + out.Type = v1.NodeConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_NodeCondition_To_v1_NodeCondition is an autogenerated conversion function. +func Convert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { + return autoConvert_core_NodeCondition_To_v1_NodeCondition(in, out, s) +} + +func autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in *v1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error { + out.ConfigMap = (*core.ConfigMapNodeConfigSource)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +// Convert_v1_NodeConfigSource_To_core_NodeConfigSource is an autogenerated conversion function. +func Convert_v1_NodeConfigSource_To_core_NodeConfigSource(in *v1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error { + return autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in, out, s) +} + +func autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { + out.ConfigMap = (*v1.ConfigMapNodeConfigSource)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +// Convert_core_NodeConfigSource_To_v1_NodeConfigSource is an autogenerated conversion function. 
+func Convert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { + return autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in, out, s) +} + +func autoConvert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *v1.NodeConfigStatus, out *core.NodeConfigStatus, s conversion.Scope) error { + out.Assigned = (*core.NodeConfigSource)(unsafe.Pointer(in.Assigned)) + out.Active = (*core.NodeConfigSource)(unsafe.Pointer(in.Active)) + out.LastKnownGood = (*core.NodeConfigSource)(unsafe.Pointer(in.LastKnownGood)) + out.Error = in.Error + return nil +} + +// Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus is an autogenerated conversion function. +func Convert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in *v1.NodeConfigStatus, out *core.NodeConfigStatus, s conversion.Scope) error { + return autoConvert_v1_NodeConfigStatus_To_core_NodeConfigStatus(in, out, s) +} + +func autoConvert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in *core.NodeConfigStatus, out *v1.NodeConfigStatus, s conversion.Scope) error { + out.Assigned = (*v1.NodeConfigSource)(unsafe.Pointer(in.Assigned)) + out.Active = (*v1.NodeConfigSource)(unsafe.Pointer(in.Active)) + out.LastKnownGood = (*v1.NodeConfigSource)(unsafe.Pointer(in.LastKnownGood)) + out.Error = in.Error + return nil +} + +// Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus is an autogenerated conversion function. +func Convert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in *core.NodeConfigStatus, out *v1.NodeConfigStatus, s conversion.Scope) error { + return autoConvert_core_NodeConfigStatus_To_v1_NodeConfigStatus(in, out, s) +} + +func autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints is an autogenerated conversion function. +func Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints is an autogenerated conversion function. +func Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Node)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_NodeList_To_core_NodeList is an autogenerated conversion function. 
+func Convert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeList, s conversion.Scope) error { + return autoConvert_v1_NodeList_To_core_NodeList(in, out, s) +} + +func autoConvert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *v1.NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Node)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_NodeList_To_v1_NodeList is an autogenerated conversion function. +func Convert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *v1.NodeList, s conversion.Scope) error { + return autoConvert_core_NodeList_To_v1_NodeList(in, out, s) +} + +func autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *v1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions is an autogenerated conversion function. +func Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *v1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in, out, s) +} + +func autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions is an autogenerated conversion function. +func Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) +} + +func autoConvert_v1_NodeResources_To_core_NodeResources(in *v1.NodeResources, out *core.NodeResources, s conversion.Scope) error { + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +// Convert_v1_NodeResources_To_core_NodeResources is an autogenerated conversion function. +func Convert_v1_NodeResources_To_core_NodeResources(in *v1.NodeResources, out *core.NodeResources, s conversion.Scope) error { + return autoConvert_v1_NodeResources_To_core_NodeResources(in, out, s) +} + +func autoConvert_core_NodeResources_To_v1_NodeResources(in *core.NodeResources, out *v1.NodeResources, s conversion.Scope) error { + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +// Convert_core_NodeResources_To_v1_NodeResources is an autogenerated conversion function. +func Convert_core_NodeResources_To_v1_NodeResources(in *core.NodeResources, out *v1.NodeResources, s conversion.Scope) error { + return autoConvert_core_NodeResources_To_v1_NodeResources(in, out, s) +} + +func autoConvert_v1_NodeSelector_To_core_NodeSelector(in *v1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]core.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + return nil +} + +// Convert_v1_NodeSelector_To_core_NodeSelector is an autogenerated conversion function. +func Convert_v1_NodeSelector_To_core_NodeSelector(in *v1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error { + return autoConvert_v1_NodeSelector_To_core_NodeSelector(in, out, s) +} + +func autoConvert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]v1.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + return nil +} + +// Convert_core_NodeSelector_To_v1_NodeSelector is an autogenerated conversion function. 
+func Convert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { + return autoConvert_core_NodeSelector_To_v1_NodeSelector(in, out, s) +} + +func autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = core.NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement is an autogenerated conversion function. +func Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = v1.NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement is an autogenerated conversion function. +func Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]core.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + out.MatchFields = *(*[]core.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchFields)) + return nil +} + +// Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm is an autogenerated conversion function. +func Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in, out, s) +} + +func autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]v1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + out.MatchFields = *(*[]v1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchFields)) + return nil +} + +// Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm is an autogenerated conversion function. +func Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) +} + +func autoConvert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]core.Taint)(unsafe.Pointer(&in.Taints)) + out.ConfigSource = (*core.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) + out.DoNotUse_ExternalID = in.DoNotUse_ExternalID + return nil +} + +// Convert_v1_NodeSpec_To_core_NodeSpec is an autogenerated conversion function. 
+func Convert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error { + return autoConvert_v1_NodeSpec_To_core_NodeSpec(in, out, s) +} + +func autoConvert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) + out.ConfigSource = (*v1.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) + out.DoNotUse_ExternalID = in.DoNotUse_ExternalID + return nil +} + +// Convert_core_NodeSpec_To_v1_NodeSpec is an autogenerated conversion function. +func Convert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { + return autoConvert_core_NodeSpec_To_v1_NodeSpec(in, out, s) +} + +func autoConvert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error { + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*core.ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = core.NodePhase(in.Phase) + out.Conditions = *(*[]core.NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]core.NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]core.ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]core.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]core.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + out.Config = (*core.NodeConfigStatus)(unsafe.Pointer(in.Config)) + return nil +} + +// Convert_v1_NodeStatus_To_core_NodeStatus is an autogenerated conversion function. +func Convert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error { + return autoConvert_v1_NodeStatus_To_core_NodeStatus(in, out, s) +} + +func autoConvert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*v1.ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = v1.NodePhase(in.Phase) + out.Conditions = *(*[]v1.NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]v1.ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]v1.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]v1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + out.Config = (*v1.NodeConfigStatus)(unsafe.Pointer(in.Config)) + return nil +} + +// Convert_core_NodeStatus_To_v1_NodeStatus is an autogenerated conversion function. 
+func Convert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { + return autoConvert_core_NodeStatus_To_v1_NodeStatus(in, out, s) +} + +func autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *v1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +// Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo is an autogenerated conversion function. +func Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *v1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error { + return autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in, out, s) +} + +func autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +// Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo is an autogenerated conversion function. +func Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { + return autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) +} + +func autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector is an autogenerated conversion function. +func Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in, out, s) +} + +func autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector is an autogenerated conversion function. +func Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) +} + +func autoConvert_v1_ObjectReference_To_core_ObjectReference(in *v1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_v1_ObjectReference_To_core_ObjectReference is an autogenerated conversion function. 
+func Convert_v1_ObjectReference_To_core_ObjectReference(in *v1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error { + return autoConvert_v1_ObjectReference_To_core_ObjectReference(in, out, s) +} + +func autoConvert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_core_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function. +func Convert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { + return autoConvert_core_ObjectReference_To_v1_ObjectReference(in, out, s) +} + +func autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in *v1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PersistentVolume_To_core_PersistentVolume is an autogenerated conversion function. +func Convert_v1_PersistentVolume_To_core_PersistentVolume(in *v1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error { + return autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in, out, s) +} + +func autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_PersistentVolume_To_v1_PersistentVolume is an autogenerated conversion function. +func Convert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { + return autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error { + out.Type = core.PersistentVolumeClaimConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { + out.Type = v1.PersistentVolumeClaimConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + out.DataSource = (*core.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource)) + return nil +} + +// Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + out.DataSource = (*v1.TypedLocalObjectReference)(unsafe.Pointer(in.DataSource)) + return nil +} + +// Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec is an autogenerated conversion function. 
+func Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = core.PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Conditions = *(*[]core.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = v1.PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Conditions = *(*[]v1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. 
+func Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) +} +
+func autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *v1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_v1_PersistentVolume_To_core_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} +
+// Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList is an autogenerated conversion function. +func Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *v1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in, out, s) +} +
+func autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_core_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} +
+// Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList is an autogenerated conversion function. +func Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) +} +
+func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*core.GlusterfsPersistentVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*core.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) + out.ISCSI = (*core.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Cinder = (*core.CinderPersistentVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*core.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.FlexVolume = (*core.FlexPersistentVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.AzureFile = (*core.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*core.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*core.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*core.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*core.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.Local = (*core.LocalVolumeSource)(unsafe.Pointer(in.Local)) + out.StorageOS = (*core.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) + out.CSI = (*core.CSIPersistentVolumeSource)(unsafe.Pointer(in.CSI)) + return nil +} +
+// Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in, out, s) +} +
+func autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*v1.GlusterfsPersistentVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.ISCSI = (*v1.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.FlexVolume = (*v1.FlexPersistentVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*v1.CinderPersistentVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*v1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.AzureFile = (*v1.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*v1.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.Local = (*v1.LocalVolumeSource)(unsafe.Pointer(in.Local)) + out.StorageOS = (*v1.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) + out.CSI = (*v1.CSIPersistentVolumeSource)(unsafe.Pointer(in.CSI)) + return nil +} +
+// Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource is an autogenerated conversion function.
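A note on the pattern in the hunks above: when the conversion generator can prove that two peer structs share an identical memory layout, it emits a zero-copy reinterpretation through unsafe.Pointer instead of a field-by-field copy, which is why the PersistentVolumeSource conversions are a run of casts. A minimal, self-contained sketch of that trick (v1Mode and coreMode are hypothetical stand-ins, not types from this patch):

package main

import (
	"fmt"
	"unsafe"
)

// v1Mode and coreMode stand in for a pair of API types whose in-memory
// layout is identical (hypothetical names, illustration only).
type v1Mode string
type coreMode string

func main() {
	in := []v1Mode{"ReadWriteOnce", "ReadOnlyMany"}
	// Reinterpret the slice header in place, as the generated converters
	// do for layout-identical fields: no allocation, no element copies.
	out := *(*[]coreMode)(unsafe.Pointer(&in))
	fmt.Println(out) // [ReadWriteOnce ReadOnlyMany]
}

The cost is aliasing: in and out share backing storage after the cast, so this is only sound when the layouts truly match; otherwise the generator falls back to the allocate-and-loop form used for PersistentVolumeList above.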
+func Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*core.ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = core.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) + out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + out.NodeAffinity = (*core.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + return nil +} + +// Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec is an autogenerated conversion function. +func Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*v1.ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) + out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + out.NodeAffinity = (*v1.VolumeNodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + return nil +} + +// Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec is an autogenerated conversion function. +func Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = core.PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +// Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = v1.PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +// Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus is an autogenerated conversion function. +func Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +// Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +// Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. 
+func Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_core_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PodStatus_To_core_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodAffinity_To_core_PodAffinity(in *v1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]core.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_PodAffinity_To_core_PodAffinity is an autogenerated conversion function. +func Convert_v1_PodAffinity_To_core_PodAffinity(in *v1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAffinity_To_core_PodAffinity(in, out, s) +} + +func autoConvert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_core_PodAffinity_To_v1_PodAffinity is an autogenerated conversion function. +func Convert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { + return autoConvert_core_PodAffinity_To_v1_PodAffinity(in, out, s) +} + +func autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +// Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm is an autogenerated conversion function. 
+func Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in, out, s) +} + +func autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*metav1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +// Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm is an autogenerated conversion function. +func Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) +} + +func autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *v1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]core.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity is an autogenerated conversion function. +func Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *v1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in, out, s) +} + +func autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity is an autogenerated conversion function. +func Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) +} + +func autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in *v1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +// Convert_v1_PodAttachOptions_To_core_PodAttachOptions is an autogenerated conversion function. +func Convert_v1_PodAttachOptions_To_core_PodAttachOptions(in *v1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error { + return autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in, out, s) +} + +func autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +// Convert_core_PodAttachOptions_To_v1_PodAttachOptions is an autogenerated conversion function. 
+func Convert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { + return autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) +} + +func autoConvert_v1_PodCondition_To_core_PodCondition(in *v1.PodCondition, out *core.PodCondition, s conversion.Scope) error { + out.Type = core.PodConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PodCondition_To_core_PodCondition is an autogenerated conversion function. +func Convert_v1_PodCondition_To_core_PodCondition(in *v1.PodCondition, out *core.PodCondition, s conversion.Scope) error { + return autoConvert_v1_PodCondition_To_core_PodCondition(in, out, s) +} + +func autoConvert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *v1.PodCondition, s conversion.Scope) error { + out.Type = v1.PodConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_PodCondition_To_v1_PodCondition is an autogenerated conversion function. +func Convert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *v1.PodCondition, s conversion.Scope) error { + return autoConvert_core_PodCondition_To_v1_PodCondition(in, out, s) +} + +func autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in *v1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error { + out.Nameservers = *(*[]string)(unsafe.Pointer(&in.Nameservers)) + out.Searches = *(*[]string)(unsafe.Pointer(&in.Searches)) + out.Options = *(*[]core.PodDNSConfigOption)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_v1_PodDNSConfig_To_core_PodDNSConfig is an autogenerated conversion function. +func Convert_v1_PodDNSConfig_To_core_PodDNSConfig(in *v1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error { + return autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in, out, s) +} + +func autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *v1.PodDNSConfig, s conversion.Scope) error { + out.Nameservers = *(*[]string)(unsafe.Pointer(&in.Nameservers)) + out.Searches = *(*[]string)(unsafe.Pointer(&in.Searches)) + out.Options = *(*[]v1.PodDNSConfigOption)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_core_PodDNSConfig_To_v1_PodDNSConfig is an autogenerated conversion function. +func Convert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *v1.PodDNSConfig, s conversion.Scope) error { + return autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in, out, s) +} + +func autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *v1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error { + out.Name = in.Name + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption is an autogenerated conversion function. 
+func Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *v1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error { + return autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in, out, s) +} + +func autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *v1.PodDNSConfigOption, s conversion.Scope) error { + out.Name = in.Name + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption is an autogenerated conversion function. +func Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *v1.PodDNSConfigOption, s conversion.Scope) error { + return autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in, out, s) +} + +func autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in *v1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_v1_PodExecOptions_To_core_PodExecOptions is an autogenerated conversion function. +func Convert_v1_PodExecOptions_To_core_PodExecOptions(in *v1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error { + return autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in, out, s) +} + +func autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_core_PodExecOptions_To_v1_PodExecOptions is an autogenerated conversion function. +func Convert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { + return autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in, out, s) +} + +func autoConvert_v1_PodList_To_core_PodList(in *v1.PodList, out *core.PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Pod, len(*in)) + for i := range *in { + if err := Convert_v1_Pod_To_core_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PodList_To_core_PodList is an autogenerated conversion function. +func Convert_v1_PodList_To_core_PodList(in *v1.PodList, out *core.PodList, s conversion.Scope) error { + return autoConvert_v1_PodList_To_core_PodList(in, out, s) +} + +func autoConvert_core_PodList_To_v1_PodList(in *core.PodList, out *v1.PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Pod, len(*in)) + for i := range *in { + if err := Convert_core_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_PodList_To_v1_PodList is an autogenerated conversion function. 
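The PodList conversions just above show that fallback: Pod needs genuine per-field work (its Spec and Status contain opted-out fields), so the generated code allocates a fresh slice and converts each element rather than casting the slice header. A self-contained sketch of that shape, using hypothetical v1Widget/coreWidget types rather than anything from this patch:

package main

import "fmt"

// Hypothetical peer types whose layouts differ enough to need a real copy.
type v1Widget struct{ Name string }
type coreWidget struct{ Title string }

func convertWidget(in *v1Widget, out *coreWidget) error {
	out.Title = in.Name // a genuine field-by-field conversion
	return nil
}

// The allocate-and-loop form the generator falls back to for lists whose
// element type cannot be reinterpreted in place.
func convertWidgetList(in []v1Widget) ([]coreWidget, error) {
	if in == nil {
		return nil, nil
	}
	out := make([]coreWidget, len(in))
	for i := range in {
		if err := convertWidget(&in[i], &out[i]); err != nil {
			return nil, err
		}
	}
	return out, nil
}

func main() {
	out, _ := convertWidgetList([]v1Widget{{Name: "a"}, {Name: "b"}})
	fmt.Println(out) // [{a} {b}]
}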
+func Convert_core_PodList_To_v1_PodList(in *core.PodList, out *v1.PodList, s conversion.Scope) error { + return autoConvert_core_PodList_To_v1_PodList(in, out, s) +} + +func autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in *v1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*metav1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +// Convert_v1_PodLogOptions_To_core_PodLogOptions is an autogenerated conversion function. +func Convert_v1_PodLogOptions_To_core_PodLogOptions(in *v1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error { + return autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in, out, s) +} + +func autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*metav1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +// Convert_core_PodLogOptions_To_v1_PodLogOptions is an autogenerated conversion function. +func Convert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { + return autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in, out, s) +} + +func autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions is an autogenerated conversion function. +func Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in, out, s) +} + +func autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions is an autogenerated conversion function. +func Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) +} + +func autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in *v1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_PodProxyOptions_To_core_PodProxyOptions is an autogenerated conversion function. 
+func Convert_v1_PodProxyOptions_To_core_PodProxyOptions(in *v1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { + return autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in, out, s) +} +
+func autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} +
+// Convert_core_PodProxyOptions_To_v1_PodProxyOptions is an autogenerated conversion function. +func Convert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { + return autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) +} +
+func autoConvert_v1_PodReadinessGate_To_core_PodReadinessGate(in *v1.PodReadinessGate, out *core.PodReadinessGate, s conversion.Scope) error { + out.ConditionType = core.PodConditionType(in.ConditionType) + return nil +} +
+// Convert_v1_PodReadinessGate_To_core_PodReadinessGate is an autogenerated conversion function. +func Convert_v1_PodReadinessGate_To_core_PodReadinessGate(in *v1.PodReadinessGate, out *core.PodReadinessGate, s conversion.Scope) error { + return autoConvert_v1_PodReadinessGate_To_core_PodReadinessGate(in, out, s) +} +
+func autoConvert_core_PodReadinessGate_To_v1_PodReadinessGate(in *core.PodReadinessGate, out *v1.PodReadinessGate, s conversion.Scope) error { + out.ConditionType = v1.PodConditionType(in.ConditionType) + return nil +} +
+// Convert_core_PodReadinessGate_To_v1_PodReadinessGate is an autogenerated conversion function. +func Convert_core_PodReadinessGate_To_v1_PodReadinessGate(in *core.PodReadinessGate, out *v1.PodReadinessGate, s conversion.Scope) error { + return autoConvert_core_PodReadinessGate_To_v1_PodReadinessGate(in, out, s) +} +
+func autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error { + out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsGroup = (*int64)(unsafe.Pointer(in.RunAsGroup)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) + out.Sysctls = *(*[]core.Sysctl)(unsafe.Pointer(&in.Sysctls)) + return nil +} +
+func autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + // INFO: in.ShareProcessNamespace opted out of conversion generation + out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsGroup = (*int64)(unsafe.Pointer(in.RunAsGroup)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) + out.Sysctls = *(*[]v1.Sysctl)(unsafe.Pointer(&in.Sysctls)) + return nil +} +
+func autoConvert_v1_PodSignature_To_core_PodSignature(in *v1.PodSignature, out *core.PodSignature, s conversion.Scope) error { + out.PodController = (*metav1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} +
+// Convert_v1_PodSignature_To_core_PodSignature is an autogenerated conversion function. +func Convert_v1_PodSignature_To_core_PodSignature(in *v1.PodSignature, out *core.PodSignature, s conversion.Scope) error { + return autoConvert_v1_PodSignature_To_core_PodSignature(in, out, s) +} +
+func autoConvert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *v1.PodSignature, s conversion.Scope) error { + out.PodController = (*metav1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} +
+// Convert_core_PodSignature_To_v1_PodSignature is an autogenerated conversion function. +func Convert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *v1.PodSignature, s conversion.Scope) error { + return autoConvert_core_PodSignature_To_v1_PodSignature(in, out, s) +} +
+func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]core.Volume, len(*in)) + for i := range *in { + if err := Convert_v1_Volume_To_core_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]core.Container, len(*in)) + for i := range *in { + if err := Convert_v1_Container_To_core_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]core.Container, len(*in)) + for i := range *in { + if err := Convert_v1_Container_To_core_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Containers = nil + } + out.RestartPolicy = core.RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = core.DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + // INFO: in.DeprecatedServiceAccount opted out of conversion generation + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + // INFO: in.ShareProcessNamespace opted out of conversion generation + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(core.PodSecurityContext) + if err := Convert_v1_PodSecurityContext_To_core_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]core.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*core.Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]core.Toleration)(unsafe.Pointer(&in.Tolerations)) + out.HostAliases = *(*[]core.HostAlias)(unsafe.Pointer(&in.HostAliases)) + out.PriorityClassName = in.PriorityClassName + out.Priority = (*int32)(unsafe.Pointer(in.Priority)) + out.DNSConfig = (*core.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) + out.ReadinessGates = *(*[]core.PodReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) + out.RuntimeClassName = (*string)(unsafe.Pointer(in.RuntimeClassName)) + out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks)) + return nil +} +
+func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + if err := Convert_core_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]v1.Container, len(*in)) + for i := range *in { + if err := Convert_core_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]v1.Container, len(*in)) + for i := range *in { + if err := Convert_core_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Containers = nil + } + out.RestartPolicy = v1.RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = v1.DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.PodSecurityContext) + if err := Convert_core_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*v1.Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations)) + out.HostAliases = *(*[]v1.HostAlias)(unsafe.Pointer(&in.HostAliases)) + out.PriorityClassName = in.PriorityClassName + out.Priority = (*int32)(unsafe.Pointer(in.Priority)) + out.DNSConfig = (*v1.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) + out.ReadinessGates = *(*[]v1.PodReadinessGate)(unsafe.Pointer(&in.ReadinessGates)) + out.RuntimeClassName = (*string)(unsafe.Pointer(in.RuntimeClassName)) + out.EnableServiceLinks = (*bool)(unsafe.Pointer(in.EnableServiceLinks)) + return nil +} +
+func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodStatus, s conversion.Scope) error { + out.Phase = core.PodPhase(in.Phase) + out.Conditions = *(*[]core.PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.NominatedNodeName = in.NominatedNodeName + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*metav1.Time)(unsafe.Pointer(in.StartTime)) + out.InitContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + out.QOSClass = core.PodQOSClass(in.QOSClass) + return nil +} +
+// Convert_v1_PodStatus_To_core_PodStatus is an autogenerated conversion function. +func Convert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodStatus, s conversion.Scope) error { + return autoConvert_v1_PodStatus_To_core_PodStatus(in, out, s) +} +
+func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodStatus, s conversion.Scope) error { + out.Phase = v1.PodPhase(in.Phase) + out.Conditions = *(*[]v1.PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.NominatedNodeName = in.NominatedNodeName + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*metav1.Time)(unsafe.Pointer(in.StartTime)) + out.QOSClass = v1.PodQOSClass(in.QOSClass) + out.InitContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + return nil +} +
+// Convert_core_PodStatus_To_v1_PodStatus is an autogenerated conversion function. +func Convert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodStatus, s conversion.Scope) error { + return autoConvert_core_PodStatus_To_v1_PodStatus(in, out, s) +} +
+func autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodStatus_To_core_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} +
+// Convert_v1_PodStatusResult_To_core_PodStatusResult is an autogenerated conversion function. +func Convert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { + return autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in, out, s) +} +
+func autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} +
+// Convert_core_PodStatusResult_To_v1_PodStatusResult is an autogenerated conversion function. +func Convert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { + return autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in, out, s) +} +
+func autoConvert_v1_PodTemplate_To_core_PodTemplate(in *v1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} +
+// Convert_v1_PodTemplate_To_core_PodTemplate is an autogenerated conversion function. +func Convert_v1_PodTemplate_To_core_PodTemplate(in *v1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { + return autoConvert_v1_PodTemplate_To_core_PodTemplate(in, out, s) +} +
+func autoConvert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} +
+// Convert_core_PodTemplate_To_v1_PodTemplate is an autogenerated conversion function.
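Also visible in the hunks above: Pod, PodSpec, PodSecurityContext and PodTemplateSpec get only unexported autoConvert_* bodies, with no exported Convert_* wrapper emitted. Fields such as HostNetwork are marked "opted out of conversion generation" because they sit in different places in the two peer types, so the exported entry points are expected to be written by hand and to patch those fields up around the generated call. A rough sketch of that division of labor, with hypothetical types and field placement:

package main

import "fmt"

type v1Box struct {
	Name        string
	HostNetwork bool // lives at top level here...
}

type coreBox struct {
	Name string
	Net  struct{ HostNetwork bool } // ...but nested in the peer type
}

// Regenerated on every run; covers only the fields that line up.
func autoConvert_v1_Box_To_core_Box(in *v1Box, out *coreBox) error {
	out.Name = in.Name
	// INFO: in.HostNetwork opted out of conversion generation
	return nil
}

// Hand-written stable entry point: delegates, then handles the opted-out field.
func Convert_v1_Box_To_core_Box(in *v1Box, out *coreBox) error {
	if err := autoConvert_v1_Box_To_core_Box(in, out); err != nil {
		return err
	}
	out.Net.HostNetwork = in.HostNetwork
	return nil
}

func main() {
	in := v1Box{Name: "demo", HostNetwork: true}
	var out coreBox
	if err := Convert_v1_Box_To_core_Box(&in, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Net.HostNetwork) // demo true
}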
+func Convert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { + return autoConvert_core_PodTemplate_To_v1_PodTemplate(in, out, s) +} + +func autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in *v1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.PodTemplate, len(*in)) + for i := range *in { + if err := Convert_v1_PodTemplate_To_core_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PodTemplateList_To_core_PodTemplateList is an autogenerated conversion function. +func Convert_v1_PodTemplateList_To_core_PodTemplateList(in *v1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error { + return autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in, out, s) +} + +func autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.PodTemplate, len(*in)) + for i := range *in { + if err := Convert_core_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_PodTemplateList_To_v1_PodTemplateList is an autogenerated conversion function. +func Convert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { + return autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in, out, s) +} + +func autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_core_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource is an autogenerated conversion function. +func Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in, out, s) +} + +func autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource is an autogenerated conversion function. 
+func Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s) +} + +func autoConvert_v1_Preconditions_To_core_Preconditions(in *v1.Preconditions, out *core.Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +// Convert_v1_Preconditions_To_core_Preconditions is an autogenerated conversion function. +func Convert_v1_Preconditions_To_core_Preconditions(in *v1.Preconditions, out *core.Preconditions, s conversion.Scope) error { + return autoConvert_v1_Preconditions_To_core_Preconditions(in, out, s) +} + +func autoConvert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *v1.Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +// Convert_core_Preconditions_To_v1_Preconditions is an autogenerated conversion function. +func Convert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *v1.Preconditions, s conversion.Scope) error { + return autoConvert_core_Preconditions_To_v1_Preconditions(in, out, s) +} + +func autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_v1_PodSignature_To_core_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry is an autogenerated conversion function. +func Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_core_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry is an autogenerated conversion function. +func Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm is an autogenerated conversion function. 
+func Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +// Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm is an autogenerated conversion function. +func Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s conversion.Scope) error { + if err := Convert_v1_Handler_To_core_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +// Convert_v1_Probe_To_core_Probe is an autogenerated conversion function. +func Convert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s conversion.Scope) error { + return autoConvert_v1_Probe_To_core_Probe(in, out, s) +} + +func autoConvert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion.Scope) error { + if err := Convert_core_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +// Convert_core_Probe_To_v1_Probe is an autogenerated conversion function. +func Convert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion.Scope) error { + return autoConvert_core_Probe_To_v1_Probe(in, out, s) +} + +func autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]core.VolumeProjection, len(*in)) + for i := range *in { + if err := Convert_v1_VolumeProjection_To_core_VolumeProjection(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Sources = nil + } + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource is an autogenerated conversion function. 
+func Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]v1.VolumeProjection, len(*in)) + for i := range *in { + if err := Convert_core_VolumeProjection_To_v1_VolumeProjection(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Sources = nil + } + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource is an autogenerated conversion function. +func Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +// Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource is an autogenerated conversion function. +func Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +// Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource is an autogenerated conversion function. +func Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource is an autogenerated conversion function. 
+func Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *v1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource is an autogenerated conversion function. +func Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *v1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error { + return autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in, out, s) +} + +func autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource is an autogenerated conversion function. +func Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { + return autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) +} + +func autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in *v1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1_RangeAllocation_To_core_RangeAllocation is an autogenerated conversion function. 
+func Convert_v1_RangeAllocation_To_core_RangeAllocation(in *v1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error { + return autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in, out, s) +} + +func autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_core_RangeAllocation_To_v1_RangeAllocation is an autogenerated conversion function. +func Convert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { + return autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in, out, s) +} + +func autoConvert_v1_ReplicationController_To_core_ReplicationController(in *v1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ReplicationController_To_core_ReplicationController is an autogenerated conversion function. +func Convert_v1_ReplicationController_To_core_ReplicationController(in *v1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error { + return autoConvert_v1_ReplicationController_To_core_ReplicationController(in, out, s) +} + +func autoConvert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_ReplicationController_To_v1_ReplicationController is an autogenerated conversion function. +func Convert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { + return autoConvert_core_ReplicationController_To_v1_ReplicationController(in, out, s) +} + +func autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error { + out.Type = core.ReplicationControllerConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition is an autogenerated conversion function. 
+func Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { + out.Type = v1.ReplicationControllerConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition is an autogenerated conversion function. +func Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *v1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.ReplicationController, len(*in)) + for i := range *in { + if err := Convert_v1_ReplicationController_To_core_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList is an autogenerated conversion function. +func Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *v1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in, out, s) +} + +func autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.ReplicationController, len(*in)) + for i := range *in { + if err := Convert_core_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList is an autogenerated conversion function. 
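+// Two list strategies sit side by side in this file: ReplicationControllerList
+// above gets an element-wise loop because each item's Spec needs genuine
+// conversion work, while lists of layout-identical items (ResourceQuotaList,
+// further down) collapse to a single slice-header cast that shares the backing
+// array. A hedged sketch of the difference, with hypothetical item types:
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//		"unsafe"
+//	)
+//
+//	type extNum int32
+//	type coreNum int32
+//	type extItem struct{ N extNum }
+//	type coreItem struct{ N coreNum }
+//
+//	func main() {
+//		in := []extItem{{1}, {2}, {3}}
+//
+//		// Cast path: one reinterpretation, zero copies.
+//		cast := *(*[]coreItem)(unsafe.Pointer(&in))
+//
+//		// Loop path: allocate and convert item by item.
+//		looped := make([]coreItem, len(in))
+//		for i := range in {
+//			looped[i] = coreItem{N: coreNum(in[i].N)}
+//		}
+//
+//		fmt.Println(cast[2].N, looped[2].N) // 3 3
+//	}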
+func Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { + return autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) +} + +func autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error { + if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(core.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { + if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(v1.PodTemplateSpec) + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]core.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus is an autogenerated conversion function. +func Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]v1.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus is an autogenerated conversion function. 
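+// The Spec conversions above cannot use a cast at all: Replicas is *int32 in
+// the external API but a plain int32 internally, so the generator routes the
+// field through metav1's pointer helpers. A sketch of their semantics with
+// hypothetical names (the real helpers live in k8s.io/apimachinery and also
+// take a conversion.Scope):
+//
+//	// nil collapses to the zero value; a set pointer is dereferenced.
+//	func pointerInt32ToInt32(in *int32) int32 {
+//		if in != nil {
+//			return *in
+//		}
+//		return 0
+//	}
+//
+//	// The internal value is always concrete, so the reverse direction
+//	// always allocates a fresh pointer.
+//	func int32ToPointerInt32(in int32) *int32 {
+//		out := in
+//		return &out
+//	}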
+func Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +// Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector is an autogenerated conversion function. +func Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in, out, s) +} + +func autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +// Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector is an autogenerated conversion function. +func Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) +} + +func autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in *v1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ResourceQuota_To_core_ResourceQuota is an autogenerated conversion function. +func Convert_v1_ResourceQuota_To_core_ResourceQuota(in *v1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error { + return autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in, out, s) +} + +func autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_ResourceQuota_To_v1_ResourceQuota is an autogenerated conversion function. +func Convert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { + return autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in, out, s) +} + +func autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *v1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ResourceQuota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList is an autogenerated conversion function. 
+func Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *v1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in, out, s) +} + +func autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ResourceQuota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList is an autogenerated conversion function. +func Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { + return autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) +} + +func autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*core.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]core.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + out.ScopeSelector = (*core.ScopeSelector)(unsafe.Pointer(in.ScopeSelector)) + return nil +} + +// Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec is an autogenerated conversion function. +func Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]v1.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + out.ScopeSelector = (*v1.ScopeSelector)(unsafe.Pointer(in.ScopeSelector)) + return nil +} + +// Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec is an autogenerated conversion function. +func Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*core.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*core.ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +// Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus is an autogenerated conversion function. +func Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*v1.ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +// Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus is an autogenerated conversion function. 
+func Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +// Convert_v1_ResourceRequirements_To_core_ResourceRequirements is an autogenerated conversion function. +func Convert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error { + return autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in, out, s) +} + +func autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +// Convert_core_ResourceRequirements_To_v1_ResourceRequirements is an autogenerated conversion function. +func Convert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { + return autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) +} + +func autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in *v1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +// Convert_v1_SELinuxOptions_To_core_SELinuxOptions is an autogenerated conversion function. +func Convert_v1_SELinuxOptions_To_core_SELinuxOptions(in *v1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error { + return autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in, out, s) +} + +func autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +// Convert_core_SELinuxOptions_To_v1_SELinuxOptions is an autogenerated conversion function. +func Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { + return autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) +} + +func autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource is an autogenerated conversion function. 
+func Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource is an autogenerated conversion function. +func Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource is an autogenerated conversion function. +func Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_v1_ScopeSelector_To_core_ScopeSelector(in *v1.ScopeSelector, out *core.ScopeSelector, s conversion.Scope) error { + out.MatchExpressions = *(*[]core.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + return nil +} + +// Convert_v1_ScopeSelector_To_core_ScopeSelector is an autogenerated conversion function. 
+func Convert_v1_ScopeSelector_To_core_ScopeSelector(in *v1.ScopeSelector, out *core.ScopeSelector, s conversion.Scope) error { + return autoConvert_v1_ScopeSelector_To_core_ScopeSelector(in, out, s) +} + +func autoConvert_core_ScopeSelector_To_v1_ScopeSelector(in *core.ScopeSelector, out *v1.ScopeSelector, s conversion.Scope) error { + out.MatchExpressions = *(*[]v1.ScopedResourceSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + return nil +} + +// Convert_core_ScopeSelector_To_v1_ScopeSelector is an autogenerated conversion function. +func Convert_core_ScopeSelector_To_v1_ScopeSelector(in *core.ScopeSelector, out *v1.ScopeSelector, s conversion.Scope) error { + return autoConvert_core_ScopeSelector_To_v1_ScopeSelector(in, out, s) +} + +func autoConvert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(in *v1.ScopedResourceSelectorRequirement, out *core.ScopedResourceSelectorRequirement, s conversion.Scope) error { + out.ScopeName = core.ResourceQuotaScope(in.ScopeName) + out.Operator = core.ScopeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement is an autogenerated conversion function. +func Convert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(in *v1.ScopedResourceSelectorRequirement, out *core.ScopedResourceSelectorRequirement, s conversion.Scope) error { + return autoConvert_v1_ScopedResourceSelectorRequirement_To_core_ScopedResourceSelectorRequirement(in, out, s) +} + +func autoConvert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(in *core.ScopedResourceSelectorRequirement, out *v1.ScopedResourceSelectorRequirement, s conversion.Scope) error { + out.ScopeName = v1.ResourceQuotaScope(in.ScopeName) + out.Operator = v1.ScopeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement is an autogenerated conversion function. +func Convert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(in *core.ScopedResourceSelectorRequirement, out *v1.ScopedResourceSelectorRequirement, s conversion.Scope) error { + return autoConvert_core_ScopedResourceSelectorRequirement_To_v1_ScopedResourceSelectorRequirement(in, out, s) +} + +func autoConvert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + // INFO: in.StringData opted out of conversion generation + out.Type = core.SecretType(in.Type) + return nil +} + +func autoConvert_core_Secret_To_v1_Secret(in *core.Secret, out *v1.Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + out.Type = v1.SecretType(in.Type) + return nil +} + +// Convert_core_Secret_To_v1_Secret is an autogenerated conversion function. 
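+// Note the asymmetry around Secret: StringData opted out of generation, so no
+// Convert_v1_Secret_To_core_Secret wrapper is emitted here and a hand-written
+// one is expected elsewhere in the package. Upstream Kubernetes folds
+// StringData into Data after running the autoConvert; roughly (a sketch of
+// that shape, not a copy of this repository's source):
+//
+//	func Convert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error {
+//		if err := autoConvert_v1_Secret_To_core_Secret(in, out, s); err != nil {
+//			return err
+//		}
+//		// StringData overwrites Data on key collisions.
+//		if len(in.StringData) > 0 {
+//			if out.Data == nil {
+//				out.Data = map[string][]byte{}
+//			}
+//			for k, v := range in.StringData {
+//				out.Data[k] = []byte(v)
+//			}
+//		}
+//		return nil
+//	}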
+func Convert_core_Secret_To_v1_Secret(in *core.Secret, out *v1.Secret, s conversion.Scope) error { + return autoConvert_core_Secret_To_v1_Secret(in, out, s) +} + +func autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in *v1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretEnvSource_To_core_SecretEnvSource is an autogenerated conversion function. +func Convert_v1_SecretEnvSource_To_core_SecretEnvSource(in *v1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error { + return autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in, out, s) +} + +func autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretEnvSource_To_v1_SecretEnvSource is an autogenerated conversion function. +func Convert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { + return autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) +} + +func autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in *v1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretKeySelector_To_core_SecretKeySelector is an autogenerated conversion function. +func Convert_v1_SecretKeySelector_To_core_SecretKeySelector(in *v1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error { + return autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in, out, s) +} + +func autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretKeySelector_To_v1_SecretKeySelector is an autogenerated conversion function. +func Convert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { + return autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) +} + +func autoConvert_v1_SecretList_To_core_SecretList(in *v1.SecretList, out *core.SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Secret, len(*in)) + for i := range *in { + if err := Convert_v1_Secret_To_core_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_SecretList_To_core_SecretList is an autogenerated conversion function. 
+func Convert_v1_SecretList_To_core_SecretList(in *v1.SecretList, out *core.SecretList, s conversion.Scope) error { + return autoConvert_v1_SecretList_To_core_SecretList(in, out, s) +} + +func autoConvert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *v1.SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Secret, len(*in)) + for i := range *in { + if err := Convert_core_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_SecretList_To_v1_SecretList is an autogenerated conversion function. +func Convert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *v1.SecretList, s conversion.Scope) error { + return autoConvert_core_SecretList_To_v1_SecretList(in, out, s) +} + +func autoConvert_v1_SecretProjection_To_core_SecretProjection(in *v1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretProjection_To_core_SecretProjection is an autogenerated conversion function. +func Convert_v1_SecretProjection_To_core_SecretProjection(in *v1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error { + return autoConvert_v1_SecretProjection_To_core_SecretProjection(in, out, s) +} + +func autoConvert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretProjection_To_v1_SecretProjection is an autogenerated conversion function. +func Convert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { + return autoConvert_core_SecretProjection_To_v1_SecretProjection(in, out, s) +} + +func autoConvert_v1_SecretReference_To_core_SecretReference(in *v1.SecretReference, out *core.SecretReference, s conversion.Scope) error { + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +// Convert_v1_SecretReference_To_core_SecretReference is an autogenerated conversion function. +func Convert_v1_SecretReference_To_core_SecretReference(in *v1.SecretReference, out *core.SecretReference, s conversion.Scope) error { + return autoConvert_v1_SecretReference_To_core_SecretReference(in, out, s) +} + +func autoConvert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *v1.SecretReference, s conversion.Scope) error { + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +// Convert_core_SecretReference_To_v1_SecretReference is an autogenerated conversion function. 
+func Convert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *v1.SecretReference, s conversion.Scope) error { + return autoConvert_core_SecretReference_To_v1_SecretReference(in, out, s) +} + +func autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *v1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource is an autogenerated conversion function. +func Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *v1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error { + return autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in, out, s) +} + +func autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource is an autogenerated conversion function. +func Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { + return autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) +} + +func autoConvert_v1_SecurityContext_To_core_SecurityContext(in *v1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error { + out.Capabilities = (*core.Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsGroup = (*int64)(unsafe.Pointer(in.RunAsGroup)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) + out.ProcMount = (*core.ProcMountType)(unsafe.Pointer(in.ProcMount)) + return nil +} + +// Convert_v1_SecurityContext_To_core_SecurityContext is an autogenerated conversion function. 
+func Convert_v1_SecurityContext_To_core_SecurityContext(in *v1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error { + return autoConvert_v1_SecurityContext_To_core_SecurityContext(in, out, s) +} + +func autoConvert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { + out.Capabilities = (*v1.Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsGroup = (*int64)(unsafe.Pointer(in.RunAsGroup)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) + out.ProcMount = (*v1.ProcMountType)(unsafe.Pointer(in.ProcMount)) + return nil +} + +func autoConvert_v1_SerializedReference_To_core_SerializedReference(in *v1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error { + if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +// Convert_v1_SerializedReference_To_core_SerializedReference is an autogenerated conversion function. +func Convert_v1_SerializedReference_To_core_SerializedReference(in *v1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error { + return autoConvert_v1_SerializedReference_To_core_SerializedReference(in, out, s) +} + +func autoConvert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { + if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +// Convert_core_SerializedReference_To_v1_SerializedReference is an autogenerated conversion function. +func Convert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { + return autoConvert_core_SerializedReference_To_v1_SerializedReference(in, out, s) +} + +func autoConvert_v1_Service_To_core_Service(in *v1.Service, out *core.Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ServiceSpec_To_core_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ServiceStatus_To_core_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Service_To_core_Service is an autogenerated conversion function. +func Convert_v1_Service_To_core_Service(in *v1.Service, out *core.Service, s conversion.Scope) error { + return autoConvert_v1_Service_To_core_Service(in, out, s) +} + +func autoConvert_core_Service_To_v1_Service(in *core.Service, out *v1.Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Service_To_v1_Service is an autogenerated conversion function. 
+func Convert_core_Service_To_v1_Service(in *core.Service, out *v1.Service, s conversion.Scope) error { + return autoConvert_core_Service_To_v1_Service(in, out, s) +} + +func autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in *v1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]core.ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]core.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +// Convert_v1_ServiceAccount_To_core_ServiceAccount is an autogenerated conversion function. +func Convert_v1_ServiceAccount_To_core_ServiceAccount(in *v1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error { + return autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in, out, s) +} + +func autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +// Convert_core_ServiceAccount_To_v1_ServiceAccount is an autogenerated conversion function. +func Convert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { + return autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in, out, s) +} + +func autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in *v1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ServiceAccount)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ServiceAccountList_To_core_ServiceAccountList is an autogenerated conversion function. +func Convert_v1_ServiceAccountList_To_core_ServiceAccountList(in *v1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error { + return autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in, out, s) +} + +func autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ServiceAccount)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ServiceAccountList_To_v1_ServiceAccountList is an autogenerated conversion function. +func Convert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { + return autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) +} + +func autoConvert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in *v1.ServiceAccountTokenProjection, out *core.ServiceAccountTokenProjection, s conversion.Scope) error { + out.Audience = in.Audience + if err := metav1.Convert_Pointer_int64_To_int64(&in.ExpirationSeconds, &out.ExpirationSeconds, s); err != nil { + return err + } + out.Path = in.Path + return nil +} + +// Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection is an autogenerated conversion function. 
+func Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in *v1.ServiceAccountTokenProjection, out *core.ServiceAccountTokenProjection, s conversion.Scope) error { + return autoConvert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(in, out, s) +} + +func autoConvert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in *core.ServiceAccountTokenProjection, out *v1.ServiceAccountTokenProjection, s conversion.Scope) error { + out.Audience = in.Audience + if err := metav1.Convert_int64_To_Pointer_int64(&in.ExpirationSeconds, &out.ExpirationSeconds, s); err != nil { + return err + } + out.Path = in.Path + return nil +} + +// Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection is an autogenerated conversion function. +func Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in *core.ServiceAccountTokenProjection, out *v1.ServiceAccountTokenProjection, s conversion.Scope) error { + return autoConvert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(in, out, s) +} + +func autoConvert_v1_ServiceList_To_core_ServiceList(in *v1.ServiceList, out *core.ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Service, len(*in)) + for i := range *in { + if err := Convert_v1_Service_To_core_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_ServiceList_To_core_ServiceList is an autogenerated conversion function. +func Convert_v1_ServiceList_To_core_ServiceList(in *v1.ServiceList, out *core.ServiceList, s conversion.Scope) error { + return autoConvert_v1_ServiceList_To_core_ServiceList(in, out, s) +} + +func autoConvert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *v1.ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Service, len(*in)) + for i := range *in { + if err := Convert_core_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_ServiceList_To_v1_ServiceList is an autogenerated conversion function. +func Convert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *v1.ServiceList, s conversion.Scope) error { + return autoConvert_core_ServiceList_To_v1_ServiceList(in, out, s) +} + +func autoConvert_v1_ServicePort_To_core_ServicePort(in *v1.ServicePort, out *core.ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = core.Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +// Convert_v1_ServicePort_To_core_ServicePort is an autogenerated conversion function. +func Convert_v1_ServicePort_To_core_ServicePort(in *v1.ServicePort, out *core.ServicePort, s conversion.Scope) error { + return autoConvert_v1_ServicePort_To_core_ServicePort(in, out, s) +} + +func autoConvert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *v1.ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = v1.Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +// Convert_core_ServicePort_To_v1_ServicePort is an autogenerated conversion function. 
+func Convert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *v1.ServicePort, s conversion.Scope) error { + return autoConvert_core_ServicePort_To_v1_ServicePort(in, out, s) +} + +func autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions is an autogenerated conversion function. +func Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in, out, s) +} + +func autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions is an autogenerated conversion function. +func Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) +} + +func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { + out.Ports = *(*[]core.ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.Type = core.ServiceType(in.Type) + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + out.SessionAffinity = core.ServiceAffinity(in.SessionAffinity) + out.LoadBalancerIP = in.LoadBalancerIP + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.ExternalName = in.ExternalName + out.ExternalTrafficPolicy = core.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) + out.HealthCheckNodePort = in.HealthCheckNodePort + out.PublishNotReadyAddresses = in.PublishNotReadyAddresses + out.SessionAffinityConfig = (*core.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) + return nil +} + +// Convert_v1_ServiceSpec_To_core_ServiceSpec is an autogenerated conversion function. +func Convert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { + return autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in, out, s) +} + +func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { + out.Type = v1.ServiceType(in.Type) + out.Ports = *(*[]v1.ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.ExternalName = in.ExternalName + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + out.LoadBalancerIP = in.LoadBalancerIP + out.SessionAffinity = v1.ServiceAffinity(in.SessionAffinity) + out.SessionAffinityConfig = (*v1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) + out.HealthCheckNodePort = in.HealthCheckNodePort + out.PublishNotReadyAddresses = in.PublishNotReadyAddresses + return nil +} + +// Convert_core_ServiceSpec_To_v1_ServiceSpec is an autogenerated conversion function. 
+func Convert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { + return autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in, out, s) +} + +func autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in *v1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error { + if err := Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ServiceStatus_To_core_ServiceStatus is an autogenerated conversion function. +func Convert_v1_ServiceStatus_To_core_ServiceStatus(in *v1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error { + return autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in, out, s) +} + +func autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { + if err := Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +// Convert_core_ServiceStatus_To_v1_ServiceStatus is an autogenerated conversion function. +func Convert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { + return autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in, out, s) +} + +func autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error { + out.ClientIP = (*core.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) + return nil +} + +// Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig is an autogenerated conversion function. +func Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error { + return autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in, out, s) +} + +func autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { + out.ClientIP = (*v1.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) + return nil +} + +// Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig is an autogenerated conversion function. +func Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { + return autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in, out, s) +} + +func autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*core.ObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource is an autogenerated conversion function. 
+func Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*v1.ObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource is an autogenerated conversion function. +func Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in, out, s) +} + +func autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource is an autogenerated conversion function. +func Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { + return autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in, out, s) +} + +func autoConvert_v1_Sysctl_To_core_Sysctl(in *v1.Sysctl, out *core.Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_v1_Sysctl_To_core_Sysctl is an autogenerated conversion function. +func Convert_v1_Sysctl_To_core_Sysctl(in *v1.Sysctl, out *core.Sysctl, s conversion.Scope) error { + return autoConvert_v1_Sysctl_To_core_Sysctl(in, out, s) +} + +func autoConvert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *v1.Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_core_Sysctl_To_v1_Sysctl is an autogenerated conversion function. 
+func Convert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *v1.Sysctl, s conversion.Scope) error { + return autoConvert_core_Sysctl_To_v1_Sysctl(in, out, s) +} + +func autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in *v1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + out.Host = in.Host + return nil +} + +// Convert_v1_TCPSocketAction_To_core_TCPSocketAction is an autogenerated conversion function. +func Convert_v1_TCPSocketAction_To_core_TCPSocketAction(in *v1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error { + return autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in, out, s) +} + +func autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + out.Host = in.Host + return nil +} + +// Convert_core_TCPSocketAction_To_v1_TCPSocketAction is an autogenerated conversion function. +func Convert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { + return autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) +} + +func autoConvert_v1_Taint_To_core_Taint(in *v1.Taint, out *core.Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = core.TaintEffect(in.Effect) + out.TimeAdded = (*metav1.Time)(unsafe.Pointer(in.TimeAdded)) + return nil +} + +// Convert_v1_Taint_To_core_Taint is an autogenerated conversion function. +func Convert_v1_Taint_To_core_Taint(in *v1.Taint, out *core.Taint, s conversion.Scope) error { + return autoConvert_v1_Taint_To_core_Taint(in, out, s) +} + +func autoConvert_core_Taint_To_v1_Taint(in *core.Taint, out *v1.Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = v1.TaintEffect(in.Effect) + out.TimeAdded = (*metav1.Time)(unsafe.Pointer(in.TimeAdded)) + return nil +} + +// Convert_core_Taint_To_v1_Taint is an autogenerated conversion function. +func Convert_core_Taint_To_v1_Taint(in *core.Taint, out *v1.Taint, s conversion.Scope) error { + return autoConvert_core_Taint_To_v1_Taint(in, out, s) +} + +func autoConvert_v1_Toleration_To_core_Toleration(in *v1.Toleration, out *core.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = core.TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = core.TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +// Convert_v1_Toleration_To_core_Toleration is an autogenerated conversion function. +func Convert_v1_Toleration_To_core_Toleration(in *v1.Toleration, out *core.Toleration, s conversion.Scope) error { + return autoConvert_v1_Toleration_To_core_Toleration(in, out, s) +} + +func autoConvert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *v1.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = v1.TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = v1.TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +// Convert_core_Toleration_To_v1_Toleration is an autogenerated conversion function. 
+func Convert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *v1.Toleration, s conversion.Scope) error { + return autoConvert_core_Toleration_To_v1_Toleration(in, out, s) +} + +func autoConvert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(in *v1.TopologySelectorLabelRequirement, out *core.TopologySelectorLabelRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement is an autogenerated conversion function. +func Convert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(in *v1.TopologySelectorLabelRequirement, out *core.TopologySelectorLabelRequirement, s conversion.Scope) error { + return autoConvert_v1_TopologySelectorLabelRequirement_To_core_TopologySelectorLabelRequirement(in, out, s) +} + +func autoConvert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(in *core.TopologySelectorLabelRequirement, out *v1.TopologySelectorLabelRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement is an autogenerated conversion function. +func Convert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(in *core.TopologySelectorLabelRequirement, out *v1.TopologySelectorLabelRequirement, s conversion.Scope) error { + return autoConvert_core_TopologySelectorLabelRequirement_To_v1_TopologySelectorLabelRequirement(in, out, s) +} + +func autoConvert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(in *v1.TopologySelectorTerm, out *core.TopologySelectorTerm, s conversion.Scope) error { + out.MatchLabelExpressions = *(*[]core.TopologySelectorLabelRequirement)(unsafe.Pointer(&in.MatchLabelExpressions)) + return nil +} + +// Convert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm is an autogenerated conversion function. +func Convert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(in *v1.TopologySelectorTerm, out *core.TopologySelectorTerm, s conversion.Scope) error { + return autoConvert_v1_TopologySelectorTerm_To_core_TopologySelectorTerm(in, out, s) +} + +func autoConvert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in *core.TopologySelectorTerm, out *v1.TopologySelectorTerm, s conversion.Scope) error { + out.MatchLabelExpressions = *(*[]v1.TopologySelectorLabelRequirement)(unsafe.Pointer(&in.MatchLabelExpressions)) + return nil +} + +// Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm is an autogenerated conversion function. +func Convert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in *core.TopologySelectorTerm, out *v1.TopologySelectorTerm, s conversion.Scope) error { + return autoConvert_core_TopologySelectorTerm_To_v1_TopologySelectorTerm(in, out, s) +} + +func autoConvert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in *v1.TypedLocalObjectReference, out *core.TypedLocalObjectReference, s conversion.Scope) error { + out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup)) + out.Kind = in.Kind + out.Name = in.Name + return nil +} + +// Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference is an autogenerated conversion function. 
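
The slice conversions above go one step further: *(*[]core.TopologySelectorLabelRequirement)(unsafe.Pointer(&in.MatchLabelExpressions)) reinterprets the slice header itself, so the converted slice shares the original backing array rather than copying elements. A small runnable sketch under the same identical-layout assumption (extReq and intReq are hypothetical):

package main

import (
	"fmt"
	"unsafe"
)

type extReq struct{ Key string }
type intReq struct{ Key string } // same layout as extReq

// castReqs reinterprets the slice header; both slices share one backing array.
func castReqs(in []extReq) []intReq {
	return *(*[]intReq)(unsafe.Pointer(&in))
}

func main() {
	in := []extReq{{Key: "zone"}, {Key: "region"}}
	out := castReqs(in)
	out[0].Key = "rack" // visible through in as well, since storage is shared
	fmt.Println(in[0].Key, out[1].Key)
}
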
+func Convert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in *v1.TypedLocalObjectReference, out *core.TypedLocalObjectReference, s conversion.Scope) error { + return autoConvert_v1_TypedLocalObjectReference_To_core_TypedLocalObjectReference(in, out, s) +} + +func autoConvert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in *core.TypedLocalObjectReference, out *v1.TypedLocalObjectReference, s conversion.Scope) error { + out.APIGroup = (*string)(unsafe.Pointer(in.APIGroup)) + out.Kind = in.Kind + out.Name = in.Name + return nil +} + +// Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference is an autogenerated conversion function. +func Convert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in *core.TypedLocalObjectReference, out *v1.TypedLocalObjectReference, s conversion.Scope) error { + return autoConvert_core_TypedLocalObjectReference_To_v1_TypedLocalObjectReference(in, out, s) +} + +func autoConvert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_VolumeSource_To_core_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Volume_To_core_Volume is an autogenerated conversion function. +func Convert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error { + return autoConvert_v1_Volume_To_core_Volume(in, out, s) +} + +func autoConvert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_core_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +// Convert_core_Volume_To_v1_Volume is an autogenerated conversion function. +func Convert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error { + return autoConvert_core_Volume_To_v1_Volume(in, out, s) +} + +func autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in *v1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error { + out.Name = in.Name + out.DevicePath = in.DevicePath + return nil +} + +// Convert_v1_VolumeDevice_To_core_VolumeDevice is an autogenerated conversion function. +func Convert_v1_VolumeDevice_To_core_VolumeDevice(in *v1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error { + return autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in, out, s) +} + +func autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *v1.VolumeDevice, s conversion.Scope) error { + out.Name = in.Name + out.DevicePath = in.DevicePath + return nil +} + +// Convert_core_VolumeDevice_To_v1_VolumeDevice is an autogenerated conversion function. +func Convert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *v1.VolumeDevice, s conversion.Scope) error { + return autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in, out, s) +} + +func autoConvert_v1_VolumeMount_To_core_VolumeMount(in *v1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + out.MountPropagation = (*core.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) + return nil +} + +// Convert_v1_VolumeMount_To_core_VolumeMount is an autogenerated conversion function. 
+func Convert_v1_VolumeMount_To_core_VolumeMount(in *v1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error { + return autoConvert_v1_VolumeMount_To_core_VolumeMount(in, out, s) +} + +func autoConvert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + out.MountPropagation = (*v1.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) + return nil +} + +// Convert_core_VolumeMount_To_v1_VolumeMount is an autogenerated conversion function. +func Convert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { + return autoConvert_core_VolumeMount_To_v1_VolumeMount(in, out, s) +} + +func autoConvert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in *v1.VolumeNodeAffinity, out *core.VolumeNodeAffinity, s conversion.Scope) error { + out.Required = (*core.NodeSelector)(unsafe.Pointer(in.Required)) + return nil +} + +// Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity is an autogenerated conversion function. +func Convert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in *v1.VolumeNodeAffinity, out *core.VolumeNodeAffinity, s conversion.Scope) error { + return autoConvert_v1_VolumeNodeAffinity_To_core_VolumeNodeAffinity(in, out, s) +} + +func autoConvert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in *core.VolumeNodeAffinity, out *v1.VolumeNodeAffinity, s conversion.Scope) error { + out.Required = (*v1.NodeSelector)(unsafe.Pointer(in.Required)) + return nil +} + +// Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity is an autogenerated conversion function. +func Convert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in *core.VolumeNodeAffinity, out *v1.VolumeNodeAffinity, s conversion.Scope) error { + return autoConvert_core_VolumeNodeAffinity_To_v1_VolumeNodeAffinity(in, out, s) +} + +func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error { + out.Secret = (*core.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*core.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*core.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + if in.ServiceAccountToken != nil { + in, out := &in.ServiceAccountToken, &out.ServiceAccountToken + *out = new(core.ServiceAccountTokenProjection) + if err := Convert_v1_ServiceAccountTokenProjection_To_core_ServiceAccountTokenProjection(*in, *out, s); err != nil { + return err + } + } else { + out.ServiceAccountToken = nil + } + return nil +} + +// Convert_v1_VolumeProjection_To_core_VolumeProjection is an autogenerated conversion function. 
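
Note that VolumeProjection, just below, cannot be converted with a flat cast: its ServiceAccountToken field gets an explicit nil check and a recursive Convert_* call. That is the generator's fallback whenever the two sides are not layout-identical, for example when one side declares a field as an optional pointer and the other as a plain value, which appears to be why ServiceAccountTokenProjection needs special treatment here. A sketch of the shape of such a fallback; the types and field are hypothetical, chosen to mirror the optional-pointer-versus-value case:

package main

import "fmt"

// extProjection mirrors an external API type with an optional pointer field;
// intProjection uses a plain value, so the layouts differ and a memory cast
// would be unsafe.
type extProjection struct{ ExpirationSeconds *int64 }
type intProjection struct{ ExpirationSeconds int64 }

func convertProjection(in *extProjection, out *intProjection) {
	if in.ExpirationSeconds != nil {
		out.ExpirationSeconds = *in.ExpirationSeconds
	} else {
		out.ExpirationSeconds = 0 // zero value when the optional field is unset
	}
}

func main() {
	secs := int64(3600)
	out := &intProjection{}
	convertProjection(&extProjection{ExpirationSeconds: &secs}, out)
	fmt.Println(out.ExpirationSeconds)
}
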
+func Convert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error { + return autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in, out, s) +} + +func autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { + out.Secret = (*v1.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*v1.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*v1.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + if in.ServiceAccountToken != nil { + in, out := &in.ServiceAccountToken, &out.ServiceAccountToken + *out = new(v1.ServiceAccountTokenProjection) + if err := Convert_core_ServiceAccountTokenProjection_To_v1_ServiceAccountTokenProjection(*in, *out, s); err != nil { + return err + } + } else { + out.ServiceAccountToken = nil + } + return nil +} + +// Convert_core_VolumeProjection_To_v1_VolumeProjection is an autogenerated conversion function. +func Convert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { + return autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in, out, s) +} + +func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { + out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*core.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*core.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*core.SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*core.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*core.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*core.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*core.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.FlexVolume = (*core.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*core.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*core.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*core.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*core.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*core.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*core.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*core.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(core.ProjectedVolumeSource) + if err := Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.Projected = nil + } + out.PortworxVolume = 
(*core.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*core.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.StorageOS = (*core.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) + return nil +} + +// Convert_v1_VolumeSource_To_core_VolumeSource is an autogenerated conversion function. +func Convert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { + return autoConvert_v1_VolumeSource_To_core_VolumeSource(in, out, s) +} + +func autoConvert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { + out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*v1.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*v1.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*v1.SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*v1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*v1.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*v1.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*v1.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*v1.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*v1.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*v1.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(v1.ProjectedVolumeSource) + if err := Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(*in, *out, s); err != nil { + return err + } + } else { + out.Projected = nil + } + out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.StorageOS = (*v1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) + return nil +} + +// Convert_core_VolumeSource_To_v1_VolumeSource is an autogenerated conversion function. 
+func Convert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { + return autoConvert_core_VolumeSource_To_v1_VolumeSource(in, out, s) +} + +func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + out.StoragePolicyName = in.StoragePolicyName + out.StoragePolicyID = in.StoragePolicyID + return nil +} + +// Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. +func Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + out.StoragePolicyName = in.StoragePolicyName + out.StoragePolicyID = in.StoragePolicyID + return nil +} + +// Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. +func Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +// Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm is an autogenerated conversion function. +func Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in, out, s) +} + +func autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +// Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm is an autogenerated conversion function. 
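
Callers rarely invoke these functions directly; they are registered on a runtime.Scheme and conversions are requested through it. A minimal sketch of the caller's side, assuming this vendored tree wires registration the way upstream Kubernetes does (the core/install import is what performs the registration):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/core"

	// Importing the install package registers the core group's conversions
	// and defaults into legacyscheme.Scheme (upstream convention; assumed
	// to hold for this vendored tree as well).
	_ "k8s.io/kubernetes/pkg/apis/core/install"
)

func main() {
	in := &v1.Volume{Name: "data"}
	out := &core.Volume{}
	// Scheme.Convert dispatches to the registered
	// Convert_v1_Volume_To_core_Volume seen in this file.
	if err := legacyscheme.Scheme.Convert(in, out, nil); err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Println(out.Name)
}
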
+func Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go new file mode 100644 index 000000000..00e0b384a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go @@ -0,0 +1,646 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulting functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&v1.ConfigMap{}, func(obj interface{}) { SetObjectDefaults_ConfigMap(obj.(*v1.ConfigMap)) }) + scheme.AddTypeDefaultingFunc(&v1.ConfigMapList{}, func(obj interface{}) { SetObjectDefaults_ConfigMapList(obj.(*v1.ConfigMapList)) }) + scheme.AddTypeDefaultingFunc(&v1.Endpoints{}, func(obj interface{}) { SetObjectDefaults_Endpoints(obj.(*v1.Endpoints)) }) + scheme.AddTypeDefaultingFunc(&v1.EndpointsList{}, func(obj interface{}) { SetObjectDefaults_EndpointsList(obj.(*v1.EndpointsList)) }) + scheme.AddTypeDefaultingFunc(&v1.LimitRange{}, func(obj interface{}) { SetObjectDefaults_LimitRange(obj.(*v1.LimitRange)) }) + scheme.AddTypeDefaultingFunc(&v1.LimitRangeList{}, func(obj interface{}) { SetObjectDefaults_LimitRangeList(obj.(*v1.LimitRangeList)) }) + scheme.AddTypeDefaultingFunc(&v1.Namespace{}, func(obj interface{}) { SetObjectDefaults_Namespace(obj.(*v1.Namespace)) }) + scheme.AddTypeDefaultingFunc(&v1.NamespaceList{}, func(obj interface{}) { SetObjectDefaults_NamespaceList(obj.(*v1.NamespaceList)) }) + scheme.AddTypeDefaultingFunc(&v1.Node{}, func(obj interface{}) { SetObjectDefaults_Node(obj.(*v1.Node)) }) + scheme.AddTypeDefaultingFunc(&v1.NodeList{}, func(obj interface{}) { SetObjectDefaults_NodeList(obj.(*v1.NodeList)) }) + scheme.AddTypeDefaultingFunc(&v1.PersistentVolume{}, func(obj interface{}) { SetObjectDefaults_PersistentVolume(obj.(*v1.PersistentVolume)) }) + scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeClaim{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeClaim(obj.(*v1.PersistentVolumeClaim)) }) + scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeClaimList{}, func(obj interface{}) { + SetObjectDefaults_PersistentVolumeClaimList(obj.(*v1.PersistentVolumeClaimList)) + }) + scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*v1.PersistentVolumeList)) }) + scheme.AddTypeDefaultingFunc(&v1.Pod{}, func(obj
interface{}) { SetObjectDefaults_Pod(obj.(*v1.Pod)) }) + scheme.AddTypeDefaultingFunc(&v1.PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*v1.PodList)) }) + scheme.AddTypeDefaultingFunc(&v1.PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*v1.PodTemplate)) }) + scheme.AddTypeDefaultingFunc(&v1.PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*v1.PodTemplateList)) }) + scheme.AddTypeDefaultingFunc(&v1.ReplicationController{}, func(obj interface{}) { SetObjectDefaults_ReplicationController(obj.(*v1.ReplicationController)) }) + scheme.AddTypeDefaultingFunc(&v1.ReplicationControllerList{}, func(obj interface{}) { + SetObjectDefaults_ReplicationControllerList(obj.(*v1.ReplicationControllerList)) + }) + scheme.AddTypeDefaultingFunc(&v1.ResourceQuota{}, func(obj interface{}) { SetObjectDefaults_ResourceQuota(obj.(*v1.ResourceQuota)) }) + scheme.AddTypeDefaultingFunc(&v1.ResourceQuotaList{}, func(obj interface{}) { SetObjectDefaults_ResourceQuotaList(obj.(*v1.ResourceQuotaList)) }) + scheme.AddTypeDefaultingFunc(&v1.Secret{}, func(obj interface{}) { SetObjectDefaults_Secret(obj.(*v1.Secret)) }) + scheme.AddTypeDefaultingFunc(&v1.SecretList{}, func(obj interface{}) { SetObjectDefaults_SecretList(obj.(*v1.SecretList)) }) + scheme.AddTypeDefaultingFunc(&v1.Service{}, func(obj interface{}) { SetObjectDefaults_Service(obj.(*v1.Service)) }) + scheme.AddTypeDefaultingFunc(&v1.ServiceList{}, func(obj interface{}) { SetObjectDefaults_ServiceList(obj.(*v1.ServiceList)) }) + return nil +} + +func SetObjectDefaults_ConfigMap(in *v1.ConfigMap) { + SetDefaults_ConfigMap(in) +} + +func SetObjectDefaults_ConfigMapList(in *v1.ConfigMapList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ConfigMap(a) + } +} + +func SetObjectDefaults_Endpoints(in *v1.Endpoints) { + SetDefaults_Endpoints(in) +} + +func SetObjectDefaults_EndpointsList(in *v1.EndpointsList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Endpoints(a) + } +} + +func SetObjectDefaults_LimitRange(in *v1.LimitRange) { + for i := range in.Spec.Limits { + a := &in.Spec.Limits[i] + SetDefaults_LimitRangeItem(a) + SetDefaults_ResourceList(&a.Max) + SetDefaults_ResourceList(&a.Min) + SetDefaults_ResourceList(&a.Default) + SetDefaults_ResourceList(&a.DefaultRequest) + SetDefaults_ResourceList(&a.MaxLimitRequestRatio) + } +} + +func SetObjectDefaults_LimitRangeList(in *v1.LimitRangeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_LimitRange(a) + } +} + +func SetObjectDefaults_Namespace(in *v1.Namespace) { + SetDefaults_NamespaceStatus(&in.Status) +} + +func SetObjectDefaults_NamespaceList(in *v1.NamespaceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Namespace(a) + } +} + +func SetObjectDefaults_Node(in *v1.Node) { + SetDefaults_NodeStatus(&in.Status) + SetDefaults_ResourceList(&in.Status.Capacity) + SetDefaults_ResourceList(&in.Status.Allocatable) +} + +func SetObjectDefaults_NodeList(in *v1.NodeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Node(a) + } +} + +func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) { + SetDefaults_PersistentVolume(in) + SetDefaults_ResourceList(&in.Spec.Capacity) + if in.Spec.PersistentVolumeSource.HostPath != nil { + SetDefaults_HostPathVolumeSource(in.Spec.PersistentVolumeSource.HostPath) + } + if in.Spec.PersistentVolumeSource.RBD != nil { + SetDefaults_RBDPersistentVolumeSource(in.Spec.PersistentVolumeSource.RBD) + 
} + if in.Spec.PersistentVolumeSource.ISCSI != nil { + SetDefaults_ISCSIPersistentVolumeSource(in.Spec.PersistentVolumeSource.ISCSI) + } + if in.Spec.PersistentVolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk) + } + if in.Spec.PersistentVolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOPersistentVolumeSource(in.Spec.PersistentVolumeSource.ScaleIO) + } +} + +func SetObjectDefaults_PersistentVolumeClaim(in *v1.PersistentVolumeClaim) { + SetDefaults_PersistentVolumeClaim(in) + SetDefaults_ResourceList(&in.Spec.Resources.Limits) + SetDefaults_ResourceList(&in.Spec.Resources.Requests) + SetDefaults_ResourceList(&in.Status.Capacity) +} + +func SetObjectDefaults_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PersistentVolumeClaim(a) + } +} + +func SetObjectDefaults_PersistentVolumeList(in *v1.PersistentVolumeList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PersistentVolume(a) + } +} + +func SetObjectDefaults_Pod(in *v1.Pod) { + SetDefaults_Pod(in) + SetDefaults_PodSpec(&in.Spec) + for i := range in.Spec.Volumes { + a := &in.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.HostPath != nil { + SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) + } + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + if b.ServiceAccountToken != nil { + SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.InitContainers { + a := &in.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != 
nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Containers { + a := &in.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_PodList(in *v1.PodList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Pod(a) + } +} + +func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) { + SetDefaults_PodSpec(&in.Template.Spec) + for i := range in.Template.Spec.Volumes { + a := &in.Template.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.HostPath != nil { + SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) + } + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + if b.ServiceAccountToken != nil { + SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Template.Spec.InitContainers { + a := &in.Template.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { 
+ SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Template.Spec.Containers { + a := &in.Template.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } +} + +func SetObjectDefaults_PodTemplateList(in *v1.PodTemplateList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_PodTemplate(a) + } +} + +func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) { + SetDefaults_ReplicationController(in) + if in.Spec.Template != nil { + SetDefaults_PodSpec(&in.Spec.Template.Spec) + for i := range in.Spec.Template.Spec.Volumes { + a := &in.Spec.Template.Spec.Volumes[i] + SetDefaults_Volume(a) + if a.VolumeSource.HostPath != nil { + SetDefaults_HostPathVolumeSource(a.VolumeSource.HostPath) + } + if a.VolumeSource.Secret != nil { + SetDefaults_SecretVolumeSource(a.VolumeSource.Secret) + } + if a.VolumeSource.ISCSI != nil { + SetDefaults_ISCSIVolumeSource(a.VolumeSource.ISCSI) + } + if a.VolumeSource.RBD != nil { + SetDefaults_RBDVolumeSource(a.VolumeSource.RBD) + } + if a.VolumeSource.DownwardAPI != nil { + SetDefaults_DownwardAPIVolumeSource(a.VolumeSource.DownwardAPI) + for j := range a.VolumeSource.DownwardAPI.Items { + b := &a.VolumeSource.DownwardAPI.Items[j] + if b.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.FieldRef) + } + } + } + if a.VolumeSource.ConfigMap != nil { + SetDefaults_ConfigMapVolumeSource(a.VolumeSource.ConfigMap) + } + if a.VolumeSource.AzureDisk != nil { + SetDefaults_AzureDiskVolumeSource(a.VolumeSource.AzureDisk) + } + if a.VolumeSource.Projected != nil { + SetDefaults_ProjectedVolumeSource(a.VolumeSource.Projected) + for j := range a.VolumeSource.Projected.Sources { + b := &a.VolumeSource.Projected.Sources[j] + 
if b.DownwardAPI != nil { + for k := range b.DownwardAPI.Items { + c := &b.DownwardAPI.Items[k] + if c.FieldRef != nil { + SetDefaults_ObjectFieldSelector(c.FieldRef) + } + } + } + if b.ServiceAccountToken != nil { + SetDefaults_ServiceAccountTokenProjection(b.ServiceAccountToken) + } + } + } + if a.VolumeSource.ScaleIO != nil { + SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO) + } + } + for i := range in.Spec.Template.Spec.InitContainers { + a := &in.Spec.Template.Spec.InitContainers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + for i := range in.Spec.Template.Spec.Containers { + a := &in.Spec.Template.Spec.Containers[i] + SetDefaults_Container(a) + for j := range a.Ports { + b := &a.Ports[j] + SetDefaults_ContainerPort(b) + } + for j := range a.Env { + b := &a.Env[j] + if b.ValueFrom != nil { + if b.ValueFrom.FieldRef != nil { + SetDefaults_ObjectFieldSelector(b.ValueFrom.FieldRef) + } + } + } + SetDefaults_ResourceList(&a.Resources.Limits) + SetDefaults_ResourceList(&a.Resources.Requests) + if a.LivenessProbe != nil { + SetDefaults_Probe(a.LivenessProbe) + if a.LivenessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.LivenessProbe.Handler.HTTPGet) + } + } + if a.ReadinessProbe != nil { + SetDefaults_Probe(a.ReadinessProbe) + if a.ReadinessProbe.Handler.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.ReadinessProbe.Handler.HTTPGet) + } + } + if a.Lifecycle != nil { + if a.Lifecycle.PostStart != nil { + if a.Lifecycle.PostStart.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PostStart.HTTPGet) + } + } + if a.Lifecycle.PreStop != nil { + if a.Lifecycle.PreStop.HTTPGet != nil { + SetDefaults_HTTPGetAction(a.Lifecycle.PreStop.HTTPGet) + } + } + } + } + } +} + +func SetObjectDefaults_ReplicationControllerList(in *v1.ReplicationControllerList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ReplicationController(a) + } +} + +func SetObjectDefaults_ResourceQuota(in *v1.ResourceQuota) { + SetDefaults_ResourceList(&in.Spec.Hard) + SetDefaults_ResourceList(&in.Status.Hard) + SetDefaults_ResourceList(&in.Status.Used) +} + +func SetObjectDefaults_ResourceQuotaList(in *v1.ResourceQuotaList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_ResourceQuota(a) + } +} + +func SetObjectDefaults_Secret(in *v1.Secret) { + SetDefaults_Secret(in) +} + +func SetObjectDefaults_SecretList(in *v1.SecretList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Secret(a) + } +} + +func SetObjectDefaults_Service(in 
*v1.Service) { + SetDefaults_Service(in) +} + +func SetObjectDefaults_ServiceList(in *v1.ServiceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_Service(a) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go new file mode 100644 index 000000000..0c1cfaab5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validation has functions for validating the correctness of api +// objects and explaining what is wrong with them when they aren't valid. +package validation // import "k8s.io/kubernetes/pkg/apis/core/validation" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go new file mode 100644 index 000000000..8a860aba9 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go @@ -0,0 +1,94 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/apis/core" +) + +const ( + ReportingInstanceLengthLimit = 128 + ActionLengthLimit = 128 + ReasonLengthLimit = 128 + NoteLengthLimit = 1024 +) + +// ValidateEvent makes sure that the event makes sense. +func ValidateEvent(event *core.Event) field.ErrorList { + allErrs := field.ErrorList{} + // Go has no unset time; the zero value of time.Time marks a missing EventTime. + zeroTime := time.Time{} + + // "New" Events need to have EventTime set; an empty EventTime means we are validating an old-style object.
+ if event.EventTime.Time == zeroTime { + // Make sure event.Namespace and the involvedObject.Namespace agree + if len(event.InvolvedObject.Namespace) == 0 { + // event.Namespace must also be empty (or "default", for compatibility with old clients) + if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) + } + } else { + // event namespace must match + if event.Namespace != event.InvolvedObject.Namespace { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) + } + } + + } else { + if len(event.InvolvedObject.Namespace) == 0 && event.Namespace != metav1.NamespaceSystem { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) + } + if len(event.ReportingController) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("reportingController"), "")) + } + for _, msg := range validation.IsQualifiedName(event.ReportingController) { + allErrs = append(allErrs, field.Invalid(field.NewPath("reportingController"), event.ReportingController, msg)) + } + if len(event.ReportingInstance) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("reportingInstance"), "")) + } + if len(event.ReportingInstance) > ReportingInstanceLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("reportingInstance"), "", fmt.Sprintf("can have at most %v characters", ReportingInstanceLengthLimit))) + } + if len(event.Action) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("action"), "")) + } + if len(event.Action) > ActionLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("action"), "", fmt.Sprintf("can have at most %v characters", ActionLengthLimit))) + } + if len(event.Reason) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("reason"), "")) + } + if len(event.Reason) > ReasonLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("reason"), "", fmt.Sprintf("can have at most %v characters", ReasonLengthLimit))) + } + if len(event.Message) > NoteLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("message"), "", fmt.Sprintf("can have at most %v characters", NoteLengthLimit))) + } + } + + for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) { + allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg)) + } + return allErrs +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go new file mode 100644 index 000000000..6f0302c37 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -0,0 +1,5378 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "encoding/json" + "fmt" + "math" + "net" + "path" + "path/filepath" + "reflect" + "regexp" + "strings" + + "k8s.io/klog" + + "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/resource" + apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + utilfeature "k8s.io/apiserver/pkg/util/feature" + apiservice "k8s.io/kubernetes/pkg/api/service" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" + podshelper "k8s.io/kubernetes/pkg/apis/core/pods" + corev1 "k8s.io/kubernetes/pkg/apis/core/v1" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + "k8s.io/kubernetes/pkg/capabilities" + "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/fieldpath" + "k8s.io/kubernetes/pkg/master/ports" + "k8s.io/kubernetes/pkg/security/apparmor" +) + +const isNegativeErrorMsg string = apimachineryvalidation.IsNegativeErrorMsg +const isInvalidQuotaResource string = `must be a standard resource for quota` +const fieldImmutableErrorMsg string = apimachineryvalidation.FieldImmutableErrorMsg +const isNotIntegerErrorMsg string = `must be an integer` +const isNotPositiveErrorMsg string = `must be greater than zero` +const csiDriverNameRexpErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const csiDriverNameRexpFmt string = `^[a-zA-Z0-9][-a-zA-Z0-9_.]{0,61}[a-zA-Z-0-9]$` + +var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255) +var fileModeErrorMsg string = "must be a number between 0 and 0777 (octal), both inclusive" + +// BannedOwners is a black list of object that are not allowed to be owners. +var BannedOwners = apimachineryvalidation.BannedOwners + +var iscsiInitiatorIqnRegex = regexp.MustCompile(`iqn\.\d{4}-\d{2}\.([[:alnum:]-.]+)(:[^,;*&$|\s]+)$`) +var iscsiInitiatorEuiRegex = regexp.MustCompile(`^eui.[[:alnum:]]{16}$`) +var iscsiInitiatorNaaRegex = regexp.MustCompile(`^naa.[[:alnum:]]{32}$`) + +var csiDriverNameRexp = regexp.MustCompile(csiDriverNameRexpFmt) + +// ValidateHasLabel requires that metav1.ObjectMeta has a Label with key and expectedValue +func ValidateHasLabel(meta metav1.ObjectMeta, fldPath *field.Path, key, expectedValue string) field.ErrorList { + allErrs := field.ErrorList{} + actualValue, found := meta.Labels[key] + if !found { + allErrs = append(allErrs, field.Required(fldPath.Child("labels").Key(key), + fmt.Sprintf("must be '%s'", expectedValue))) + return allErrs + } + if actualValue != expectedValue { + allErrs = append(allErrs, field.Invalid(fldPath.Child("labels").Key(key), meta.Labels, + fmt.Sprintf("must be '%s'", expectedValue))) + } + return allErrs +} + +// ValidateAnnotations validates that a set of annotations are correctly defined. 
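
Everything in this validation file follows one idiom: each function takes a *field.Path naming where in the object it is looking, returns a field.ErrorList, and callers splice child results together with append(allErrs, ...). A self-contained sketch of that idiom using only apimachinery helpers (validateLabelValue is an illustrative name, not a function from this file):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

// validateLabelValue mirrors the file's pattern: take the path of the field
// being examined, return structured errors, and let callers append them.
func validateLabelValue(value string, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	for _, msg := range validation.IsValidLabelValue(value) {
		allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
	}
	return allErrs
}

func main() {
	errs := validateLabelValue("not ok!", field.NewPath("metadata", "labels").Key("app"))
	for _, e := range errs {
		fmt.Println(e.Error()) // each message carries the full path, metadata.labels[app]
	}
}
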
+func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + return apimachineryvalidation.ValidateAnnotations(annotations, fldPath) +} + +func ValidateDNS1123Label(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsDNS1123Label(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + return allErrs +} + +// ValidateDNS1123Subdomain validates that a name is a proper DNS subdomain. +func ValidateDNS1123Subdomain(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsDNS1123Subdomain(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + return allErrs +} + +func ValidatePodSpecificAnnotations(annotations map[string]string, spec *core.PodSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if value, isMirror := annotations[core.MirrorPodAnnotationKey]; isMirror { + if len(spec.NodeName) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.MirrorPodAnnotationKey), value, "must set spec.nodeName if mirror pod annotation is set")) + } + } + + if annotations[core.TolerationsAnnotationKey] != "" { + allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...) + } + + allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...) + allErrs = append(allErrs, ValidateAppArmorPodAnnotations(annotations, spec, fldPath)...) + + return allErrs +} + +// ValidateTolerationsInPodAnnotations tests that the serialized tolerations in Pod.Annotations have valid data +func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + tolerations, err := helper.GetTolerationsFromPodAnnotations(annotations) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, core.TolerationsAnnotationKey, err.Error())) + return allErrs + } + + if len(tolerations) > 0 { + allErrs = append(allErrs, ValidateTolerations(tolerations, fldPath.Child(core.TolerationsAnnotationKey))...) + } + + return allErrs +} + +func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *core.Pod, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + newAnnotations := newPod.Annotations + oldAnnotations := oldPod.Annotations + for k, oldVal := range oldAnnotations { + if newVal, exists := newAnnotations[k]; exists && newVal == oldVal { + continue // No change. + } + if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { + allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update AppArmor annotations")) + } + if k == core.MirrorPodAnnotationKey { + allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update mirror pod annotation")) + } + } + // Check for additions + for k := range newAnnotations { + if _, ok := oldAnnotations[k]; ok { + continue // No change. + } + if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { + allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add AppArmor annotations")) + } + if k == core.MirrorPodAnnotationKey { + allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add mirror pod annotation")) + } + } + allErrs = append(allErrs, ValidatePodSpecificAnnotations(newAnnotations, &newPod.Spec, fldPath)...)
+ return allErrs +} + +func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + return allErrs +} + +// ValidateNameFunc validates that the provided name is valid for a given resource type. +// Not all resources have the same validation rules for names. Prefix is true +// if the name will have a value appended to it. If the name is not valid, +// this returns a list of descriptions of individual characteristics of the +// value that were not valid. Otherwise this returns an empty list or nil. +type ValidateNameFunc apimachineryvalidation.ValidateNameFunc + +// ValidatePodName can be used to check whether the given pod name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidatePodName = apimachineryvalidation.NameIsDNSSubdomain + +// ValidateReplicationControllerName can be used to check whether the given replication +// controller name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateReplicationControllerName = apimachineryvalidation.NameIsDNSSubdomain + +// ValidateServiceName can be used to check whether the given service name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateServiceName = apimachineryvalidation.NameIsDNS1035Label + +// ValidateNodeName can be used to check whether the given node name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateNodeName = apimachineryvalidation.NameIsDNSSubdomain + +// ValidateNamespaceName can be used to check whether the given namespace name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateNamespaceName = apimachineryvalidation.ValidateNamespaceName + +// ValidateLimitRangeName can be used to check whether the given limit range name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateLimitRangeName = apimachineryvalidation.NameIsDNSSubdomain + +// ValidateResourceQuotaName can be used to check whether the given +// resource quota name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateResourceQuotaName = apimachineryvalidation.NameIsDNSSubdomain + +// ValidateSecretName can be used to check whether the given secret name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateSecretName = apimachineryvalidation.NameIsDNSSubdomain + +// ValidateServiceAccountName can be used to check whether the given service account name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateServiceAccountName = apimachineryvalidation.ValidateServiceAccountName + +// ValidateEndpointsName can be used to check whether the given endpoints name is valid. +// Prefix indicates this name will be used as part of generation, in which case +// trailing dashes are allowed. +var ValidateEndpointsName = apimachineryvalidation.NameIsDNSSubdomain + +// ValidateClusterName can be used to check whether the given cluster name is valid. 
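
The block of ValidateXxxName variables above and below shares apimachinery's name validators, each with the signature func(name string, prefix bool) []string: prefix=true means the name will have a generated suffix appended, so a trailing dash is tolerated, and an empty result means the name is valid. A short sketch of how these shared validators behave, assuming only the apimachinery package already imported in this file:

package main

import (
	"fmt"

	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
)

func main() {
	// An empty slice means the name is a valid DNS subdomain.
	fmt.Println(apimachineryvalidation.NameIsDNSSubdomain("my-pod", false))
	// Each returned string describes one problem with the name.
	fmt.Println(apimachineryvalidation.NameIsDNSSubdomain("My_Pod", false))
	// prefix=true tolerates a trailing dash, since a random suffix
	// will be appended during name generation.
	fmt.Println(apimachineryvalidation.NameIsDNSSubdomain("my-pod-", true))
}
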
+var ValidateClusterName = apimachineryvalidation.ValidateClusterName
+
+// ValidateClassName can be used to check whether the given class name is valid.
+// It is defined here to avoid import cycle between pkg/apis/storage/validation
+// (where it should be) and this file.
+var ValidateClassName = apimachineryvalidation.NameIsDNSSubdomain
+
+// ValidatePriorityClassName can be used to check whether the given priority
+// class name is valid.
+var ValidatePriorityClassName = apimachineryvalidation.NameIsDNSSubdomain
+
+// ValidateRuntimeClassName can be used to check whether the given RuntimeClass name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+func ValidateRuntimeClassName(name string, fldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+	for _, msg := range apimachineryvalidation.NameIsDNSSubdomain(name, false) {
+		allErrs = append(allErrs, field.Invalid(fldPath, name, msg))
+	}
+	return allErrs
+}
+
+// ValidateNonnegativeField validates that the given value is not negative.
+func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList {
+	return apimachineryvalidation.ValidateNonnegativeField(value, fldPath)
+}
+
+// ValidateNonnegativeQuantity validates that a Quantity is not negative.
+func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if value.Cmp(resource.Quantity{}) < 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNegativeErrorMsg))
+	}
+	return allErrs
+}
+
+// ValidatePositiveQuantityValue validates that a Quantity is positive.
+func ValidatePositiveQuantityValue(value resource.Quantity, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if value.Cmp(resource.Quantity{}) <= 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNotPositiveErrorMsg))
+	}
+	return allErrs
+}
+
+func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList {
+	return apimachineryvalidation.ValidateImmutableField(newVal, oldVal, fldPath)
+}
+
+func ValidateImmutableAnnotation(newVal string, oldVal string, annotation string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if oldVal != newVal {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("annotations", annotation), newVal, fieldImmutableErrorMsg))
+	}
+	return allErrs
+}
+
+// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already
+// been performed.
+// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
+// TODO: Remove calls to this method scattered in validations of specific resources, e.g., ValidatePodUpdate.
+func ValidateObjectMeta(meta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
+	allErrs := apimachineryvalidation.ValidateObjectMeta(meta, requiresNamespace, apimachineryvalidation.ValidateNameFunc(nameFn), fldPath)
+	// run additional checks for the finalizer name
+	for i := range meta.Finalizers {
+		allErrs = append(allErrs, validateKubeFinalizerName(string(meta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...)
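+		// (validateKubeFinalizerName, applied above, is expected to reject
+		// kubernetes.io/-qualified finalizers outside the known standard
+		// set, while user-defined finalizers pass through.)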
+ } + return allErrs +} + +// ValidateObjectMetaUpdate validates an object's metadata when updated +func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList { + allErrs := apimachineryvalidation.ValidateObjectMetaUpdate(newMeta, oldMeta, fldPath) + // run additional checks for the finalizer name + for i := range newMeta.Finalizers { + allErrs = append(allErrs, validateKubeFinalizerName(string(newMeta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...) + } + + return allErrs +} + +func ValidateVolumes(volumes []core.Volume, fldPath *field.Path) (map[string]core.VolumeSource, field.ErrorList) { + allErrs := field.ErrorList{} + + allNames := sets.String{} + vols := make(map[string]core.VolumeSource) + for i, vol := range volumes { + idxPath := fldPath.Index(i) + namePath := idxPath.Child("name") + el := validateVolumeSource(&vol.VolumeSource, idxPath, vol.Name) + if len(vol.Name) == 0 { + el = append(el, field.Required(namePath, "")) + } else { + el = append(el, ValidateDNS1123Label(vol.Name, namePath)...) + } + if allNames.Has(vol.Name) { + el = append(el, field.Duplicate(namePath, vol.Name)) + } + if len(el) == 0 { + allNames.Insert(vol.Name) + vols[vol.Name] = vol.VolumeSource + } else { + allErrs = append(allErrs, el...) + } + + } + return vols, allErrs +} + +func IsMatchedVolume(name string, volumes map[string]core.VolumeSource) bool { + if _, ok := volumes[name]; ok { + return true + } + return false +} + +func isMatchedDevice(name string, volumes map[string]core.VolumeSource) (bool, bool) { + if source, ok := volumes[name]; ok { + if source.PersistentVolumeClaim != nil { + return true, true + } + return true, false + } + return false, false +} + +func mountNameAlreadyExists(name string, devices map[string]string) bool { + if _, ok := devices[name]; ok { + return true + } + return false +} + +func mountPathAlreadyExists(mountPath string, devices map[string]string) bool { + for _, devPath := range devices { + if mountPath == devPath { + return true + } + } + + return false +} + +func deviceNameAlreadyExists(name string, mounts map[string]string) bool { + if _, ok := mounts[name]; ok { + return true + } + return false +} + +func devicePathAlreadyExists(devicePath string, mounts map[string]string) bool { + for _, mountPath := range mounts { + if mountPath == devicePath { + return true + } + } + + return false +} + +func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volName string) field.ErrorList { + numVolumes := 0 + allErrs := field.ErrorList{} + if source.EmptyDir != nil { + numVolumes++ + if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + if source.EmptyDir.SizeLimit != nil && source.EmptyDir.SizeLimit.Cmp(resource.Quantity{}) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field disabled by feature-gate for EmptyDir volumes")) + } + } else { + if source.EmptyDir.SizeLimit != nil && source.EmptyDir.SizeLimit.Cmp(resource.Quantity{}) < 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field must be a valid resource quantity")) + } + } + if !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) && source.EmptyDir.Medium == core.StorageMediumHugePages { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("medium"), "HugePages medium is disabled by feature-gate for EmptyDir volumes")) + } + } + if source.HostPath != nil { + if numVolumes 
> 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("hostPath"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateHostPathVolumeSource(source.HostPath, fldPath.Child("hostPath"))...)
+		}
+	}
+	if source.GitRepo != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("gitRepo"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateGitRepoVolumeSource(source.GitRepo, fldPath.Child("gitRepo"))...)
+		}
+	}
+	if source.GCEPersistentDisk != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(source.GCEPersistentDisk, fldPath.Child("persistentDisk"))...)
+		}
+	}
+	if source.AWSElasticBlockStore != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(source.AWSElasticBlockStore, fldPath.Child("awsElasticBlockStore"))...)
+		}
+	}
+	if source.Secret != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("secret"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateSecretVolumeSource(source.Secret, fldPath.Child("secret"))...)
+		}
+	}
+	if source.NFS != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("nfs"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateNFSVolumeSource(source.NFS, fldPath.Child("nfs"))...)
+		}
+	}
+	if source.ISCSI != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("iscsi"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateISCSIVolumeSource(source.ISCSI, fldPath.Child("iscsi"))...)
+		}
+		if source.ISCSI.InitiatorName != nil && len(volName+":"+source.ISCSI.TargetPortal) > 64 {
+			tooLongErr := "Total length of <volume name>:<iscsi.targetPortal> must be under 64 characters if iscsi.initiatorName is specified."
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), volName, tooLongErr))
+		}
+	}
+	if source.Glusterfs != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("glusterfs"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateGlusterfsVolumeSource(source.Glusterfs, fldPath.Child("glusterfs"))...)
+		}
+	}
+	if source.Flocker != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("flocker"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateFlockerVolumeSource(source.Flocker, fldPath.Child("flocker"))...)
+		}
+	}
+	if source.PersistentVolumeClaim != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("persistentVolumeClaim"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validatePersistentClaimVolumeSource(source.PersistentVolumeClaim, fldPath.Child("persistentVolumeClaim"))...)
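+			// Every source in this function follows the same mutual-exclusion
+			// pattern: the first populated source claims the volume
+			// (numVolumes++) and any further populated source is rejected.
+			// For example, a volume that sets both hostPath and nfs yields
+			// roughly:
+			//
+			//	spec.volumes[0].nfs: Forbidden: may not specify more than 1 volume type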
+		}
+	}
+	if source.RBD != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("rbd"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateRBDVolumeSource(source.RBD, fldPath.Child("rbd"))...)
+		}
+	}
+	if source.Cinder != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("cinder"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateCinderVolumeSource(source.Cinder, fldPath.Child("cinder"))...)
+		}
+	}
+	if source.CephFS != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("cephFS"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateCephFSVolumeSource(source.CephFS, fldPath.Child("cephfs"))...)
+		}
+	}
+	if source.Quobyte != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("quobyte"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateQuobyteVolumeSource(source.Quobyte, fldPath.Child("quobyte"))...)
+		}
+	}
+	if source.DownwardAPI != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("downwardAPI"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateDownwardAPIVolumeSource(source.DownwardAPI, fldPath.Child("downwardAPI"))...)
+		}
+	}
+	if source.FC != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("fc"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateFCVolumeSource(source.FC, fldPath.Child("fc"))...)
+		}
+	}
+	if source.FlexVolume != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("flexVolume"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateFlexVolumeSource(source.FlexVolume, fldPath.Child("flexVolume"))...)
+		}
+	}
+	if source.ConfigMap != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("configMap"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateConfigMapVolumeSource(source.ConfigMap, fldPath.Child("configMap"))...)
+		}
+	}
+
+	if source.AzureFile != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureFile"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...)
+		}
+	}
+
+	if source.VsphereVolume != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateVsphereVolumeSource(source.VsphereVolume, fldPath.Child("vsphereVolume"))...)
+		}
+	}
+	if source.PhotonPersistentDisk != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(source.PhotonPersistentDisk, fldPath.Child("photonPersistentDisk"))...)
+ } + } + if source.PortworxVolume != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("portworxVolume"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validatePortworxVolumeSource(source.PortworxVolume, fldPath.Child("portworxVolume"))...) + } + } + if source.AzureDisk != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureDisk"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateAzureDisk(source.AzureDisk, fldPath.Child("azureDisk"))...) + } + } + if source.StorageOS != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("storageos"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateStorageOSVolumeSource(source.StorageOS, fldPath.Child("storageos"))...) + } + } + if source.Projected != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("projected"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateProjectedVolumeSource(source.Projected, fldPath.Child("projected"))...) + } + } + if source.ScaleIO != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("scaleIO"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateScaleIOVolumeSource(source.ScaleIO, fldPath.Child("scaleIO"))...) + } + } + + if numVolumes == 0 { + allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type")) + } + + return allErrs +} + +func validateHostPathVolumeSource(hostPath *core.HostPathVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(hostPath.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + return allErrs + } + + allErrs = append(allErrs, validatePathNoBacksteps(hostPath.Path, fldPath.Child("path"))...) + allErrs = append(allErrs, validateHostPathType(hostPath.Type, fldPath.Child("type"))...) + return allErrs +} + +func validateGitRepoVolumeSource(gitRepo *core.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(gitRepo.Repository) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("repository"), "")) + } + + pathErrs := validateLocalDescendingPath(gitRepo.Directory, fldPath.Child("directory")) + allErrs = append(allErrs, pathErrs...) 
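+	// Illustrative examples: an empty directory or "repo/sub" passes the
+	// check above, while an absolute "/repo" or a backstepping "repo/.."
+	// is rejected by validateLocalDescendingPath.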
+ return allErrs +} + +func validateISCSIVolumeSource(iscsi *core.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(iscsi.TargetPortal) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), "")) + } + if len(iscsi.IQN) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), "")) + } else { + if !strings.HasPrefix(iscsi.IQN, "iqn") && !strings.HasPrefix(iscsi.IQN, "eui") && !strings.HasPrefix(iscsi.IQN, "naa") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format starting with iqn, eui, or naa")) + } else if strings.HasPrefix(iscsi.IQN, "iqn") && !iscsiInitiatorIqnRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } else if strings.HasPrefix(iscsi.IQN, "eui") && !iscsiInitiatorEuiRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } else if strings.HasPrefix(iscsi.IQN, "naa") && !iscsiInitiatorNaaRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } + } + if iscsi.Lun < 0 || iscsi.Lun > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255))) + } + if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), "")) + } + if iscsi.InitiatorName != nil { + initiator := *iscsi.InitiatorName + if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format starting with iqn, eui, or naa")) + } + if strings.HasPrefix(initiator, "iqn") && !iscsiInitiatorIqnRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } else if strings.HasPrefix(initiator, "eui") && !iscsiInitiatorEuiRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } else if strings.HasPrefix(initiator, "naa") && !iscsiInitiatorNaaRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } + } + return allErrs +} + +func validateISCSIPersistentVolumeSource(iscsi *core.ISCSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(iscsi.TargetPortal) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), "")) + } + if len(iscsi.IQN) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), "")) + } else { + if !strings.HasPrefix(iscsi.IQN, "iqn") && !strings.HasPrefix(iscsi.IQN, "eui") && !strings.HasPrefix(iscsi.IQN, "naa") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } else if strings.HasPrefix(iscsi.IQN, "iqn") && !iscsiInitiatorIqnRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } else if strings.HasPrefix(iscsi.IQN, "eui") && !iscsiInitiatorEuiRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid 
format")) + } else if strings.HasPrefix(iscsi.IQN, "naa") && !iscsiInitiatorNaaRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } + } + if iscsi.Lun < 0 || iscsi.Lun > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255))) + } + if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), "")) + } + if iscsi.SecretRef != nil { + if len(iscsi.SecretRef.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), "")) + } + } + if iscsi.InitiatorName != nil { + initiator := *iscsi.InitiatorName + if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } + if strings.HasPrefix(initiator, "iqn") && !iscsiInitiatorIqnRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } else if strings.HasPrefix(initiator, "eui") && !iscsiInitiatorEuiRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } else if strings.HasPrefix(initiator, "naa") && !iscsiInitiatorNaaRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } + } + return allErrs +} + +func validateFCVolumeSource(fc *core.FCVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(fc.TargetWWNs) < 1 && len(fc.WWIDs) < 1 { + allErrs = append(allErrs, field.Required(fldPath.Child("targetWWNs"), "must specify either targetWWNs or wwids, but not both")) + } + + if len(fc.TargetWWNs) != 0 && len(fc.WWIDs) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("targetWWNs"), fc.TargetWWNs, "targetWWNs and wwids can not be specified simultaneously")) + } + + if len(fc.TargetWWNs) != 0 { + if fc.Lun == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("lun"), "lun is required if targetWWNs is specified")) + } else { + if *fc.Lun < 0 || *fc.Lun > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), fc.Lun, validation.InclusiveRangeError(0, 255))) + } + } + } + return allErrs +} + +func validateGCEPersistentDiskVolumeSource(pd *core.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(pd.PDName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("pdName"), "")) + } + if pd.Partition < 0 || pd.Partition > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), pd.Partition, pdPartitionErrorMsg)) + } + return allErrs +} + +func validateAWSElasticBlockStoreVolumeSource(PD *core.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(PD.VolumeID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) + } + if PD.Partition < 0 || PD.Partition > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("partition"), PD.Partition, pdPartitionErrorMsg)) + } + return allErrs +} + +func validateSecretVolumeSource(secretSource *core.SecretVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := 
field.ErrorList{} + if len(secretSource.SecretName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) + } + + secretMode := secretSource.DefaultMode + if secretMode != nil && (*secretMode > 0777 || *secretMode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *secretMode, fileModeErrorMsg)) + } + + itemsPath := fldPath.Child("items") + for i, kp := range secretSource.Items { + itemPath := itemsPath.Index(i) + allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) + } + return allErrs +} + +func validateConfigMapVolumeSource(configMapSource *core.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(configMapSource.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } + + configMapMode := configMapSource.DefaultMode + if configMapMode != nil && (*configMapMode > 0777 || *configMapMode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *configMapMode, fileModeErrorMsg)) + } + + itemsPath := fldPath.Child("items") + for i, kp := range configMapSource.Items { + itemPath := itemsPath.Index(i) + allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) + } + return allErrs +} + +func validateKeyToPath(kp *core.KeyToPath, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(kp.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) + } + if len(kp.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + allErrs = append(allErrs, validateLocalNonReservedPath(kp.Path, fldPath.Child("path"))...) + if kp.Mode != nil && (*kp.Mode > 0777 || *kp.Mode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *kp.Mode, fileModeErrorMsg)) + } + + return allErrs +} + +func validatePersistentClaimVolumeSource(claim *core.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(claim.ClaimName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("claimName"), "")) + } + return allErrs +} + +func validateNFSVolumeSource(nfs *core.NFSVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(nfs.Server) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("server"), "")) + } + if len(nfs.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + if !path.IsAbs(nfs.Path) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("path"), nfs.Path, "must be an absolute path")) + } + return allErrs +} + +func validateQuobyteVolumeSource(quobyte *core.QuobyteVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(quobyte.Registry) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("registry"), "must be a host:port pair or multiple pairs separated by commas")) + } else { + for _, hostPortPair := range strings.Split(quobyte.Registry, ",") { + if _, _, err := net.SplitHostPort(hostPortPair); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("registry"), quobyte.Registry, "must be a host:port pair or multiple pairs separated by commas")) + } + } + } + + if len(quobyte.Volume) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volume"), "")) + } + return allErrs +} + +func validateGlusterfsVolumeSource(glusterfs *core.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if 
len(glusterfs.EndpointsName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), "")) + } + if len(glusterfs.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + return allErrs +} +func validateGlusterfsPersistentVolumeSource(glusterfs *core.GlusterfsPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(glusterfs.EndpointsName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), "")) + } + if len(glusterfs.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + if glusterfs.EndpointsNamespace != nil { + endpointNs := glusterfs.EndpointsNamespace + if *endpointNs == "" { + allErrs = append(allErrs, field.Invalid(fldPath.Child("endpointsNamespace"), *endpointNs, "if the endpointnamespace is set, it must be a valid namespace name")) + } else { + for _, msg := range ValidateNamespaceName(*endpointNs, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("endpointsNamespace"), *endpointNs, msg)) + } + } + } + return allErrs +} + +func validateFlockerVolumeSource(flocker *core.FlockerVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(flocker.DatasetName) == 0 && len(flocker.DatasetUUID) == 0 { + //TODO: consider adding a RequiredOneOf() error for this and similar cases + allErrs = append(allErrs, field.Required(fldPath, "one of datasetName and datasetUUID is required")) + } + if len(flocker.DatasetName) != 0 && len(flocker.DatasetUUID) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath, "resource", "datasetName and datasetUUID can not be specified simultaneously")) + } + if strings.Contains(flocker.DatasetName, "/") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("datasetName"), flocker.DatasetName, "must not contain '/'")) + } + return allErrs +} + +var validVolumeDownwardAPIFieldPathExpressions = sets.NewString( + "metadata.name", + "metadata.namespace", + "metadata.labels", + "metadata.annotations", + "metadata.uid") + +func validateDownwardAPIVolumeFile(file *core.DownwardAPIVolumeFile, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(file.Path) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + allErrs = append(allErrs, validateLocalNonReservedPath(file.Path, fldPath.Child("path"))...) + if file.FieldRef != nil { + allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validVolumeDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) + if file.ResourceFieldRef != nil { + allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously")) + } + } else if file.ResourceFieldRef != nil { + allErrs = append(allErrs, validateContainerResourceFieldSelector(file.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), true)...) 
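+		// (Exactly one of fieldRef and resourceFieldRef must be set: both
+		// set is rejected above, neither set is rejected below.)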
+ } else { + allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required")) + } + if file.Mode != nil && (*file.Mode > 0777 || *file.Mode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, fileModeErrorMsg)) + } + + return allErrs +} + +func validateDownwardAPIVolumeSource(downwardAPIVolume *core.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + downwardAPIMode := downwardAPIVolume.DefaultMode + if downwardAPIMode != nil && (*downwardAPIMode > 0777 || *downwardAPIMode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *downwardAPIMode, fileModeErrorMsg)) + } + + for _, file := range downwardAPIVolume.Items { + allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, fldPath)...) + } + return allErrs +} + +func validateProjectionSources(projection *core.ProjectedVolumeSource, projectionMode *int32, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allPaths := sets.String{} + + for i, source := range projection.Sources { + numSources := 0 + srcPath := fldPath.Child("sources").Index(i) + if projPath := srcPath.Child("secret"); source.Secret != nil { + numSources++ + if len(source.Secret.Name) == 0 { + allErrs = append(allErrs, field.Required(projPath.Child("name"), "")) + } + itemsPath := projPath.Child("items") + for i, kp := range source.Secret.Items { + itemPath := itemsPath.Index(i) + allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) + if len(kp.Path) > 0 { + curPath := kp.Path + if !allPaths.Has(curPath) { + allPaths.Insert(curPath) + } else { + allErrs = append(allErrs, field.Invalid(fldPath, source.Secret.Name, "conflicting duplicate paths")) + } + } + } + } + if projPath := srcPath.Child("configMap"); source.ConfigMap != nil { + numSources++ + if len(source.ConfigMap.Name) == 0 { + allErrs = append(allErrs, field.Required(projPath.Child("name"), "")) + } + itemsPath := projPath.Child("items") + for i, kp := range source.ConfigMap.Items { + itemPath := itemsPath.Index(i) + allErrs = append(allErrs, validateKeyToPath(&kp, itemPath)...) + if len(kp.Path) > 0 { + curPath := kp.Path + if !allPaths.Has(curPath) { + allPaths.Insert(curPath) + } else { + allErrs = append(allErrs, field.Invalid(fldPath, source.ConfigMap.Name, "conflicting duplicate paths")) + } + } + } + } + if projPath := srcPath.Child("downwardAPI"); source.DownwardAPI != nil { + numSources++ + for _, file := range source.DownwardAPI.Items { + allErrs = append(allErrs, validateDownwardAPIVolumeFile(&file, projPath)...) 
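+				// As with the secret and configMap sources above, projected
+				// file paths share the single allPaths set, so a path that
+				// repeats across sources is reported as a conflicting
+				// duplicate just below.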
+ if len(file.Path) > 0 { + curPath := file.Path + if !allPaths.Has(curPath) { + allPaths.Insert(curPath) + } else { + allErrs = append(allErrs, field.Invalid(fldPath, curPath, "conflicting duplicate paths")) + } + } + } + } + if projPath := srcPath.Child("serviceAccountToken"); source.ServiceAccountToken != nil { + numSources++ + if !utilfeature.DefaultFeatureGate.Enabled(features.TokenRequestProjection) { + allErrs = append(allErrs, field.Forbidden(projPath, "TokenRequestProjection feature is not enabled")) + } + if source.ServiceAccountToken.ExpirationSeconds < 10*60 { + allErrs = append(allErrs, field.Invalid(projPath.Child("expirationSeconds"), source.ServiceAccountToken.ExpirationSeconds, "may not specify a duration less than 10 minutes")) + } + if source.ServiceAccountToken.ExpirationSeconds > 1<<32 { + allErrs = append(allErrs, field.Invalid(projPath.Child("expirationSeconds"), source.ServiceAccountToken.ExpirationSeconds, "may not specify a duration larger than 2^32 seconds")) + } + if source.ServiceAccountToken.Path == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + } + } + if numSources > 1 { + allErrs = append(allErrs, field.Forbidden(srcPath, "may not specify more than 1 volume type")) + } + } + return allErrs +} + +func validateProjectedVolumeSource(projection *core.ProjectedVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + projectionMode := projection.DefaultMode + if projectionMode != nil && (*projectionMode > 0777 || *projectionMode < 0) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *projectionMode, fileModeErrorMsg)) + } + + allErrs = append(allErrs, validateProjectionSources(projection, projectionMode, fldPath)...) + return allErrs +} + +var supportedHostPathTypes = sets.NewString( + string(core.HostPathUnset), + string(core.HostPathDirectoryOrCreate), + string(core.HostPathDirectory), + string(core.HostPathFileOrCreate), + string(core.HostPathFile), + string(core.HostPathSocket), + string(core.HostPathCharDev), + string(core.HostPathBlockDev)) + +func validateHostPathType(hostPathType *core.HostPathType, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if hostPathType != nil && !supportedHostPathTypes.Has(string(*hostPathType)) { + allErrs = append(allErrs, field.NotSupported(fldPath, hostPathType, supportedHostPathTypes.List())) + } + + return allErrs +} + +// This validate will make sure targetPath: +// 1. is not abs path +// 2. does not have any element which is ".." +func validateLocalDescendingPath(targetPath string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if path.IsAbs(targetPath) { + allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must be a relative path")) + } + + allErrs = append(allErrs, validatePathNoBacksteps(targetPath, fldPath)...) + + return allErrs +} + +// validatePathNoBacksteps makes sure the targetPath does not have any `..` path elements when split +// +// This assumes the OS of the apiserver and the nodes are the same. The same check should be done +// on the node to ensure there are no backsteps. +func validatePathNoBacksteps(targetPath string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + parts := strings.Split(filepath.ToSlash(targetPath), "/") + for _, item := range parts { + if item == ".." 
{ + allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not contain '..'")) + break // even for `../../..`, one error is sufficient to make the point + } + } + return allErrs +} + +// validateMountPropagation verifies that MountPropagation field is valid and +// allowed for given container. +func validateMountPropagation(mountPropagation *core.MountPropagationMode, container *core.Container, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if mountPropagation == nil { + return allErrs + } + + supportedMountPropagations := sets.NewString(string(core.MountPropagationBidirectional), string(core.MountPropagationHostToContainer), string(core.MountPropagationNone)) + if !supportedMountPropagations.Has(string(*mountPropagation)) { + allErrs = append(allErrs, field.NotSupported(fldPath, *mountPropagation, supportedMountPropagations.List())) + } + + if container == nil { + // The container is not available yet, e.g. during validation of + // PodPreset. Stop validation now, Pod validation will refuse final + // Pods with Bidirectional propagation in non-privileged containers. + return allErrs + } + + privileged := container.SecurityContext != nil && container.SecurityContext.Privileged != nil && *container.SecurityContext.Privileged + if *mountPropagation == core.MountPropagationBidirectional && !privileged { + allErrs = append(allErrs, field.Forbidden(fldPath, "Bidirectional mount propagation is available only to privileged containers")) + } + return allErrs +} + +// This validate will make sure targetPath: +// 1. is not abs path +// 2. does not contain any '..' elements +// 3. does not start with '..' +func validateLocalNonReservedPath(targetPath string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, validateLocalDescendingPath(targetPath, fldPath)...) + // Don't report this error if the check for .. elements already caught it. 
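+	// (Illustrative: "..file" is only caught by the check below, whereas
+	// "../file" already failed the backstep check and is skipped here to
+	// avoid a duplicate error.)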
+ if strings.HasPrefix(targetPath, "..") && !strings.HasPrefix(targetPath, "../") { + allErrs = append(allErrs, field.Invalid(fldPath, targetPath, "must not start with '..'")) + } + return allErrs +} + +func validateRBDVolumeSource(rbd *core.RBDVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(rbd.CephMonitors) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) + } + if len(rbd.RBDImage) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("image"), "")) + } + return allErrs +} + +func validateRBDPersistentVolumeSource(rbd *core.RBDPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(rbd.CephMonitors) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) + } + if len(rbd.RBDImage) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("image"), "")) + } + return allErrs +} + +func validateCinderVolumeSource(cd *core.CinderVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(cd.VolumeID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) + } + if cd.SecretRef != nil { + if len(cd.SecretRef.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), "")) + } + } + return allErrs +} + +func validateCinderPersistentVolumeSource(cd *core.CinderPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(cd.VolumeID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) + } + if cd.SecretRef != nil { + if len(cd.SecretRef.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), "")) + } + if len(cd.SecretRef.Namespace) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "namespace"), "")) + } + } + return allErrs +} + +func validateCephFSVolumeSource(cephfs *core.CephFSVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(cephfs.Monitors) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) + } + return allErrs +} + +func validateCephFSPersistentVolumeSource(cephfs *core.CephFSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(cephfs.Monitors) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) + } + return allErrs +} + +func validateFlexVolumeSource(fv *core.FlexVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(fv.Driver) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) + } + + // Make sure user-specified options don't use kubernetes namespaces + for k := range fv.Options { + namespace := k + if parts := strings.SplitN(k, "/", 2); len(parts) == 2 { + namespace = parts[0] + } + normalized := "." 
+ strings.ToLower(namespace) + if strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("options").Key(k), k, "kubernetes.io and k8s.io namespaces are reserved")) + } + } + + return allErrs +} + +func validateFlexPersistentVolumeSource(fv *core.FlexPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(fv.Driver) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) + } + + // Make sure user-specified options don't use kubernetes namespaces + for k := range fv.Options { + namespace := k + if parts := strings.SplitN(k, "/", 2); len(parts) == 2 { + namespace = parts[0] + } + normalized := "." + strings.ToLower(namespace) + if strings.HasSuffix(normalized, ".kubernetes.io") || strings.HasSuffix(normalized, ".k8s.io") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("options").Key(k), k, "kubernetes.io and k8s.io namespaces are reserved")) + } + } + + return allErrs +} + +func validateAzureFile(azure *core.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if azure.SecretName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) + } + if azure.ShareName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), "")) + } + return allErrs +} + +func validateAzureFilePV(azure *core.AzureFilePersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if azure.SecretName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) + } + if azure.ShareName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("shareName"), "")) + } + if azure.SecretNamespace != nil { + if len(*azure.SecretNamespace) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretNamespace"), "")) + } + } + return allErrs +} + +func validateAzureDisk(azure *core.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList { + var supportedCachingModes = sets.NewString(string(core.AzureDataDiskCachingNone), string(core.AzureDataDiskCachingReadOnly), string(core.AzureDataDiskCachingReadWrite)) + var supportedDiskKinds = sets.NewString(string(core.AzureSharedBlobDisk), string(core.AzureDedicatedBlobDisk), string(core.AzureManagedDisk)) + + diskUriSupportedManaged := []string{"/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}"} + diskUriSupportedblob := []string{"https://{account-name}.blob.core.windows.net/{container-name}/{disk-name}.vhd"} + + allErrs := field.ErrorList{} + if azure.DiskName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("diskName"), "")) + } + + if azure.DataDiskURI == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("diskURI"), "")) + } + + if azure.CachingMode != nil && !supportedCachingModes.Has(string(*azure.CachingMode)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("cachingMode"), *azure.CachingMode, supportedCachingModes.List())) + } + + if azure.Kind != nil && !supportedDiskKinds.Has(string(*azure.Kind)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("kind"), *azure.Kind, supportedDiskKinds.List())) + } + + // validate that DiskUri is the correct format + if azure.Kind != nil && *azure.Kind == core.AzureManagedDisk && strings.Index(azure.DataDiskURI, "/subscriptions/") != 0 { + allErrs = append(allErrs, 
field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedManaged)) + } + + if azure.Kind != nil && *azure.Kind != core.AzureManagedDisk && strings.Index(azure.DataDiskURI, "https://") != 0 { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedblob)) + } + + return allErrs +} + +func validateVsphereVolumeSource(cd *core.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(cd.VolumePath) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), "")) + } + return allErrs +} + +func validatePhotonPersistentDiskVolumeSource(cd *core.PhotonPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(cd.PdID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("pdID"), "")) + } + return allErrs +} + +func validatePortworxVolumeSource(pwx *core.PortworxVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(pwx.VolumeID) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) + } + return allErrs +} + +func validateScaleIOVolumeSource(sio *core.ScaleIOVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if sio.Gateway == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), "")) + } + if sio.System == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("system"), "")) + } + if sio.VolumeName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) + } + return allErrs +} + +func validateScaleIOPersistentVolumeSource(sio *core.ScaleIOPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if sio.Gateway == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), "")) + } + if sio.System == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("system"), "")) + } + if sio.VolumeName == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) + } + return allErrs +} + +func validateLocalVolumeSource(ls *core.LocalVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if ls.Path == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) + return allErrs + } + + allErrs = append(allErrs, validatePathNoBacksteps(ls.Path, fldPath.Child("path"))...) + return allErrs +} + +func validateStorageOSVolumeSource(storageos *core.StorageOSVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(storageos.VolumeName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) + } else { + allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeName, fldPath.Child("volumeName"))...) + } + if len(storageos.VolumeNamespace) > 0 { + allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeNamespace, fldPath.Child("volumeNamespace"))...) 
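+		// (Illustrative: "vol-1" satisfies the DNS-1123 label checks used
+		// for volumeName and volumeNamespace, while "Vol_1" would be
+		// rejected.)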
+	}
+	if storageos.SecretRef != nil {
+		if len(storageos.SecretRef.Name) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
+		}
+	}
+	return allErrs
+}
+
+func validateStorageOSPersistentVolumeSource(storageos *core.StorageOSPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(storageos.VolumeName) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), ""))
+	} else {
+		allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeName, fldPath.Child("volumeName"))...)
+	}
+	if len(storageos.VolumeNamespace) > 0 {
+		allErrs = append(allErrs, ValidateDNS1123Label(storageos.VolumeNamespace, fldPath.Child("volumeNamespace"))...)
+	}
+	if storageos.SecretRef != nil {
+		if len(storageos.SecretRef.Name) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), ""))
+		}
+		if len(storageos.SecretRef.Namespace) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "namespace"), ""))
+		}
+	}
+	return allErrs
+}
+
+func ValidateCSIDriverName(driverName string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(driverName) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath, ""))
+	}
+
+	if len(driverName) > 63 {
+		allErrs = append(allErrs, field.TooLong(fldPath, driverName, 63))
+	}
+
+	if !csiDriverNameRexp.MatchString(driverName) {
+		allErrs = append(allErrs, field.Invalid(fldPath, driverName, validation.RegexError(csiDriverNameRexpErrMsg, csiDriverNameRexpFmt, "csi-hostpath")))
+	}
+	return allErrs
+}
+
+func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	allErrs = append(allErrs, ValidateCSIDriverName(csi.Driver, fldPath.Child("driver"))...)
+
+	if len(csi.VolumeHandle) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), ""))
+	}
+
+	if csi.ControllerPublishSecretRef != nil {
+		if len(csi.ControllerPublishSecretRef.Name) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("controllerPublishSecretRef", "name"), ""))
+		} else {
+			allErrs = append(allErrs, ValidateDNS1123Label(csi.ControllerPublishSecretRef.Name, fldPath.Child("name"))...)
+		}
+		if len(csi.ControllerPublishSecretRef.Namespace) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("controllerPublishSecretRef", "namespace"), ""))
+		} else {
+			allErrs = append(allErrs, ValidateDNS1123Label(csi.ControllerPublishSecretRef.Namespace, fldPath.Child("namespace"))...)
+		}
+	}
+
+	if csi.NodePublishSecretRef != nil {
+		if len(csi.NodePublishSecretRef.Name) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("nodePublishSecretRef", "name"), ""))
+		} else {
+			allErrs = append(allErrs, ValidateDNS1123Label(csi.NodePublishSecretRef.Name, fldPath.Child("name"))...)
+		}
+		if len(csi.NodePublishSecretRef.Namespace) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("nodePublishSecretRef", "namespace"), ""))
+		} else {
+			allErrs = append(allErrs, ValidateDNS1123Label(csi.NodePublishSecretRef.Namespace, fldPath.Child("namespace"))...)
+		}
+	}
+
+	if csi.NodeStageSecretRef != nil {
+		if len(csi.NodeStageSecretRef.Name) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("nodeStageSecretRef", "name"), ""))
+		} else {
+			allErrs = append(allErrs, ValidateDNS1123Label(csi.NodeStageSecretRef.Name, fldPath.Child("name"))...)
+ } + if len(csi.NodeStageSecretRef.Namespace) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("nodeStageSecretRef", "namespace"), "")) + } else { + allErrs = append(allErrs, ValidateDNS1123Label(csi.NodeStageSecretRef.Namespace, fldPath.Child("namespace"))...) + } + } + + return allErrs +} + +// ValidatePersistentVolumeName checks that a name is appropriate for a +// PersistentVolumeName object. +var ValidatePersistentVolumeName = apimachineryvalidation.NameIsDNSSubdomain + +var supportedAccessModes = sets.NewString(string(core.ReadWriteOnce), string(core.ReadOnlyMany), string(core.ReadWriteMany)) + +var supportedReclaimPolicy = sets.NewString(string(core.PersistentVolumeReclaimDelete), string(core.PersistentVolumeReclaimRecycle), string(core.PersistentVolumeReclaimRetain)) + +var supportedVolumeModes = sets.NewString(string(core.PersistentVolumeBlock), string(core.PersistentVolumeFilesystem)) + +var supportedDataSourceAPIGroupKinds = map[schema.GroupKind]bool{ + {Group: "snapshot.storage.k8s.io", Kind: "VolumeSnapshot"}: true, +} + +func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList { + metaPath := field.NewPath("metadata") + allErrs := ValidateObjectMeta(&pv.ObjectMeta, false, ValidatePersistentVolumeName, metaPath) + + specPath := field.NewPath("spec") + if len(pv.Spec.AccessModes) == 0 { + allErrs = append(allErrs, field.Required(specPath.Child("accessModes"), "")) + } + for _, mode := range pv.Spec.AccessModes { + if !supportedAccessModes.Has(string(mode)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("accessModes"), mode, supportedAccessModes.List())) + } + } + + if len(pv.Spec.Capacity) == 0 { + allErrs = append(allErrs, field.Required(specPath.Child("capacity"), "")) + } + + if _, ok := pv.Spec.Capacity[core.ResourceStorage]; !ok || len(pv.Spec.Capacity) > 1 { + allErrs = append(allErrs, field.NotSupported(specPath.Child("capacity"), pv.Spec.Capacity, []string{string(core.ResourceStorage)})) + } + capPath := specPath.Child("capacity") + for r, qty := range pv.Spec.Capacity { + allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...) + allErrs = append(allErrs, ValidatePositiveQuantityValue(qty, capPath.Key(string(r)))...) + } + if len(string(pv.Spec.PersistentVolumeReclaimPolicy)) > 0 { + if !supportedReclaimPolicy.Has(string(pv.Spec.PersistentVolumeReclaimPolicy)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("persistentVolumeReclaimPolicy"), pv.Spec.PersistentVolumeReclaimPolicy, supportedReclaimPolicy.List())) + } + } + + nodeAffinitySpecified, errs := validateVolumeNodeAffinity(pv.Spec.NodeAffinity, specPath.Child("nodeAffinity")) + allErrs = append(allErrs, errs...) + + numVolumes := 0 + if pv.Spec.HostPath != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("hostPath"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateHostPathVolumeSource(pv.Spec.HostPath, specPath.Child("hostPath"))...) + } + } + if pv.Spec.GCEPersistentDisk != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("gcePersistentDisk"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateGCEPersistentDiskVolumeSource(pv.Spec.GCEPersistentDisk, specPath.Child("persistentDisk"))...) 
+		}
+	}
+	if pv.Spec.AWSElasticBlockStore != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("awsElasticBlockStore"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateAWSElasticBlockStoreVolumeSource(pv.Spec.AWSElasticBlockStore, specPath.Child("awsElasticBlockStore"))...)
+		}
+	}
+	if pv.Spec.Glusterfs != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("glusterfs"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateGlusterfsPersistentVolumeSource(pv.Spec.Glusterfs, specPath.Child("glusterfs"))...)
+		}
+	}
+	if pv.Spec.Flocker != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("flocker"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateFlockerVolumeSource(pv.Spec.Flocker, specPath.Child("flocker"))...)
+		}
+	}
+	if pv.Spec.NFS != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("nfs"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateNFSVolumeSource(pv.Spec.NFS, specPath.Child("nfs"))...)
+		}
+	}
+	if pv.Spec.RBD != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("rbd"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateRBDPersistentVolumeSource(pv.Spec.RBD, specPath.Child("rbd"))...)
+		}
+	}
+	if pv.Spec.Quobyte != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("quobyte"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateQuobyteVolumeSource(pv.Spec.Quobyte, specPath.Child("quobyte"))...)
+		}
+	}
+	if pv.Spec.CephFS != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("cephFS"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateCephFSPersistentVolumeSource(pv.Spec.CephFS, specPath.Child("cephfs"))...)
+		}
+	}
+	if pv.Spec.ISCSI != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("iscsi"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateISCSIPersistentVolumeSource(pv.Spec.ISCSI, specPath.Child("iscsi"))...)
+		}
+		if pv.Spec.ISCSI.InitiatorName != nil && len(pv.ObjectMeta.Name+":"+pv.Spec.ISCSI.TargetPortal) > 64 {
+			tooLongErr := "Total length of <volume name>:<iscsi.targetPortal> must be under 64 characters if iscsi.initiatorName is specified."
+			allErrs = append(allErrs, field.Invalid(metaPath.Child("name"), pv.ObjectMeta.Name, tooLongErr))
+		}
+	}
+	if pv.Spec.Cinder != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("cinder"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateCinderPersistentVolumeSource(pv.Spec.Cinder, specPath.Child("cinder"))...)
+		}
+	}
+	if pv.Spec.FC != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("fc"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateFCVolumeSource(pv.Spec.FC, specPath.Child("fc"))...)
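+			// (Note: the flexVolume branch just below increments numVolumes
+			// without the usual "more than 1 volume type" guard, so that
+			// source alone never triggers the exclusivity error.)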
+ } + } + if pv.Spec.FlexVolume != nil { + numVolumes++ + allErrs = append(allErrs, validateFlexPersistentVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...) + } + if pv.Spec.AzureFile != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("azureFile"), "may not specify more than 1 volume type")) + + } else { + numVolumes++ + allErrs = append(allErrs, validateAzureFilePV(pv.Spec.AzureFile, specPath.Child("azureFile"))...) + } + } + + if pv.Spec.VsphereVolume != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("vsphereVolume"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateVsphereVolumeSource(pv.Spec.VsphereVolume, specPath.Child("vsphereVolume"))...) + } + } + if pv.Spec.PhotonPersistentDisk != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("photonPersistentDisk"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validatePhotonPersistentDiskVolumeSource(pv.Spec.PhotonPersistentDisk, specPath.Child("photonPersistentDisk"))...) + } + } + if pv.Spec.PortworxVolume != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("portworxVolume"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validatePortworxVolumeSource(pv.Spec.PortworxVolume, specPath.Child("portworxVolume"))...) + } + } + if pv.Spec.AzureDisk != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("azureDisk"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateAzureDisk(pv.Spec.AzureDisk, specPath.Child("azureDisk"))...) + } + } + if pv.Spec.ScaleIO != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("scaleIO"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateScaleIOPersistentVolumeSource(pv.Spec.ScaleIO, specPath.Child("scaleIO"))...) + } + } + if pv.Spec.Local != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("local"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) { + allErrs = append(allErrs, field.Forbidden(specPath.Child("local"), "Local volumes are disabled by feature-gate")) + } + allErrs = append(allErrs, validateLocalVolumeSource(pv.Spec.Local, specPath.Child("local"))...) + + // NodeAffinity is required + if !nodeAffinitySpecified { + allErrs = append(allErrs, field.Required(metaPath.Child("annotations"), "Local volume requires node affinity")) + } + } + } + if pv.Spec.StorageOS != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("storageos"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateStorageOSPersistentVolumeSource(pv.Spec.StorageOS, specPath.Child("storageos"))...) + } + } + + if pv.Spec.CSI != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("csi"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateCSIPersistentVolumeSource(pv.Spec.CSI, specPath.Child("csi"))...) 
+ } + } + + if numVolumes == 0 { + allErrs = append(allErrs, field.Required(specPath, "must specify a volume type")) + } + + // do not allow hostPath mounts of '/' to have a 'recycle' reclaim policy + if pv.Spec.HostPath != nil && path.Clean(pv.Spec.HostPath.Path) == "/" && pv.Spec.PersistentVolumeReclaimPolicy == core.PersistentVolumeReclaimRecycle { + allErrs = append(allErrs, field.Forbidden(specPath.Child("persistentVolumeReclaimPolicy"), "may not be 'recycle' for a hostPath mount of '/'")) + } + + if len(pv.Spec.StorageClassName) > 0 { + for _, msg := range ValidateClassName(pv.Spec.StorageClassName, false) { + allErrs = append(allErrs, field.Invalid(specPath.Child("storageClassName"), pv.Spec.StorageClassName, msg)) + } + } + if pv.Spec.VolumeMode != nil && !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, field.Forbidden(specPath.Child("volumeMode"), "PersistentVolume volumeMode is disabled by feature-gate")) + } else if pv.Spec.VolumeMode != nil && !supportedVolumeModes.Has(string(*pv.Spec.VolumeMode)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("volumeMode"), *pv.Spec.VolumeMode, supportedVolumeModes.List())) + } + return allErrs +} + +// ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make. +// newPv is updated with fields that cannot be changed. +func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = ValidatePersistentVolume(newPv) + + // PersistentVolumeSource should be immutable after creation. + if !apiequality.Semantic.DeepEqual(newPv.Spec.PersistentVolumeSource, oldPv.Spec.PersistentVolumeSource) { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "persistentvolumesource"), "is immutable after creation")) + } + + newPv.Status = oldPv.Status + + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.VolumeMode, oldPv.Spec.VolumeMode, field.NewPath("volumeMode"))...) + } + + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + // Allow setting NodeAffinity if oldPv NodeAffinity was not set + if oldPv.Spec.NodeAffinity != nil { + allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.NodeAffinity, oldPv.Spec.NodeAffinity, field.NewPath("nodeAffinity"))...) + } + } + + return allErrs +} + +// ValidatePersistentVolumeStatusUpdate tests to see if the status update is legal for an end user to make. +// newPv is updated with fields that cannot be changed. +func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *core.PersistentVolume) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newPv.ObjectMeta, &oldPv.ObjectMeta, field.NewPath("metadata")) + if len(newPv.ResourceVersion) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) + } + newPv.Spec = oldPv.Spec + return allErrs +} + +// ValidatePersistentVolumeClaim validates a PersistentVolumeClaim +func ValidatePersistentVolumeClaim(pvc *core.PersistentVolumeClaim) field.ErrorList { + allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&pvc.Spec, field.NewPath("spec"))...) 
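As a usage sketch (illustrative only, not part of the vendored diff): the numVolumes bookkeeping above makes a PersistentVolume name exactly one volume source, and a second source is reported as Forbidden. This assumes the vendored packages k8s.io/kubernetes/pkg/apis/core and k8s.io/kubernetes/pkg/apis/core/validation at a revision compatible with this patch.

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/api/resource"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/kubernetes/pkg/apis/core"
    	"k8s.io/kubernetes/pkg/apis/core/validation"
    )

    func main() {
    	// Two volume sources set at once: hostPath is validated first, and
    	// nfs should be rejected with "may not specify more than 1 volume type".
    	pv := &core.PersistentVolume{
    		ObjectMeta: metav1.ObjectMeta{Name: "pv-demo"},
    		Spec: core.PersistentVolumeSpec{
    			AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
    			Capacity:    core.ResourceList{core.ResourceStorage: resource.MustParse("1Gi")},
    			PersistentVolumeSource: core.PersistentVolumeSource{
    				HostPath: &core.HostPathVolumeSource{Path: "/data"},
    				NFS:      &core.NFSVolumeSource{Server: "nfs.example.com", Path: "/export"},
    			},
    		},
    	}
    	for _, err := range validation.ValidatePersistentVolume(pv) {
    		fmt.Println(err)
    	}
    }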
+ return allErrs +} + +// ValidatePersistentVolumeClaimSpec validates a PersistentVolumeClaimSpec +func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(spec.AccessModes) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "at least 1 access mode is required")) + } + if spec.Selector != nil { + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) + } + for _, mode := range spec.AccessModes { + if mode != core.ReadWriteOnce && mode != core.ReadOnlyMany && mode != core.ReadWriteMany { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, supportedAccessModes.List())) + } + } + storageValue, ok := spec.Resources.Requests[core.ResourceStorage] + if !ok { + allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(core.ResourceStorage)), "")) + } else { + allErrs = append(allErrs, ValidateResourceQuantityValue(string(core.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...) + allErrs = append(allErrs, ValidatePositiveQuantityValue(storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...) + } + + if spec.StorageClassName != nil && len(*spec.StorageClassName) > 0 { + for _, msg := range ValidateClassName(*spec.StorageClassName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("storageClassName"), *spec.StorageClassName, msg)) + } + } + if spec.VolumeMode != nil && !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("volumeMode"), "PersistentVolumeClaim volumeMode is disabled by feature-gate")) + } else if spec.VolumeMode != nil && !supportedVolumeModes.Has(string(*spec.VolumeMode)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumeMode"), *spec.VolumeMode, supportedVolumeModes.List())) + } + + if spec.DataSource != nil && !utilfeature.DefaultFeatureGate.Enabled(features.VolumeSnapshotDataSource) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("dataSource"), "VolumeSnapshotDataSource is disabled by feature-gate")) + } else if spec.DataSource != nil { + if len(spec.DataSource.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("dataSource", "name"), "")) + } + + groupKind := schema.GroupKind{Group: "", Kind: spec.DataSource.Kind} + if spec.DataSource.APIGroup != nil { + groupKind.Group = string(*spec.DataSource.APIGroup) + } + groupKindList := make([]string, 0, len(supportedDataSourceAPIGroupKinds)) + for grp := range supportedDataSourceAPIGroupKinds { + groupKindList = append(groupKindList, grp.String()) + } + if !supportedDataSourceAPIGroupKinds[groupKind] { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("dataSource"), groupKind.String(), groupKindList)) + } + } + + return allErrs +} + +// ValidatePersistentVolumeClaimUpdate validates an update to a PersistentVolumeClaim +func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...) + newPvcClone := newPvc.DeepCopy() + oldPvcClone := oldPvc.DeepCopy() + + // PVController needs to update PVC.Spec w/ VolumeName. + // Claims are immutable in order to enforce quota, range limits, etc. 
without gaming the system.
+ if len(oldPvc.Spec.VolumeName) == 0 {
+ // volumeName changes are allowed once.
+ oldPvcClone.Spec.VolumeName = newPvcClone.Spec.VolumeName
+ }
+
+ if validateStorageClassUpgrade(oldPvcClone.Annotations, newPvcClone.Annotations,
+ oldPvcClone.Spec.StorageClassName, newPvcClone.Spec.StorageClassName) {
+ newPvcClone.Spec.StorageClassName = nil
+ metav1.SetMetaDataAnnotation(&newPvcClone.ObjectMeta, core.BetaStorageClassAnnotation, oldPvcClone.Annotations[core.BetaStorageClassAnnotation])
+ } else {
+ // storageclass annotation should be immutable after creation
+ // TODO: remove Beta when no longer needed
+ allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...)
+ }
+
+ if utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) {
+ // let's make sure the storage values are the same.
+ if newPvc.Status.Phase == core.ClaimBound && newPvcClone.Spec.Resources.Requests != nil {
+ newPvcClone.Spec.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"]
+ }
+
+ oldSize := oldPvc.Spec.Resources.Requests["storage"]
+ newSize := newPvc.Spec.Resources.Requests["storage"]
+
+ if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "is immutable after creation except resources.requests for bound claims"))
+ }
+ if newSize.Cmp(oldSize) < 0 {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "resources", "requests", "storage"), "field cannot be less than previous value"))
+ }
+
+ } else {
+ // changes to Spec are not allowed, but updates to labels and some annotations are OK.
+ // no-op updates pass validation.
+ if !apiequality.Semantic.DeepEqual(newPvcClone.Spec, oldPvcClone.Spec) {
+ allErrs = append(allErrs, field.Forbidden(field.NewPath("spec"), "field is immutable after creation"))
+ }
+ }
+
+ if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
+ allErrs = append(allErrs, ValidateImmutableField(newPvc.Spec.VolumeMode, oldPvc.Spec.VolumeMode, field.NewPath("volumeMode"))...)
+ }
+ return allErrs
+}
+
+// Provide an upgrade path from PVC with storage class specified in beta
+// annotation to storage class specified in attribute. We allow update of
+// StorageClassName only if the following four conditions are met at the same time:
+// 1. The old pvc's StorageClassAnnotation is set
+// 2. The old pvc's StorageClassName is not set
+// 3. The new pvc's StorageClassName is set and equal to the old value in annotation
+// 4. If the new pvc's StorageClassAnnotation is set, it must be equal to the old pv/pvc's StorageClassAnnotation
+func validateStorageClassUpgrade(oldAnnotations, newAnnotations map[string]string, oldScName, newScName *string) bool {
+ oldSc, oldAnnotationExist := oldAnnotations[core.BetaStorageClassAnnotation]
+ newScInAnnotation, newAnnotationExist := newAnnotations[core.BetaStorageClassAnnotation]
+ return oldAnnotationExist /* condition 1 */ &&
+ oldScName == nil /* condition 2 */ &&
+ (newScName != nil && *newScName == oldSc) /* condition 3 */ &&
+ (!newAnnotationExist || newScInAnnotation == oldSc) /* condition 4 */
+}
+
+// ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim
+func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList {
+ allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata"))
+ if len(newPvc.ResourceVersion) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), ""))
+ }
+ if len(newPvc.Spec.AccessModes) == 0 {
+ allErrs = append(allErrs, field.Required(field.NewPath("Spec", "accessModes"), ""))
+ }
+ if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) && len(newPvc.Status.Conditions) > 0 {
+ conditionPath := field.NewPath("status", "conditions")
+ allErrs = append(allErrs, field.Forbidden(conditionPath, "invalid field"))
+ }
+ capPath := field.NewPath("status", "capacity")
+ for r, qty := range newPvc.Status.Capacity {
+ allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
+ }
+ newPvc.Spec = oldPvc.Spec
+ return allErrs
+}
+
+var supportedPortProtocols = sets.NewString(string(core.ProtocolTCP), string(core.ProtocolUDP), string(core.ProtocolSCTP))
+
+func validateContainerPorts(ports []core.ContainerPort, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ allNames := sets.String{}
+ for i, port := range ports {
+ idxPath := fldPath.Index(i)
+ if len(port.Name) > 0 {
+ if msgs := validation.IsValidPortName(port.Name); len(msgs) != 0 {
+ for i = range msgs {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), port.Name, msgs[i]))
+ }
+ } else if allNames.Has(port.Name) {
+ allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), port.Name))
+ } else {
+ allNames.Insert(port.Name)
+ }
+ }
+ if port.ContainerPort == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("containerPort"), ""))
+ } else {
+ for _, msg := range validation.IsValidPortNum(int(port.ContainerPort)) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, msg))
+ }
+ }
+ if port.HostPort != 0 {
+ for _, msg := range validation.IsValidPortNum(int(port.HostPort)) {
+ allErrs = append(allErrs, field.Invalid(idxPath.Child("hostPort"), port.HostPort, msg))
+ }
+ }
+ if len(port.Protocol) == 0 {
+ allErrs = append(allErrs, field.Required(idxPath.Child("protocol"), ""))
+ } else if !utilfeature.DefaultFeatureGate.Enabled(features.SCTPSupport) && port.Protocol == core.ProtocolSCTP {
+ allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, []string{string(core.ProtocolTCP), string(core.ProtocolUDP)}))
+ } else if !supportedPortProtocols.Has(string(port.Protocol)) {
+ allErrs = append(allErrs, field.NotSupported(idxPath.Child("protocol"), port.Protocol, supportedPortProtocols.List()))
+ }
+ }
+ return allErrs
+}
+
+// ValidateEnv validates env vars
+func ValidateEnv(vars
[]core.EnvVar, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for i, ev := range vars { + idxPath := fldPath.Index(i) + if len(ev.Name) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) + } else { + for _, msg := range validation.IsEnvVarName(ev.Name) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ev.Name, msg)) + } + } + allErrs = append(allErrs, validateEnvVarValueFrom(ev, idxPath.Child("valueFrom"))...) + } + return allErrs +} + +var validEnvDownwardAPIFieldPathExpressions = sets.NewString( + "metadata.name", + "metadata.namespace", + "metadata.uid", + "spec.nodeName", + "spec.serviceAccountName", + "status.hostIP", + "status.podIP") +var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage") + +func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if ev.ValueFrom == nil { + return allErrs + } + + numSources := 0 + + if ev.ValueFrom.FieldRef != nil { + numSources++ + allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validEnvDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) + } + if ev.ValueFrom.ResourceFieldRef != nil { + numSources++ + allErrs = append(allErrs, validateContainerResourceFieldSelector(ev.ValueFrom.ResourceFieldRef, &validContainerResourceFieldPathExpressions, fldPath.Child("resourceFieldRef"), false)...) + } + if ev.ValueFrom.ConfigMapKeyRef != nil { + numSources++ + allErrs = append(allErrs, validateConfigMapKeySelector(ev.ValueFrom.ConfigMapKeyRef, fldPath.Child("configMapKeyRef"))...) + } + if ev.ValueFrom.SecretKeyRef != nil { + numSources++ + allErrs = append(allErrs, validateSecretKeySelector(ev.ValueFrom.SecretKeyRef, fldPath.Child("secretKeyRef"))...) 
+ } + + if numSources == 0 { + allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `fieldRef`, `resourceFieldRef`, `configMapKeyRef` or `secretKeyRef`")) + } else if len(ev.Value) != 0 { + if numSources != 0 { + allErrs = append(allErrs, field.Invalid(fldPath, "", "may not be specified when `value` is not empty")) + } + } else if numSources > 1 { + allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time")) + } + + return allErrs +} + +func validateObjectFieldSelector(fs *core.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(fs.APIVersion) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("apiVersion"), "")) + return allErrs + } + if len(fs.FieldPath) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), "")) + return allErrs + } + + internalFieldPath, _, err := podshelper.ConvertDownwardAPIFieldLabel(fs.APIVersion, fs.FieldPath, "") + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err))) + return allErrs + } + + if path, subscript, ok := fieldpath.SplitMaybeSubscriptedPath(internalFieldPath); ok { + switch path { + case "metadata.annotations": + for _, msg := range validation.IsQualifiedName(strings.ToLower(subscript)) { + allErrs = append(allErrs, field.Invalid(fldPath, subscript, msg)) + } + case "metadata.labels": + for _, msg := range validation.IsQualifiedName(subscript) { + allErrs = append(allErrs, field.Invalid(fldPath, subscript, msg)) + } + default: + allErrs = append(allErrs, field.Invalid(fldPath, path, "does not support subscript")) + } + } else if !expressions.Has(path) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), path, expressions.List())) + return allErrs + } + + return allErrs +} + +func fsResourceIsEphemeralStorage(resource string) bool { + if resource == "limits.ephemeral-storage" || resource == "requests.ephemeral-storage" { + return true + } + return false +} + +func validateContainerResourceFieldSelector(fs *core.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList { + allErrs := field.ErrorList{} + + if volume && len(fs.ContainerName) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("containerName"), "")) + } else if len(fs.Resource) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("resource"), "")) + } else if !expressions.Has(fs.Resource) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("resource"), fs.Resource, expressions.List())) + } else if fsResourceIsEphemeralStorage(fs.Resource) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + allErrs = append(allErrs, field.Forbidden(fldPath, "Containers' ephemeral storage requests/limits disabled by feature-gate for Downward API")) + } + allErrs = append(allErrs, validateContainerResourceDivisor(fs.Resource, fs.Divisor, fldPath)...) 
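The branch above makes `value` and `valueFrom` mutually exclusive and requires exactly one source inside `valueFrom`. A minimal sketch of the rejected combination (illustrative only, same vendored-import assumption as earlier):

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/util/validation/field"
    	"k8s.io/kubernetes/pkg/apis/core"
    	"k8s.io/kubernetes/pkg/apis/core/validation"
    )

    func main() {
    	envs := []core.EnvVar{{
    		Name:  "POD_NAME",
    		Value: "literal", // setting both value and valueFrom is rejected
    		ValueFrom: &core.EnvVarSource{
    			FieldRef: &core.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"},
    		},
    	}}
    	for _, err := range validation.ValidateEnv(envs, field.NewPath("env")) {
    		fmt.Println(err) // env[0].valueFrom: ... may not be specified when `value` is not empty
    	}
    }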
+ return allErrs +} + +func ValidateEnvFrom(vars []core.EnvFromSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, ev := range vars { + idxPath := fldPath.Index(i) + if len(ev.Prefix) > 0 { + for _, msg := range validation.IsEnvVarName(ev.Prefix) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("prefix"), ev.Prefix, msg)) + } + } + + numSources := 0 + if ev.ConfigMapRef != nil { + numSources++ + allErrs = append(allErrs, validateConfigMapEnvSource(ev.ConfigMapRef, idxPath.Child("configMapRef"))...) + } + if ev.SecretRef != nil { + numSources++ + allErrs = append(allErrs, validateSecretEnvSource(ev.SecretRef, idxPath.Child("secretRef"))...) + } + + if numSources == 0 { + allErrs = append(allErrs, field.Invalid(fldPath, "", "must specify one of: `configMapRef` or `secretRef`")) + } else if numSources > 1 { + allErrs = append(allErrs, field.Invalid(fldPath, "", "may not have more than one field specified at a time")) + } + } + return allErrs +} + +func validateConfigMapEnvSource(configMapSource *core.ConfigMapEnvSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(configMapSource.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else { + for _, msg := range ValidateConfigMapName(configMapSource.Name, true) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), configMapSource.Name, msg)) + } + } + return allErrs +} + +func validateSecretEnvSource(secretSource *core.SecretEnvSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(secretSource.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else { + for _, msg := range ValidateSecretName(secretSource.Name, true) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), secretSource.Name, msg)) + } + } + return allErrs +} + +var validContainerResourceDivisorForCPU = sets.NewString("1m", "1") +var validContainerResourceDivisorForMemory = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") +var validContainerResourceDivisorForEphemeralStorage = sets.NewString("1", "1k", "1M", "1G", "1T", "1P", "1E", "1Ki", "1Mi", "1Gi", "1Ti", "1Pi", "1Ei") + +func validateContainerResourceDivisor(rName string, divisor resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + unsetDivisor := resource.Quantity{} + if unsetDivisor.Cmp(divisor) == 0 { + return allErrs + } + switch rName { + case "limits.cpu", "requests.cpu": + if !validContainerResourceDivisorForCPU.Has(divisor.String()) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1m and 1 are supported with the cpu resource")) + } + case "limits.memory", "requests.memory": + if !validContainerResourceDivisorForMemory.Has(divisor.String()) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the memory resource")) + } + case "limits.ephemeral-storage", "requests.ephemeral-storage": + if !validContainerResourceDivisorForEphemeralStorage.Has(divisor.String()) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("divisor"), rName, "only divisor's values 1, 1k, 1M, 1G, 1T, 1P, 1E, 1Ki, 1Mi, 1Gi, 1Ti, 1Pi, 1Ei are supported with the local ephemeral storage resource")) + } + } + return allErrs +} + +func validateConfigMapKeySelector(s *core.ConfigMapKeySelector, 
fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + nameFn := ValidateNameFunc(ValidateSecretName) + for _, msg := range nameFn(s.Name, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg)) + } + if len(s.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) + } else { + for _, msg := range validation.IsConfigMapKey(s.Key) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg)) + } + } + + return allErrs +} + +func validateSecretKeySelector(s *core.SecretKeySelector, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + nameFn := ValidateNameFunc(ValidateSecretName) + for _, msg := range nameFn(s.Name, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), s.Name, msg)) + } + if len(s.Key) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) + } else { + for _, msg := range validation.IsConfigMapKey(s.Key) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), s.Key, msg)) + } + } + + return allErrs +} + +func GetVolumeMountMap(mounts []core.VolumeMount) map[string]string { + volmounts := make(map[string]string) + + for _, mnt := range mounts { + volmounts[mnt.Name] = mnt.MountPath + } + + return volmounts +} + +func GetVolumeDeviceMap(devices []core.VolumeDevice) map[string]string { + voldevices := make(map[string]string) + + for _, dev := range devices { + voldevices[dev.Name] = dev.DevicePath + } + + return voldevices +} + +func ValidateVolumeMounts(mounts []core.VolumeMount, voldevices map[string]string, volumes map[string]core.VolumeSource, container *core.Container, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + mountpoints := sets.NewString() + + for i, mnt := range mounts { + idxPath := fldPath.Index(i) + if len(mnt.Name) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) + } + if !IsMatchedVolume(mnt.Name, volumes) { + allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), mnt.Name)) + } + if len(mnt.MountPath) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("mountPath"), "")) + } + if mountpoints.Has(mnt.MountPath) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique")) + } + mountpoints.Insert(mnt.MountPath) + + // check for overlap with VolumeDevice + if mountNameAlreadyExists(mnt.Name, voldevices) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), mnt.Name, "must not already exist in volumeDevices")) + } + if mountPathAlreadyExists(mnt.MountPath, voldevices) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must not already exist as a path in volumeDevices")) + } + + if len(mnt.SubPath) > 0 { + if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeSubpath) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("subPath"), "subPath is disabled by feature-gate")) + } else { + allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPath, fldPath.Child("subPath"))...) + } + } + + if mnt.MountPropagation != nil { + allErrs = append(allErrs, validateMountPropagation(mnt.MountPropagation, container, fldPath.Child("mountPropagation"))...) 
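ValidateVolumeMounts cross-checks each mount against the pod's declared volumes and against volumeDevices. A sketch of the duplicate-path and unknown-name cases (illustrative only, not part of the vendored diff):

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/util/validation/field"
    	"k8s.io/kubernetes/pkg/apis/core"
    	"k8s.io/kubernetes/pkg/apis/core/validation"
    )

    func main() {
    	vols := map[string]core.VolumeSource{
    		"data": {EmptyDir: &core.EmptyDirVolumeSource{}},
    	}
    	mounts := []core.VolumeMount{
    		{Name: "data", MountPath: "/var/data"},
    		{Name: "data", MountPath: "/var/data"}, // same path twice -> "must be unique"
    		{Name: "missing", MountPath: "/opt"},   // not declared in vols -> NotFound
    	}
    	ctr := &core.Container{Name: "app", Image: "nginx"}
    	for _, err := range validation.ValidateVolumeMounts(mounts, nil, vols, ctr, field.NewPath("volumeMounts")) {
    		fmt.Println(err)
    	}
    }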
+ } + } + return allErrs +} + +func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]string, volumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + devicepath := sets.NewString() + devicename := sets.NewString() + + if devices != nil && !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("volumeDevices"), "Container volumeDevices is disabled by feature-gate")) + return allErrs + } + if devices != nil { + for i, dev := range devices { + idxPath := fldPath.Index(i) + devName := dev.Name + devPath := dev.DevicePath + didMatch, isPVC := isMatchedDevice(devName, volumes) + if len(devName) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) + } + if devicename.Has(devName) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must be unique")) + } + // Must be PersistentVolumeClaim volume source + if didMatch && !isPVC { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "can only use volume source type of PersistentVolumeClaim for block mode")) + } + if !didMatch { + allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), devName)) + } + if len(devPath) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("devicePath"), "")) + } + if devicepath.Has(devPath) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must be unique")) + } + if len(devPath) > 0 && len(validatePathNoBacksteps(devPath, fldPath.Child("devicePath"))) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "can not contain backsteps ('..')")) + } else { + devicepath.Insert(devPath) + } + // check for overlap with VolumeMount + if deviceNameAlreadyExists(devName, volmounts) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must not already exist in volumeMounts")) + } + if devicePathAlreadyExists(devPath, volmounts) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must not already exist as a path in volumeMounts")) + } + if len(devName) > 0 { + devicename.Insert(devName) + } + } + } + return allErrs +} + +func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if probe == nil { + return allErrs + } + allErrs = append(allErrs, validateHandler(&probe.Handler, fldPath)...) + + allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.InitialDelaySeconds), fldPath.Child("initialDelaySeconds"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.TimeoutSeconds), fldPath.Child("timeoutSeconds"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.PeriodSeconds), fldPath.Child("periodSeconds"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.SuccessThreshold), fldPath.Child("successThreshold"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(probe.FailureThreshold), fldPath.Child("failureThreshold"))...) 
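ValidateVolumeDevices only admits PVC-backed volumes for block mode and rejects duplicate names, duplicate paths, and backstepping device paths. An illustrative sketch; note it assumes the BlockVolume feature gate is enabled in the vendored revision, otherwise a single Forbidden error short-circuits:

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/util/validation/field"
    	"k8s.io/kubernetes/pkg/apis/core"
    	"k8s.io/kubernetes/pkg/apis/core/validation"
    )

    func main() {
    	vols := map[string]core.VolumeSource{
    		"blk": {PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "blk-claim"}},
    		"dir": {EmptyDir: &core.EmptyDirVolumeSource{}},
    	}
    	devices := []core.VolumeDevice{
    		{Name: "blk", DevicePath: "/dev/xvda"},
    		{Name: "dir", DevicePath: "/dev/xvdb"}, // not PVC-backed: rejected for block mode
    		{Name: "blk", DevicePath: "../xvda"},   // duplicate name, and backsteps in the path
    	}
    	for _, err := range validation.ValidateVolumeDevices(devices, nil, vols, field.NewPath("volumeDevices")) {
    		fmt.Println(err)
    	}
    }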
+ return allErrs +} + +func validateClientIPAffinityConfig(config *core.SessionAffinityConfig, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if config == nil { + allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP))) + return allErrs + } + if config.ClientIP == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("clientIP"), fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP))) + return allErrs + } + if config.ClientIP.TimeoutSeconds == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("clientIP").Child("timeoutSeconds"), fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP))) + return allErrs + } + allErrs = append(allErrs, validateAffinityTimeout(config.ClientIP.TimeoutSeconds, fldPath.Child("clientIP").Child("timeoutSeconds"))...) + + return allErrs +} + +func validateAffinityTimeout(timeout *int32, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if *timeout <= 0 || *timeout > core.MaxClientIPServiceAffinitySeconds { + allErrs = append(allErrs, field.Invalid(fldPath, timeout, fmt.Sprintf("must be greater than 0 and less than %d", core.MaxClientIPServiceAffinitySeconds))) + } + return allErrs +} + +// AccumulateUniqueHostPorts extracts each HostPort of each Container, +// accumulating the results and returning an error if any ports conflict. +func AccumulateUniqueHostPorts(containers []core.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for ci, ctr := range containers { + idxPath := fldPath.Index(ci) + portsPath := idxPath.Child("ports") + for pi := range ctr.Ports { + idxPath := portsPath.Index(pi) + port := ctr.Ports[pi].HostPort + if port == 0 { + continue + } + str := fmt.Sprintf("%s/%s/%d", ctr.Ports[pi].Protocol, ctr.Ports[pi].HostIP, port) + if accumulator.Has(str) { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("hostPort"), str)) + } else { + accumulator.Insert(str) + } + } + } + return allErrs +} + +// checkHostPortConflicts checks for colliding Port.HostPort values across +// a slice of containers. +func checkHostPortConflicts(containers []core.Container, fldPath *field.Path) field.ErrorList { + allPorts := sets.String{} + return AccumulateUniqueHostPorts(containers, &allPorts, fldPath) +} + +func validateExecAction(exec *core.ExecAction, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if len(exec.Command) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("command"), "")) + } + return allErrors +} + +var supportedHTTPSchemes = sets.NewString(string(core.URISchemeHTTP), string(core.URISchemeHTTPS)) + +func validateHTTPGetAction(http *core.HTTPGetAction, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if len(http.Path) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("path"), "")) + } + allErrors = append(allErrors, ValidatePortNumOrName(http.Port, fldPath.Child("port"))...) 
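AccumulateUniqueHostPorts keys each host port as protocol/hostIP/port, so the same HostPort on different protocols or host IPs does not collide. Sketch (illustrative only):

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/util/sets"
    	"k8s.io/apimachinery/pkg/util/validation/field"
    	"k8s.io/kubernetes/pkg/apis/core"
    	"k8s.io/kubernetes/pkg/apis/core/validation"
    )

    func main() {
    	containers := []core.Container{
    		{Name: "a", Ports: []core.ContainerPort{{ContainerPort: 8080, HostPort: 80, Protocol: core.ProtocolTCP}}},
    		{Name: "b", Ports: []core.ContainerPort{{ContainerPort: 9090, HostPort: 80, Protocol: core.ProtocolTCP}}},
    	}
    	accumulator := sets.String{}
    	// The second container collides on the key "TCP//80" and is reported
    	// as a Duplicate.
    	errs := validation.AccumulateUniqueHostPorts(containers, &accumulator, field.NewPath("containers"))
    	fmt.Println(errs)
    }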
+ if !supportedHTTPSchemes.Has(string(http.Scheme)) { + allErrors = append(allErrors, field.NotSupported(fldPath.Child("scheme"), http.Scheme, supportedHTTPSchemes.List())) + } + for _, header := range http.HTTPHeaders { + for _, msg := range validation.IsHTTPHeaderName(header.Name) { + allErrors = append(allErrors, field.Invalid(fldPath.Child("httpHeaders"), header.Name, msg)) + } + } + return allErrors +} + +func ValidatePortNumOrName(port intstr.IntOrString, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if port.Type == intstr.Int { + for _, msg := range validation.IsValidPortNum(port.IntValue()) { + allErrs = append(allErrs, field.Invalid(fldPath, port.IntValue(), msg)) + } + } else if port.Type == intstr.String { + for _, msg := range validation.IsValidPortName(port.StrVal) { + allErrs = append(allErrs, field.Invalid(fldPath, port.StrVal, msg)) + } + } else { + allErrs = append(allErrs, field.InternalError(fldPath, fmt.Errorf("unknown type: %v", port.Type))) + } + return allErrs +} + +func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) field.ErrorList { + return ValidatePortNumOrName(tcp.Port, fldPath.Child("port")) +} + +func validateHandler(handler *core.Handler, fldPath *field.Path) field.ErrorList { + numHandlers := 0 + allErrors := field.ErrorList{} + if handler.Exec != nil { + if numHandlers > 0 { + allErrors = append(allErrors, field.Forbidden(fldPath.Child("exec"), "may not specify more than 1 handler type")) + } else { + numHandlers++ + allErrors = append(allErrors, validateExecAction(handler.Exec, fldPath.Child("exec"))...) + } + } + if handler.HTTPGet != nil { + if numHandlers > 0 { + allErrors = append(allErrors, field.Forbidden(fldPath.Child("httpGet"), "may not specify more than 1 handler type")) + } else { + numHandlers++ + allErrors = append(allErrors, validateHTTPGetAction(handler.HTTPGet, fldPath.Child("httpGet"))...) + } + } + if handler.TCPSocket != nil { + if numHandlers > 0 { + allErrors = append(allErrors, field.Forbidden(fldPath.Child("tcpSocket"), "may not specify more than 1 handler type")) + } else { + numHandlers++ + allErrors = append(allErrors, validateTCPSocketAction(handler.TCPSocket, fldPath.Child("tcpSocket"))...) + } + } + if numHandlers == 0 { + allErrors = append(allErrors, field.Required(fldPath, "must specify a handler type")) + } + return allErrors +} + +func validateLifecycle(lifecycle *core.Lifecycle, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if lifecycle.PostStart != nil { + allErrs = append(allErrs, validateHandler(lifecycle.PostStart, fldPath.Child("postStart"))...) + } + if lifecycle.PreStop != nil { + allErrs = append(allErrs, validateHandler(lifecycle.PreStop, fldPath.Child("preStop"))...) 
+ } + return allErrs +} + +var supportedPullPolicies = sets.NewString(string(core.PullAlways), string(core.PullIfNotPresent), string(core.PullNever)) + +func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + + switch policy { + case core.PullAlways, core.PullIfNotPresent, core.PullNever: + break + case "": + allErrors = append(allErrors, field.Required(fldPath, "")) + default: + allErrors = append(allErrors, field.NotSupported(fldPath, policy, supportedPullPolicies.List())) + } + + return allErrors +} + +func validateInitContainers(containers, otherContainers []core.Container, deviceVolumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if len(containers) > 0 { + allErrs = append(allErrs, validateContainers(containers, true, deviceVolumes, fldPath)...) + } + + allNames := sets.String{} + for _, ctr := range otherContainers { + allNames.Insert(ctr.Name) + } + for i, ctr := range containers { + idxPath := fldPath.Index(i) + if allNames.Has(ctr.Name) { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name)) + } + if len(ctr.Name) > 0 { + allNames.Insert(ctr.Name) + } + if ctr.Lifecycle != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers")) + } + if ctr.LivenessProbe != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers")) + } + if ctr.ReadinessProbe != nil { + allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers")) + } + } + return allErrs +} + +func validateContainers(containers []core.Container, isInitContainers bool, volumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(containers) == 0 { + return append(allErrs, field.Required(fldPath, "")) + } + + allNames := sets.String{} + for i, ctr := range containers { + idxPath := fldPath.Index(i) + namePath := idxPath.Child("name") + volMounts := GetVolumeMountMap(ctr.VolumeMounts) + volDevices := GetVolumeDeviceMap(ctr.VolumeDevices) + + if len(ctr.Name) == 0 { + allErrs = append(allErrs, field.Required(namePath, "")) + } else { + allErrs = append(allErrs, ValidateDNS1123Label(ctr.Name, namePath)...) + } + if allNames.Has(ctr.Name) { + allErrs = append(allErrs, field.Duplicate(namePath, ctr.Name)) + } else { + allNames.Insert(ctr.Name) + } + // TODO: do not validate leading and trailing whitespace to preserve backward compatibility. + // for example: https://github.com/openshift/origin/issues/14659 image = " " is special token in pod template + // others may have done similar + if len(ctr.Image) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("image"), "")) + } + if ctr.Lifecycle != nil { + allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...) + } + allErrs = append(allErrs, validateProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...) 
+ // Liveness-specific validation + if ctr.LivenessProbe != nil && ctr.LivenessProbe.SuccessThreshold != 1 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe", "successThreshold"), ctr.LivenessProbe.SuccessThreshold, "must be 1")) + } + + switch ctr.TerminationMessagePolicy { + case core.TerminationMessageReadFile, core.TerminationMessageFallbackToLogsOnError: + case "": + allErrs = append(allErrs, field.Required(idxPath.Child("terminationMessagePolicy"), "must be 'File' or 'FallbackToLogsOnError'")) + default: + allErrs = append(allErrs, field.Invalid(idxPath.Child("terminationMessagePolicy"), ctr.TerminationMessagePolicy, "must be 'File' or 'FallbackToLogsOnError'")) + } + + allErrs = append(allErrs, validateProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...) + allErrs = append(allErrs, validateContainerPorts(ctr.Ports, idxPath.Child("ports"))...) + allErrs = append(allErrs, ValidateEnv(ctr.Env, idxPath.Child("env"))...) + allErrs = append(allErrs, ValidateEnvFrom(ctr.EnvFrom, idxPath.Child("envFrom"))...) + allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, &ctr, idxPath.Child("volumeMounts"))...) + allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, idxPath.Child("volumeDevices"))...) + allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, idxPath.Child("imagePullPolicy"))...) + allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...) + allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, idxPath.Child("securityContext"))...) + } + + if isInitContainers { + // check initContainers one by one since they are running in sequential order. + for _, initContainer := range containers { + allErrs = append(allErrs, checkHostPortConflicts([]core.Container{initContainer}, fldPath)...) + } + } else { + // Check for colliding ports across all containers. + allErrs = append(allErrs, checkHostPortConflicts(containers, fldPath)...) 
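validateInitContainers layers extra restrictions on top of validateContainers: init containers may not declare lifecycle hooks or probes, and their host ports are checked one container at a time because they run sequentially. Since both helpers are unexported, the sketch below goes through the exported ValidatePodSpec entry point (illustrative only):

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/util/validation/field"
    	"k8s.io/kubernetes/pkg/apis/core"
    	"k8s.io/kubernetes/pkg/apis/core/validation"
    )

    func main() {
    	app := core.Container{
    		Name:                     "app",
    		Image:                    "nginx",
    		ImagePullPolicy:          core.PullIfNotPresent,
    		TerminationMessagePolicy: core.TerminationMessageReadFile,
    	}
    	initCtr := app
    	initCtr.Name = "setup"
    	// A well-formed probe is still rejected on an init container.
    	initCtr.LivenessProbe = &core.Probe{
    		Handler:          core.Handler{Exec: &core.ExecAction{Command: []string{"true"}}},
    		SuccessThreshold: 1,
    	}
    	spec := core.PodSpec{
    		InitContainers: []core.Container{initCtr},
    		Containers:     []core.Container{app},
    		RestartPolicy:  core.RestartPolicyAlways,
    		DNSPolicy:      core.DNSClusterFirst,
    	}
    	for _, err := range validation.ValidatePodSpec(&spec, field.NewPath("spec")) {
    		fmt.Println(err) // ...initContainers[0].livenessProbe: must not be set for init containers
    	}
    }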
+ } + + return allErrs +} + +func validateRestartPolicy(restartPolicy *core.RestartPolicy, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + switch *restartPolicy { + case core.RestartPolicyAlways, core.RestartPolicyOnFailure, core.RestartPolicyNever: + break + case "": + allErrors = append(allErrors, field.Required(fldPath, "")) + default: + validValues := []string{string(core.RestartPolicyAlways), string(core.RestartPolicyOnFailure), string(core.RestartPolicyNever)} + allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues)) + } + + return allErrors +} + +func validateDNSPolicy(dnsPolicy *core.DNSPolicy, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + switch *dnsPolicy { + case core.DNSClusterFirstWithHostNet, core.DNSClusterFirst, core.DNSDefault: + case core.DNSNone: + if !utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) { + allErrors = append(allErrors, field.Invalid(fldPath, dnsPolicy, "DNSPolicy: can not use 'None', custom pod DNS is disabled by feature gate")) + } + case "": + allErrors = append(allErrors, field.Required(fldPath, "")) + default: + validValues := []string{string(core.DNSClusterFirstWithHostNet), string(core.DNSClusterFirst), string(core.DNSDefault)} + if utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) { + validValues = append(validValues, string(core.DNSNone)) + } + allErrors = append(allErrors, field.NotSupported(fldPath, dnsPolicy, validValues)) + } + return allErrors +} + +const ( + // Limits on various DNS parameters. These are derived from + // restrictions in Linux libc name resolution handling. + // Max number of DNS name servers. + MaxDNSNameservers = 3 + // Max number of domains in search path. + MaxDNSSearchPaths = 6 + // Max number of characters in search path. + MaxDNSSearchListChars = 256 +) + +func validateReadinessGates(readinessGates []core.PodReadinessGate, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if !utilfeature.DefaultFeatureGate.Enabled(features.PodReadinessGates) && len(readinessGates) > 0 { + return append(allErrs, field.Forbidden(fldPath, "PodReadinessGates is disabled by feature gate")) + } + for i, value := range readinessGates { + for _, msg := range validation.IsQualifiedName(string(value.ConditionType)) { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("conditionType"), string(value.ConditionType), msg)) + } + } + return allErrs +} + +func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolicy, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + // Validate DNSNone case. Must provide at least one DNS name server. + if utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) && dnsPolicy != nil && *dnsPolicy == core.DNSNone { + if dnsConfig == nil { + return append(allErrs, field.Required(fldPath, fmt.Sprintf("must provide `dnsConfig` when `dnsPolicy` is %s", core.DNSNone))) + } + if len(dnsConfig.Nameservers) == 0 { + return append(allErrs, field.Required(fldPath.Child("nameservers"), fmt.Sprintf("must provide at least one DNS nameserver when `dnsPolicy` is %s", core.DNSNone))) + } + } + + if dnsConfig != nil { + if !utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) { + return append(allErrs, field.Forbidden(fldPath, "DNSConfig: custom pod DNS is disabled by feature gate")) + } + + // Validate nameservers. 
+ if len(dnsConfig.Nameservers) > MaxDNSNameservers { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers"), dnsConfig.Nameservers, fmt.Sprintf("must not have more than %v nameservers", MaxDNSNameservers))) + } + for i, ns := range dnsConfig.Nameservers { + if ip := net.ParseIP(ns); ip == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers").Index(i), ns, "must be valid IP address")) + } + } + // Validate searches. + if len(dnsConfig.Searches) > MaxDNSSearchPaths { + allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v search paths", MaxDNSSearchPaths))) + } + // Include the space between search paths. + if len(strings.Join(dnsConfig.Searches, " ")) > MaxDNSSearchListChars { + allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, "must not have more than 256 characters (including spaces) in the search list")) + } + for i, search := range dnsConfig.Searches { + allErrs = append(allErrs, ValidateDNS1123Subdomain(search, fldPath.Child("searches").Index(i))...) + } + // Validate options. + for i, option := range dnsConfig.Options { + if len(option.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("options").Index(i), "must not be empty")) + } + } + } + return allErrs +} + +func validateHostNetwork(hostNetwork bool, containers []core.Container, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if hostNetwork { + for i, container := range containers { + portsPath := fldPath.Index(i).Child("ports") + for i, port := range container.Ports { + idxPath := portsPath.Index(i) + if port.HostPort != port.ContainerPort { + allErrors = append(allErrors, field.Invalid(idxPath.Child("containerPort"), port.ContainerPort, "must match `hostPort` when `hostNetwork` is true")) + } + } + } + } + return allErrors +} + +// validateImagePullSecrets checks to make sure the pull secrets are well +// formed. Right now, we only expect name to be set (it's the only field). If +// this ever changes and someone decides to set those fields, we'd like to +// know. +func validateImagePullSecrets(imagePullSecrets []core.LocalObjectReference, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + for i, currPullSecret := range imagePullSecrets { + idxPath := fldPath.Index(i) + strippedRef := core.LocalObjectReference{Name: currPullSecret.Name} + if !reflect.DeepEqual(strippedRef, currPullSecret) { + allErrors = append(allErrors, field.Invalid(idxPath, currPullSecret, "only name may be set")) + } + } + return allErrors +} + +// validateAffinity checks if given affinities are valid +func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if affinity != nil { + if affinity.NodeAffinity != nil { + allErrs = append(allErrs, validateNodeAffinity(affinity.NodeAffinity, fldPath.Child("nodeAffinity"))...) + } + if affinity.PodAffinity != nil { + allErrs = append(allErrs, validatePodAffinity(affinity.PodAffinity, fldPath.Child("podAffinity"))...) + } + if affinity.PodAntiAffinity != nil { + allErrs = append(allErrs, validatePodAntiAffinity(affinity.PodAntiAffinity, fldPath.Child("podAntiAffinity"))...) 
+ } + } + + return allErrs +} + +func validateTaintEffect(effect *core.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { + if !allowEmpty && len(*effect) == 0 { + return field.ErrorList{field.Required(fldPath, "")} + } + + allErrors := field.ErrorList{} + switch *effect { + // TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit. + case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoExecute: + // case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoScheduleNoAdmit, core.TaintEffectNoExecute: + default: + validValues := []string{ + string(core.TaintEffectNoSchedule), + string(core.TaintEffectPreferNoSchedule), + string(core.TaintEffectNoExecute), + // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit. + // string(core.TaintEffectNoScheduleNoAdmit), + } + allErrors = append(allErrors, field.NotSupported(fldPath, *effect, validValues)) + } + return allErrors +} + +// validateOnlyAddedTolerations validates updated pod tolerations. +func validateOnlyAddedTolerations(newTolerations []core.Toleration, oldTolerations []core.Toleration, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, old := range oldTolerations { + found := false + old.TolerationSeconds = nil + for _, new := range newTolerations { + new.TolerationSeconds = nil + if reflect.DeepEqual(old, new) { + found = true + break + } + } + if !found { + allErrs = append(allErrs, field.Forbidden(fldPath, "existing toleration can not be modified except its tolerationSeconds")) + return allErrs + } + } + + allErrs = append(allErrs, ValidateTolerations(newTolerations, fldPath)...) + return allErrs +} + +func ValidateHostAliases(hostAliases []core.HostAlias, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, hostAlias := range hostAliases { + if ip := net.ParseIP(hostAlias.IP); ip == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), hostAlias.IP, "must be valid IP address")) + } + for _, hostname := range hostAlias.Hostnames { + allErrs = append(allErrs, ValidateDNS1123Subdomain(hostname, fldPath.Child("hostnames"))...) + } + } + return allErrs +} + +// ValidateTolerations tests if given tolerations have valid data. +func ValidateTolerations(tolerations []core.Toleration, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + for i, toleration := range tolerations { + idxPath := fldPath.Index(i) + // validate the toleration key + if len(toleration.Key) > 0 { + allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(toleration.Key, idxPath.Child("key"))...) 
+ } + + // empty toleration key with Exists operator and empty value means match all taints + if len(toleration.Key) == 0 && toleration.Operator != core.TolerationOpExists { + allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Operator, + "operator must be Exists when `key` is empty, which means \"match all values and all keys\"")) + } + + if toleration.TolerationSeconds != nil && toleration.Effect != core.TaintEffectNoExecute { + allErrors = append(allErrors, field.Invalid(idxPath.Child("effect"), toleration.Effect, + "effect must be 'NoExecute' when `tolerationSeconds` is set")) + } + + // validate toleration operator and value + switch toleration.Operator { + // empty operator means Equal + case core.TolerationOpEqual, "": + if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 { + allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";"))) + } + case core.TolerationOpExists: + if len(toleration.Value) > 0 { + allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) + } + default: + validValues := []string{string(core.TolerationOpEqual), string(core.TolerationOpExists)} + allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues)) + } + + // validate toleration effect, empty toleration effect means match all taint effects + if len(toleration.Effect) > 0 { + allErrors = append(allErrors, validateTaintEffect(&toleration.Effect, true, idxPath.Child("effect"))...) + } + } + return allErrors +} + +func toResourceNames(resources core.ResourceList) []core.ResourceName { + result := []core.ResourceName{} + for resourceName := range resources { + result = append(result, resourceName) + } + return result +} + +func toSet(resourceNames []core.ResourceName) sets.String { + result := sets.NewString() + for _, resourceName := range resourceNames { + result.Insert(string(resourceName)) + } + return result +} + +func toContainerResourcesSet(ctr *core.Container) sets.String { + resourceNames := toResourceNames(ctr.Resources.Requests) + resourceNames = append(resourceNames, toResourceNames(ctr.Resources.Limits)...) + return toSet(resourceNames) +} + +// validateContainersOnlyForPod does additional validation for containers on a pod versus a pod template +// it only does additive validation of fields not covered in validateContainers +func validateContainersOnlyForPod(containers []core.Container, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, ctr := range containers { + idxPath := fldPath.Index(i) + if len(ctr.Image) != len(strings.TrimSpace(ctr.Image)) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("image"), ctr.Image, "must not have leading or trailing whitespace")) + } + } + return allErrs +} + +// ValidatePod tests if required fields in the pod are set. +func ValidatePod(pod *core.Pod) field.ErrorList { + fldPath := field.NewPath("metadata") + allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath) + allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, field.NewPath("spec"))...) 
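ValidateTolerations encodes the key/operator/effect coupling laid out above. Sketch of one valid and two invalid tolerations (illustrative only):

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/util/validation/field"
    	"k8s.io/kubernetes/pkg/apis/core"
    	"k8s.io/kubernetes/pkg/apis/core/validation"
    )

    func main() {
    	tolerations := []core.Toleration{
    		// Valid: an empty key with Exists matches all taints.
    		{Operator: core.TolerationOpExists},
    		// Invalid: an empty key requires the Exists operator.
    		{Operator: core.TolerationOpEqual, Value: "v"},
    		// Invalid: tolerationSeconds only makes sense with effect NoExecute.
    		{Key: "k", Operator: core.TolerationOpExists, Effect: core.TaintEffectNoSchedule, TolerationSeconds: new(int64)},
    	}
    	for _, err := range validation.ValidateTolerations(tolerations, field.NewPath("tolerations")) {
    		fmt.Println(err)
    	}
    }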
+ + // we do additional validation only pertinent for pods and not pod templates + // this was done to preserve backwards compatibility + specPath := field.NewPath("spec") + + if pod.Spec.ServiceAccountName == "" { + for vi, volume := range pod.Spec.Volumes { + path := specPath.Child("volumes").Index(vi).Child("projected") + if volume.Projected != nil { + for si, source := range volume.Projected.Sources { + saPath := path.Child("sources").Index(si).Child("serviceAccountToken") + if source.ServiceAccountToken != nil { + allErrs = append(allErrs, field.Forbidden(saPath, "must not be specified when serviceAccountName is not set")) + } + } + } + } + } + + allErrs = append(allErrs, validateContainersOnlyForPod(pod.Spec.Containers, specPath.Child("containers"))...) + allErrs = append(allErrs, validateContainersOnlyForPod(pod.Spec.InitContainers, specPath.Child("initContainers"))...) + + if utilfeature.DefaultFeatureGate.Enabled(features.HugePages) { + hugePageResources := sets.NewString() + for i := range pod.Spec.Containers { + resourceSet := toContainerResourcesSet(&pod.Spec.Containers[i]) + for resourceStr := range resourceSet { + if v1helper.IsHugePageResourceName(v1.ResourceName(resourceStr)) { + hugePageResources.Insert(resourceStr) + } + } + } + if len(hugePageResources) > 1 { + allErrs = append(allErrs, field.Invalid(specPath, hugePageResources, "must use a single hugepage size in a pod spec")) + } + } + + return allErrs +} + +// ValidatePodSpec tests that the specified PodSpec has valid data. +// This includes checking formatting and uniqueness. It also canonicalizes the +// structure by setting default values and implementing any backwards-compatibility +// tricks. +func ValidatePodSpec(spec *core.PodSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + vols, vErrs := ValidateVolumes(spec.Volumes, fldPath.Child("volumes")) + allErrs = append(allErrs, vErrs...) + allErrs = append(allErrs, validateContainers(spec.Containers, false, vols, fldPath.Child("containers"))...) + allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, fldPath.Child("initContainers"))...) + allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) + allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) + allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...) + allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...) + allErrs = append(allErrs, validateAffinity(spec.Affinity, fldPath.Child("affinity"))...) + allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"))...) + allErrs = append(allErrs, validateReadinessGates(spec.ReadinessGates, fldPath.Child("readinessGates"))...) 
+ if len(spec.ServiceAccountName) > 0 { + for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg)) + } + } + + if len(spec.NodeName) > 0 { + for _, msg := range ValidateNodeName(spec.NodeName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), spec.NodeName, msg)) + } + } + + if spec.ActiveDeadlineSeconds != nil { + value := *spec.ActiveDeadlineSeconds + if value < 1 || value > math.MaxInt32 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("activeDeadlineSeconds"), value, validation.InclusiveRangeError(1, math.MaxInt32))) + } + } + + if len(spec.Hostname) > 0 { + allErrs = append(allErrs, ValidateDNS1123Label(spec.Hostname, fldPath.Child("hostname"))...) + } + + if len(spec.Subdomain) > 0 { + allErrs = append(allErrs, ValidateDNS1123Label(spec.Subdomain, fldPath.Child("subdomain"))...) + } + + if len(spec.Tolerations) > 0 { + allErrs = append(allErrs, ValidateTolerations(spec.Tolerations, fldPath.Child("tolerations"))...) + } + + if len(spec.HostAliases) > 0 { + allErrs = append(allErrs, ValidateHostAliases(spec.HostAliases, fldPath.Child("hostAliases"))...) + } + + if len(spec.PriorityClassName) > 0 { + if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) { + for _, msg := range ValidatePriorityClassName(spec.PriorityClassName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("priorityClassName"), spec.PriorityClassName, msg)) + } + } + } + + if spec.RuntimeClassName != nil && utilfeature.DefaultFeatureGate.Enabled(features.RuntimeClass) { + allErrs = append(allErrs, ValidateRuntimeClassName(*spec.RuntimeClassName, fldPath.Child("runtimeClassName"))...) + } + + return allErrs +} + +// ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data +func ValidateNodeSelectorRequirement(rq core.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + switch rq.Operator { + case core.NodeSelectorOpIn, core.NodeSelectorOpNotIn: + if len(rq.Values) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) + } + case core.NodeSelectorOpExists, core.NodeSelectorOpDoesNotExist: + if len(rq.Values) > 0 { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) + } + + case core.NodeSelectorOpGt, core.NodeSelectorOpLt: + if len(rq.Values) != 1 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified single value when `operator` is 'Lt' or 'Gt'")) + } + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), rq.Operator, "not a valid selector operator")) + } + + allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...) 
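
The operator switch above fixes the arity of Values per operator: In/NotIn need at least one value, Exists/DoesNotExist forbid values, and Gt/Lt require exactly one. A small sketch (same assumed imports; the label key is hypothetical):

    func nodeSelectorOperatorExample() {
    	rq := core.NodeSelectorRequirement{
    		Key:      "example.com/disk-count", // hypothetical label key
    		Operator: core.NodeSelectorOpGt,    // Gt requires exactly one value
    	}
    	// Expect a Required error on matchExpressions[0].values:
    	// Gt and Lt take exactly one value.
    	fmt.Println(validation.ValidateNodeSelectorRequirement(rq, field.NewPath("matchExpressions").Index(0)))
    }
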
+ + return allErrs +} + +var nodeFieldSelectorValidators = map[string]func(string, bool) []string{ + core.ObjectNameField: ValidateNodeName, +} + +// ValidateNodeFieldSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data +func ValidateNodeFieldSelectorRequirement(req core.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + switch req.Operator { + case core.NodeSelectorOpIn, core.NodeSelectorOpNotIn: + if len(req.Values) != 1 { + allErrs = append(allErrs, field.Required(fldPath.Child("values"), + "must be only one value when `operator` is 'In' or 'NotIn' for node field selector")) + } + default: + allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), req.Operator, "not a valid selector operator")) + } + + if vf, found := nodeFieldSelectorValidators[req.Key]; !found { + allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), req.Key, "not a valid field selector key")) + } else { + for i, v := range req.Values { + for _, msg := range vf(v, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("values").Index(i), v, msg)) + } + } + } + + return allErrs +} + +// ValidateNodeSelectorTerm tests that the specified node selector term has valid data +func ValidateNodeSelectorTerm(term core.NodeSelectorTerm, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for j, req := range term.MatchExpressions { + allErrs = append(allErrs, ValidateNodeSelectorRequirement(req, fldPath.Child("matchExpressions").Index(j))...) + } + + for j, req := range term.MatchFields { + allErrs = append(allErrs, ValidateNodeFieldSelectorRequirement(req, fldPath.Child("matchFields").Index(j))...) + } + + return allErrs +} + +// ValidateNodeSelector tests that the specified nodeSelector fields has valid data +func ValidateNodeSelector(nodeSelector *core.NodeSelector, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + termFldPath := fldPath.Child("nodeSelectorTerms") + if len(nodeSelector.NodeSelectorTerms) == 0 { + return append(allErrs, field.Required(termFldPath, "must have at least one node selector term")) + } + + for i, term := range nodeSelector.NodeSelectorTerms { + allErrs = append(allErrs, ValidateNodeSelectorTerm(term, termFldPath.Index(i))...) + } + + return allErrs +} + +// validateTopologySelectorLabelRequirement tests that the specified TopologySelectorLabelRequirement fields has valid data, +// and constructs a set containing all of its Values. +func validateTopologySelectorLabelRequirement(rq core.TopologySelectorLabelRequirement, fldPath *field.Path) (sets.String, field.ErrorList) { + allErrs := field.ErrorList{} + valueSet := make(sets.String) + valuesPath := fldPath.Child("values") + if len(rq.Values) == 0 { + allErrs = append(allErrs, field.Required(valuesPath, "")) + } + + // Validate set property of Values field + for i, value := range rq.Values { + if valueSet.Has(value) { + allErrs = append(allErrs, field.Duplicate(valuesPath.Index(i), value)) + } + valueSet.Insert(value) + } + + allErrs = append(allErrs, unversionedvalidation.ValidateLabelName(rq.Key, fldPath.Child("key"))...) + + return valueSet, allErrs +} + +// ValidateTopologySelectorTerm tests that the specified topology selector term has valid data, +// and constructs a map representing the term in raw form. 
+func ValidateTopologySelectorTerm(term core.TopologySelectorTerm, fldPath *field.Path) (map[string]sets.String, field.ErrorList) { + allErrs := field.ErrorList{} + exprMap := make(map[string]sets.String) + exprPath := fldPath.Child("matchLabelExpressions") + + if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + // Allow empty MatchLabelExpressions, in case this field becomes optional in the future. + + for i, req := range term.MatchLabelExpressions { + idxPath := exprPath.Index(i) + valueSet, exprErrs := validateTopologySelectorLabelRequirement(req, idxPath) + allErrs = append(allErrs, exprErrs...) + + // Validate no duplicate keys exist. + if _, exists := exprMap[req.Key]; exists { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("key"), req.Key)) + } + exprMap[req.Key] = valueSet + } + } else if len(term.MatchLabelExpressions) != 0 { + allErrs = append(allErrs, field.Forbidden(fldPath, "field is disabled by feature-gate VolumeScheduling")) + } + + return exprMap, allErrs +} + +// ValidateAvoidPodsInNodeAnnotations tests that the serialized AvoidPods in Node.Annotations has valid data +func ValidateAvoidPodsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + v1Avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(annotations) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), core.PreferAvoidPodsAnnotationKey, err.Error())) + return allErrs + } + var avoids core.AvoidPods + if err := corev1.Convert_v1_AvoidPods_To_core_AvoidPods(&v1Avoids, &avoids, nil); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), core.PreferAvoidPodsAnnotationKey, err.Error())) + return allErrs + } + + if len(avoids.PreferAvoidPods) != 0 { + for i, pa := range avoids.PreferAvoidPods { + idxPath := fldPath.Child(core.PreferAvoidPodsAnnotationKey).Index(i) + allErrs = append(allErrs, validatePreferAvoidPodsEntry(pa, idxPath)...) + } + } + + return allErrs +} + +// validatePreferAvoidPodsEntry tests if given PreferAvoidPodsEntry has valid data. +func validatePreferAvoidPodsEntry(avoidPodEntry core.PreferAvoidPodsEntry, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + if avoidPodEntry.PodSignature.PodController == nil { + allErrors = append(allErrors, field.Required(fldPath.Child("PodSignature"), "")) + } else { + if *(avoidPodEntry.PodSignature.PodController.Controller) != true { + allErrors = append(allErrors, + field.Invalid(fldPath.Child("PodSignature").Child("PodController").Child("Controller"), + *(avoidPodEntry.PodSignature.PodController.Controller), "must point to a controller")) + } + } + return allErrors +} + +// ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields has valid data +func ValidatePreferredSchedulingTerms(terms []core.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + for i, term := range terms { + if term.Weight <= 0 || term.Weight > 100 { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("weight"), term.Weight, "must be in the range 1-100")) + } + + allErrs = append(allErrs, ValidateNodeSelectorTerm(term.Preference, fldPath.Index(i).Child("preference"))...) 
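
ValidatePreferredSchedulingTerms above, and the weighted pod-affinity terms a few hunks below, share the same 1-100 weight window; a zero weight is rejected rather than silently contributing nothing. A sketch (same assumed imports):

    func weightRangeExample() {
    	terms := []core.PreferredSchedulingTerm{{
    		Weight: 0, // out of range; must be 1-100
    		Preference: core.NodeSelectorTerm{
    			MatchExpressions: []core.NodeSelectorRequirement{{
    				Key:      "kubernetes.io/hostname",
    				Operator: core.NodeSelectorOpExists,
    			}},
    		},
    	}}
    	// Expect: preferred[0].weight: Invalid value: 0: must be in the range 1-100
    	fmt.Println(validation.ValidatePreferredSchedulingTerms(terms, field.NewPath("preferred")))
    }
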
+ } + return allErrs +} + +// validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data +func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...) + for _, name := range podAffinityTerm.Namespaces { + for _, msg := range ValidateNamespaceName(name, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg)) + } + } + if len(podAffinityTerm.TopologyKey) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("topologyKey"), "can not be empty")) + } + return append(allErrs, unversionedvalidation.ValidateLabelName(podAffinityTerm.TopologyKey, fldPath.Child("topologyKey"))...) +} + +// validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data +func validatePodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, podAffinityTerm := range podAffinityTerms { + allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, fldPath.Index(i))...) + } + return allErrs +} + +// validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data +func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for j, weightedTerm := range weightedPodAffinityTerms { + if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 { + allErrs = append(allErrs, field.Invalid(fldPath.Index(j).Child("weight"), weightedTerm.Weight, "must be in the range 1-100")) + } + allErrs = append(allErrs, validatePodAffinityTerm(weightedTerm.PodAffinityTerm, fldPath.Index(j).Child("podAffinityTerm"))...) + } + return allErrs +} + +// validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data +func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. + // if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { + // allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, + // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + //} + if podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, validatePodAffinityTerms(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, + fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + } + if podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, + fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) + } + return allErrs +} + +// validateNodeAffinity tests that the specified nodeAffinity fields have valid data +func validateNodeAffinity(na *core.NodeAffinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO: Uncomment the next three lines once RequiredDuringSchedulingRequiredDuringExecution is implemented. 
+ // if na.RequiredDuringSchedulingRequiredDuringExecution != nil { + // allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingRequiredDuringExecution, fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + // } + if na.RequiredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, ValidateNodeSelector(na.RequiredDuringSchedulingIgnoredDuringExecution, fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + } + if len(na.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + allErrs = append(allErrs, ValidatePreferredSchedulingTerms(na.PreferredDuringSchedulingIgnoredDuringExecution, fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) + } + return allErrs +} + +// validatePodAffinity tests that the specified podAffinity fields have valid data +func validatePodAffinity(podAffinity *core.PodAffinity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. + // if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { + // allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingRequiredDuringExecution, false, + // fldPath.Child("requiredDuringSchedulingRequiredDuringExecution"))...) + //} + if podAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, validatePodAffinityTerms(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, + fldPath.Child("requiredDuringSchedulingIgnoredDuringExecution"))...) + } + if podAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil { + allErrs = append(allErrs, validateWeightedPodAffinityTerms(podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, + fldPath.Child("preferredDuringSchedulingIgnoredDuringExecution"))...) + } + return allErrs +} + +func ValidateSeccompProfile(p string, fldPath *field.Path) field.ErrorList { + if p == core.SeccompProfileRuntimeDefault || p == core.DeprecatedSeccompProfileDockerDefault { + return nil + } + if p == "unconfined" { + return nil + } + if strings.HasPrefix(p, "localhost/") { + return validateLocalDescendingPath(strings.TrimPrefix(p, "localhost/"), fldPath) + } + return field.ErrorList{field.Invalid(fldPath, p, "must be a valid seccomp profile")} +} + +func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if p, exists := annotations[core.SeccompPodAnnotationKey]; exists { + allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(core.SeccompPodAnnotationKey))...) + } + for k, p := range annotations { + if strings.HasPrefix(k, core.SeccompContainerAnnotationKeyPrefix) { + allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(k))...) 
+ } + } + + return allErrs +} + +func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *core.PodSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for k, p := range annotations { + if !strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { + continue + } + // TODO: this belongs to admission, not general pod validation: + if !utilfeature.DefaultFeatureGate.Enabled(features.AppArmor) { + allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "AppArmor is disabled by feature-gate")) + continue + } + containerName := strings.TrimPrefix(k, apparmor.ContainerAnnotationKeyPrefix) + if !podSpecHasContainer(spec, containerName) { + allErrs = append(allErrs, field.Invalid(fldPath.Key(k), containerName, "container not found")) + } + + if err := apparmor.ValidateProfileFormat(p); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Key(k), p, err.Error())) + } + } + + return allErrs +} + +func podSpecHasContainer(spec *core.PodSpec, containerName string) bool { + for _, c := range spec.InitContainers { + if c.Name == containerName { + return true + } + } + for _, c := range spec.Containers { + if c.Name == containerName { + return true + } + } + return false +} + +const ( + // a sysctl segment regex, concatenated with dots to form a sysctl name + SysctlSegmentFmt string = "[a-z0-9]([-_a-z0-9]*[a-z0-9])?" + + // a sysctl name regex + SysctlFmt string = "(" + SysctlSegmentFmt + "\\.)*" + SysctlSegmentFmt + + // the maximal length of a sysctl name + SysctlMaxLength int = 253 +) + +var sysctlRegexp = regexp.MustCompile("^" + SysctlFmt + "$") + +// IsValidSysctlName checks that the given string is a valid sysctl name, +// i.e. matches SysctlFmt. +func IsValidSysctlName(name string) bool { + if len(name) > SysctlMaxLength { + return false + } + return sysctlRegexp.MatchString(name) +} + +func validateSysctls(sysctls []core.Sysctl, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + names := make(map[string]struct{}) + for i, s := range sysctls { + if len(s.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("name"), "")) + } else if !IsValidSysctlName(s.Name) { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("name"), s.Name, fmt.Sprintf("must have at most %d characters and match regex %s", SysctlMaxLength, SysctlFmt))) + } else if _, ok := names[s.Name]; ok { + allErrs = append(allErrs, field.Duplicate(fldPath.Index(i).Child("name"), s.Name)) + } + names[s.Name] = struct{}{} + } + return allErrs +} + +// ValidatePodSecurityContext test that the specified PodSecurityContext has valid data. +func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *core.PodSpec, specPath, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if securityContext != nil { + allErrs = append(allErrs, validateHostNetwork(securityContext.HostNetwork, spec.Containers, specPath.Child("containers"))...) 
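
IsValidSysctlName accepts dot-separated lowercase segments, with hyphens and underscores allowed inside a segment, up to 253 characters; the slash-separated sysctl form is not accepted by this regex. Sketch:

    func sysctlNameExample() {
    	fmt.Println(validation.IsValidSysctlName("net.ipv4.ip_forward"))  // true
    	fmt.Println(validation.IsValidSysctlName("net/ipv4/ip_forward")) // false: slashes do not match SysctlFmt
    	fmt.Println(validation.IsValidSysctlName("Net.IPv4"))            // false: uppercase is rejected
    }
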
+ if securityContext.FSGroup != nil { + for _, msg := range validation.IsValidGroupID(*securityContext.FSGroup) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fsGroup"), *(securityContext.FSGroup), msg)) + } + } + if securityContext.RunAsUser != nil { + for _, msg := range validation.IsValidUserID(*securityContext.RunAsUser) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *(securityContext.RunAsUser), msg)) + } + } + if securityContext.RunAsGroup != nil { + for _, msg := range validation.IsValidGroupID(*securityContext.RunAsGroup) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsGroup"), *(securityContext.RunAsGroup), msg)) + } + } + + for g, gid := range securityContext.SupplementalGroups { + for _, msg := range validation.IsValidGroupID(gid) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("supplementalGroups").Index(g), gid, msg)) + } + } + if securityContext.ShareProcessNamespace != nil { + if !utilfeature.DefaultFeatureGate.Enabled(features.PodShareProcessNamespace) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("shareProcessNamespace"), "Process Namespace Sharing is disabled by PodShareProcessNamespace feature-gate")) + } else if securityContext.HostPID && *securityContext.ShareProcessNamespace { + allErrs = append(allErrs, field.Invalid(fldPath.Child("shareProcessNamespace"), *securityContext.ShareProcessNamespace, "ShareProcessNamespace and HostPID cannot both be enabled")) + } + } + + if len(securityContext.Sysctls) != 0 { + if utilfeature.DefaultFeatureGate.Enabled(features.Sysctls) { + allErrs = append(allErrs, validateSysctls(securityContext.Sysctls, fldPath.Child("sysctls"))...) + } else { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("sysctls"), "Sysctls are disabled by Sysctls feature-gate")) + } + } + } + + return allErrs +} + +func ValidateContainerUpdates(newContainers, oldContainers []core.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) { + allErrs = field.ErrorList{} + if len(newContainers) != len(oldContainers) { + //TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff + allErrs = append(allErrs, field.Forbidden(fldPath, "pod updates may not add or remove containers")) + return allErrs, true + } + + // validate updated container images + for i, ctr := range newContainers { + if len(ctr.Image) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Index(i).Child("image"), "")) + } + // this is only called from ValidatePodUpdate so it's safe to check leading/trailing whitespace. + if len(strings.TrimSpace(ctr.Image)) != len(ctr.Image) { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("image"), ctr.Image, "must not have leading or trailing whitespace")) + } + } + return allErrs, false +} + +// ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields +// that cannot be changed. +func ValidatePodUpdate(newPod, oldPod *core.Pod) field.ErrorList { + fldPath := field.NewPath("metadata") + allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) + allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...) + specPath := field.NewPath("spec") + + // validate updateable fields: + // 1. spec.containers[*].image + // 2. spec.initContainers[*].image + // 3. spec.activeDeadlineSeconds
+ + containerErrs, stop := ValidateContainerUpdates(newPod.Spec.Containers, oldPod.Spec.Containers, specPath.Child("containers")) + allErrs = append(allErrs, containerErrs...) + if stop { + return allErrs + } + containerErrs, stop = ValidateContainerUpdates(newPod.Spec.InitContainers, oldPod.Spec.InitContainers, specPath.Child("initContainers")) + allErrs = append(allErrs, containerErrs...) + if stop { + return allErrs + } + + // validate updated spec.activeDeadlineSeconds. two types of updates are allowed: + // 1. from nil to a positive value + // 2. from a positive value to a lesser, non-negative value + if newPod.Spec.ActiveDeadlineSeconds != nil { + newActiveDeadlineSeconds := *newPod.Spec.ActiveDeadlineSeconds + if newActiveDeadlineSeconds < 0 || newActiveDeadlineSeconds > math.MaxInt32 { + allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, validation.InclusiveRangeError(0, math.MaxInt32))) + return allErrs + } + if oldPod.Spec.ActiveDeadlineSeconds != nil { + oldActiveDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds + if oldActiveDeadlineSeconds < newActiveDeadlineSeconds { + allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newActiveDeadlineSeconds, "must be less than or equal to previous value")) + return allErrs + } + } + } else if oldPod.Spec.ActiveDeadlineSeconds != nil { + allErrs = append(allErrs, field.Invalid(specPath.Child("activeDeadlineSeconds"), newPod.Spec.ActiveDeadlineSeconds, "must not update from a positive integer to nil value")) + } + + // handle updateable fields by munging those fields prior to deep equal comparison. + mungedPod := *newPod + // munge spec.containers[*].image + var newContainers []core.Container + for ix, container := range mungedPod.Spec.Containers { + container.Image = oldPod.Spec.Containers[ix].Image + newContainers = append(newContainers, container) + } + mungedPod.Spec.Containers = newContainers + // munge spec.initContainers[*].image + var newInitContainers []core.Container + for ix, container := range mungedPod.Spec.InitContainers { + container.Image = oldPod.Spec.InitContainers[ix].Image + newInitContainers = append(newInitContainers, container) + } + mungedPod.Spec.InitContainers = newInitContainers + // munge spec.activeDeadlineSeconds + mungedPod.Spec.ActiveDeadlineSeconds = nil + if oldPod.Spec.ActiveDeadlineSeconds != nil { + activeDeadlineSeconds := *oldPod.Spec.ActiveDeadlineSeconds + mungedPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds + } + + // Allow only additions to tolerations updates. + mungedPod.Spec.Tolerations = oldPod.Spec.Tolerations + allErrs = append(allErrs, validateOnlyAddedTolerations(newPod.Spec.Tolerations, oldPod.Spec.Tolerations, specPath.Child("tolerations"))...) + + if !apiequality.Semantic.DeepEqual(mungedPod.Spec, oldPod.Spec) { + // This diff isn't perfect, but it's a helluva lot better than "I'm not going to tell you what the difference is".
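
ValidateContainerUpdates is the first gate in ValidatePodUpdate: it permits image-only changes and returns stop=true when containers were added or removed, which also protects the later per-index image munging from indexing past the old container list. Sketch (same assumed imports):

    func containerUpdateExample() {
    	oldCtrs := []core.Container{{Name: "app", Image: "nginx:1.14"}}
    	newCtrs := []core.Container{{Name: "app", Image: "nginx:1.15"}}
    	errs, stop := validation.ValidateContainerUpdates(newCtrs, oldCtrs, field.NewPath("spec", "containers"))
    	fmt.Println(errs, stop) // [] false: an image-only change is legal
    }
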
+ //TODO: Pinpoint the specific field that causes the invalid error after we have strategic merge diff + specDiff := diff.ObjectDiff(mungedPod.Spec, oldPod.Spec) + allErrs = append(allErrs, field.Forbidden(specPath, fmt.Sprintf("pod updates may not change fields other than `spec.containers[*].image`, `spec.initContainers[*].image`, `spec.activeDeadlineSeconds` or `spec.tolerations` (only additions to existing tolerations)\n%v", specDiff))) + } + + return allErrs +} + +// ValidateContainerStateTransition test to if any illegal container state transitions are being attempted +func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path, restartPolicy core.RestartPolicy) field.ErrorList { + allErrs := field.ErrorList{} + // If we should always restart, containers are allowed to leave the terminated state + if restartPolicy == core.RestartPolicyAlways { + return allErrs + } + for i, oldStatus := range oldStatuses { + // Skip any container that is not terminated + if oldStatus.State.Terminated == nil { + continue + } + // Skip any container that failed but is allowed to restart + if oldStatus.State.Terminated.ExitCode != 0 && restartPolicy == core.RestartPolicyOnFailure { + continue + } + for _, newStatus := range newStatuses { + if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil { + allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state")) + } + } + } + return allErrs +} + +// ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields +// that cannot be changed. +func ValidatePodStatusUpdate(newPod, oldPod *core.Pod) field.ErrorList { + fldPath := field.NewPath("metadata") + allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) + allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...) + allErrs = append(allErrs, validatePodConditions(newPod.Status.Conditions, fldPath.Child("conditions"))...) + + fldPath = field.NewPath("status") + if newPod.Spec.NodeName != oldPod.Spec.NodeName { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("nodeName"), "may not be changed directly")) + } + + if newPod.Status.NominatedNodeName != oldPod.Status.NominatedNodeName && len(newPod.Status.NominatedNodeName) > 0 { + for _, msg := range ValidateNodeName(newPod.Status.NominatedNodeName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nominatedNodeName"), newPod.Status.NominatedNodeName, msg)) + } + } + + // If pod should not restart, make sure the status update does not transition + // any terminated containers to a non-terminated state. + allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...) + allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...) + + // For status update we ignore changes to pod spec. + newPod.Spec = oldPod.Spec + + return allErrs +} + +// validatePodConditions tests if the custom pod conditions are valid. 
+func validatePodConditions(conditions []core.PodCondition, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + systemConditions := sets.NewString(string(core.PodScheduled), string(core.PodReady), string(core.PodInitialized)) + for i, condition := range conditions { + if systemConditions.Has(string(condition.Type)) { + continue + } + for _, msg := range validation.IsQualifiedName(string(condition.Type)) { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i).Child("Type"), string(condition.Type), msg)) + } + } + return allErrs +} + +// ValidatePodBinding tests if required fields in the pod binding are legal. +func ValidatePodBinding(binding *core.Binding) field.ErrorList { + allErrs := field.ErrorList{} + + if len(binding.Target.Kind) != 0 && binding.Target.Kind != "Node" { + // TODO: When validation becomes versioned, this gets more complicated. + allErrs = append(allErrs, field.NotSupported(field.NewPath("target", "kind"), binding.Target.Kind, []string{"Node", ""})) + } + if len(binding.Target.Name) == 0 { + // TODO: When validation becomes versioned, this gets more complicated. + allErrs = append(allErrs, field.Required(field.NewPath("target", "name"), "")) + } + + return allErrs +} + +// ValidatePodTemplate tests if required fields in the pod template are set. +func ValidatePodTemplate(pod *core.PodTemplate) field.ErrorList { + allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, field.NewPath("template"))...) + return allErrs +} + +// ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields +// that cannot be changed. +func ValidatePodTemplateUpdate(newPod, oldPod *core.PodTemplate) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, field.NewPath("template"))...) + return allErrs +} + +var supportedSessionAffinityType = sets.NewString(string(core.ServiceAffinityClientIP), string(core.ServiceAffinityNone)) +var supportedServiceType = sets.NewString(string(core.ServiceTypeClusterIP), string(core.ServiceTypeNodePort), + string(core.ServiceTypeLoadBalancer), string(core.ServiceTypeExternalName)) + +// ValidateService tests if required fields/annotations of a Service are valid. +func ValidateService(service *core.Service) field.ErrorList { + allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata")) + + specPath := field.NewPath("spec") + isHeadlessService := service.Spec.ClusterIP == core.ClusterIPNone + if len(service.Spec.Ports) == 0 && !isHeadlessService && service.Spec.Type != core.ServiceTypeExternalName { + allErrs = append(allErrs, field.Required(specPath.Child("ports"), "")) + } + switch service.Spec.Type { + case core.ServiceTypeLoadBalancer: + for ix := range service.Spec.Ports { + port := &service.Spec.Ports[ix] + // This is a workaround for broken cloud environments that + // over-open firewalls. Hopefully it can go away when more clouds + // understand containers better. 
+ if port.Port == ports.KubeletPort { + portPath := specPath.Child("ports").Index(ix) + allErrs = append(allErrs, field.Invalid(portPath, port.Port, fmt.Sprintf("may not expose port %v externally since it is used by kubelet", ports.KubeletPort))) + } + } + if service.Spec.ClusterIP == "None" { + allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for LoadBalancer services")) + } + case core.ServiceTypeNodePort: + if service.Spec.ClusterIP == "None" { + allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for NodePort services")) + } + case core.ServiceTypeExternalName: + if service.Spec.ClusterIP != "" { + allErrs = append(allErrs, field.Forbidden(specPath.Child("clusterIP"), "must be empty for ExternalName services")) + } + if len(service.Spec.ExternalName) > 0 { + allErrs = append(allErrs, ValidateDNS1123Subdomain(service.Spec.ExternalName, specPath.Child("externalName"))...) + } else { + allErrs = append(allErrs, field.Required(specPath.Child("externalName"), "")) + } + } + + allPortNames := sets.String{} + portsPath := specPath.Child("ports") + for i := range service.Spec.Ports { + portPath := portsPath.Index(i) + allErrs = append(allErrs, validateServicePort(&service.Spec.Ports[i], len(service.Spec.Ports) > 1, isHeadlessService, &allPortNames, portPath)...) + } + + if service.Spec.Selector != nil { + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(service.Spec.Selector, specPath.Child("selector"))...) + } + + if len(service.Spec.SessionAffinity) == 0 { + allErrs = append(allErrs, field.Required(specPath.Child("sessionAffinity"), "")) + } else if !supportedSessionAffinityType.Has(string(service.Spec.SessionAffinity)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, supportedSessionAffinityType.List())) + } + + if service.Spec.SessionAffinity == core.ServiceAffinityClientIP { + allErrs = append(allErrs, validateClientIPAffinityConfig(service.Spec.SessionAffinityConfig, specPath.Child("sessionAffinityConfig"))...) + } else if service.Spec.SessionAffinity == core.ServiceAffinityNone { + if service.Spec.SessionAffinityConfig != nil { + allErrs = append(allErrs, field.Forbidden(specPath.Child("sessionAffinityConfig"), fmt.Sprintf("must not be set when session affinity is %s", string(core.ServiceAffinityNone)))) + } + } + + if helper.IsServiceIPSet(service) { + if ip := net.ParseIP(service.Spec.ClusterIP); ip == nil { + allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty, 'None', or a valid IP address")) + } + } + + ipPath := specPath.Child("externalIPs") + for i, ip := range service.Spec.ExternalIPs { + idxPath := ipPath.Index(i) + if msgs := validation.IsValidIP(ip); len(msgs) != 0 { + for i := range msgs { + allErrs = append(allErrs, field.Invalid(idxPath, ip, msgs[i])) + } + } else { + allErrs = append(allErrs, validateNonSpecialIP(ip, idxPath)...) 
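
The per-type rules compose with the generic port and session-affinity checks, so a LoadBalancer service declared headless fails exactly one check. A sketch; this one additionally assumes metav1 (k8s.io/apimachinery/pkg/apis/meta/v1) and intstr (k8s.io/apimachinery/pkg/util/intstr):

    func headlessLoadBalancerExample() {
    	svc := &core.Service{
    		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
    		Spec: core.ServiceSpec{
    			Type:            core.ServiceTypeLoadBalancer,
    			ClusterIP:       "None", // headless: invalid for LoadBalancer
    			SessionAffinity: core.ServiceAffinityNone,
    			Ports: []core.ServicePort{{
    				Port:       80,
    				Protocol:   core.ProtocolTCP,
    				TargetPort: intstr.FromInt(8080),
    			}},
    		},
    	}
    	// Expect: spec.clusterIP: Invalid value: "None":
    	//   may not be set to 'None' for LoadBalancer services
    	fmt.Println(validation.ValidateService(svc))
    }
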
+ } + } + + if len(service.Spec.Type) == 0 { + allErrs = append(allErrs, field.Required(specPath.Child("type"), "")) + } else if !supportedServiceType.Has(string(service.Spec.Type)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, supportedServiceType.List())) + } + + if service.Spec.Type == core.ServiceTypeLoadBalancer { + portsPath := specPath.Child("ports") + includeProtocols := sets.NewString() + for i := range service.Spec.Ports { + portPath := portsPath.Index(i) + if !utilfeature.DefaultFeatureGate.Enabled(features.SCTPSupport) && service.Spec.Ports[i].Protocol == core.ProtocolSCTP { + allErrs = append(allErrs, field.NotSupported(portPath.Child("protocol"), service.Spec.Ports[i].Protocol, []string{string(core.ProtocolTCP), string(core.ProtocolUDP)})) + } else if !supportedPortProtocols.Has(string(service.Spec.Ports[i].Protocol)) { + allErrs = append(allErrs, field.Invalid(portPath.Child("protocol"), service.Spec.Ports[i].Protocol, "cannot create an external load balancer with non-TCP/UDP/SCTP ports")) + } else { + includeProtocols.Insert(string(service.Spec.Ports[i].Protocol)) + } + } + if includeProtocols.Len() > 1 { + allErrs = append(allErrs, field.Invalid(portsPath, service.Spec.Ports, "cannot create an external load balancer with mixed protocols")) + } + } + + if service.Spec.Type == core.ServiceTypeClusterIP { + portsPath := specPath.Child("ports") + for i := range service.Spec.Ports { + portPath := portsPath.Index(i) + if service.Spec.Ports[i].NodePort != 0 { + allErrs = append(allErrs, field.Forbidden(portPath.Child("nodePort"), "may not be used when `type` is 'ClusterIP'")) + } + } + } + + // Check for duplicate NodePorts, considering (protocol,port) pairs + portsPath = specPath.Child("ports") + nodePorts := make(map[core.ServicePort]bool) + for i := range service.Spec.Ports { + port := &service.Spec.Ports[i] + if port.NodePort == 0 { + continue + } + portPath := portsPath.Index(i) + var key core.ServicePort + key.Protocol = port.Protocol + key.NodePort = port.NodePort + _, found := nodePorts[key] + if found { + allErrs = append(allErrs, field.Duplicate(portPath.Child("nodePort"), port.NodePort)) + } + nodePorts[key] = true + } + + // Check for duplicate Ports, considering (protocol,port) pairs + portsPath = specPath.Child("ports") + ports := make(map[core.ServicePort]bool) + for i, port := range service.Spec.Ports { + portPath := portsPath.Index(i) + key := core.ServicePort{Protocol: port.Protocol, Port: port.Port} + _, found := ports[key] + if found { + allErrs = append(allErrs, field.Duplicate(portPath, key)) + } + ports[key] = true + } + + // Validate SourceRange field and annotation + _, ok := service.Annotations[core.AnnotationLoadBalancerSourceRangesKey] + if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok { + var fieldPath *field.Path + var val string + if len(service.Spec.LoadBalancerSourceRanges) > 0 { + fieldPath = specPath.Child("LoadBalancerSourceRanges") + val = fmt.Sprintf("%v", service.Spec.LoadBalancerSourceRanges) + } else { + fieldPath = field.NewPath("metadata", "annotations").Key(core.AnnotationLoadBalancerSourceRangesKey) + val = service.Annotations[core.AnnotationLoadBalancerSourceRangesKey] + } + if service.Spec.Type != core.ServiceTypeLoadBalancer { + allErrs = append(allErrs, field.Forbidden(fieldPath, "may only be used when `type` is 'LoadBalancer'")) + } + _, err := apiservice.GetLoadBalancerSourceRanges(service) + if err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath, val, "must be a list of IP ranges. For example, 10.240.0.0/24,10.250.0.0/24"))
+ } + } + + allErrs = append(allErrs, validateServiceExternalTrafficFieldsValue(service)...) + + return allErrs +} + +func validateServicePort(sp *core.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if requireName && len(sp.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else if len(sp.Name) != 0 { + allErrs = append(allErrs, ValidateDNS1123Label(sp.Name, fldPath.Child("name"))...) + if allNames.Has(sp.Name) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), sp.Name)) + } else { + allNames.Insert(sp.Name) + } + } + + for _, msg := range validation.IsValidPortNum(int(sp.Port)) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), sp.Port, msg)) + } + + if len(sp.Protocol) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) + } else if !utilfeature.DefaultFeatureGate.Enabled(features.SCTPSupport) && sp.Protocol == core.ProtocolSCTP { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, []string{string(core.ProtocolTCP), string(core.ProtocolUDP)})) + } else if !supportedPortProtocols.Has(string(sp.Protocol)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), sp.Protocol, supportedPortProtocols.List())) + } + + allErrs = append(allErrs, ValidatePortNumOrName(sp.TargetPort, fldPath.Child("targetPort"))...) + + // in the v1 API, targetPorts on headless services were tolerated. + // once we have version-specific validation, we can reject this on newer API versions, but until then, we have to tolerate it for compatibility. + // + // if isHeadlessService { + // if sp.TargetPort.Type == intstr.String || (sp.TargetPort.Type == intstr.Int && sp.Port != sp.TargetPort.IntValue()) { + // allErrs = append(allErrs, field.Invalid(fldPath.Child("targetPort"), sp.TargetPort, "must be equal to the value of 'port' when clusterIP = None")) + // } + // } + + return allErrs +} + +// validateServiceExternalTrafficFieldsValue validates that ExternalTraffic related fields +// have legal values. +func validateServiceExternalTrafficFieldsValue(service *core.Service) field.ErrorList { + allErrs := field.ErrorList{} + + // Check first class fields. + if service.Spec.ExternalTrafficPolicy != "" && + service.Spec.ExternalTrafficPolicy != core.ServiceExternalTrafficPolicyTypeCluster && + service.Spec.ExternalTrafficPolicy != core.ServiceExternalTrafficPolicyTypeLocal { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy, + fmt.Sprintf("ExternalTrafficPolicy must be empty, %v or %v", core.ServiceExternalTrafficPolicyTypeCluster, core.ServiceExternalTrafficPolicyTypeLocal))) + } + if service.Spec.HealthCheckNodePort < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort, + "HealthCheckNodePort must not be less than 0")) + } + + return allErrs +} + +// ValidateServiceExternalTrafficFieldsCombination validates whether the ExternalTrafficPolicy, +// HealthCheckNodePort and Type combination is legal. For update, it should be called +// after clearing externalTraffic related fields for the ease of transitioning between +// different service types.
+func ValidateServiceExternalTrafficFieldsCombination(service *core.Service) field.ErrorList { + allErrs := field.ErrorList{} + + if service.Spec.Type != core.ServiceTypeLoadBalancer && + service.Spec.Type != core.ServiceTypeNodePort && + service.Spec.ExternalTrafficPolicy != "" { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy, + "ExternalTrafficPolicy can only be set on NodePort and LoadBalancer service")) + } + + if !apiservice.NeedsHealthCheck(service) && + service.Spec.HealthCheckNodePort != 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "healthCheckNodePort"), service.Spec.HealthCheckNodePort, + "HealthCheckNodePort can only be set on LoadBalancer service with ExternalTrafficPolicy=Local")) + } + + return allErrs +} + +// ValidateServiceUpdate tests if required fields in the service are set during an update +func ValidateServiceUpdate(service, oldService *core.Service) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) + + // ClusterIP should be immutable for services using it (every type other than ExternalName) + // which do not have ClusterIP assigned yet (empty string value) + if service.Spec.Type != core.ServiceTypeExternalName { + if oldService.Spec.Type != core.ServiceTypeExternalName && oldService.Spec.ClusterIP != "" { + allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...) + } + } + + allErrs = append(allErrs, ValidateService(service)...) + return allErrs +} + +// ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status. +func ValidateServiceStatusUpdate(service, oldService *core.Service) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...) + return allErrs +} + +// ValidateReplicationController tests if required fields in the replication controller are set. +func ValidateReplicationController(controller *core.ReplicationController) field.ErrorList { + allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateReplicationControllerUpdate tests if required fields in the replication controller are set. +func ValidateReplicationControllerUpdate(controller, oldController *core.ReplicationController) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateReplicationControllerStatusUpdate tests if required fields in the replication controller are set. +func ValidateReplicationControllerStatusUpdate(controller, oldController *core.ReplicationController) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateReplicationControllerStatus(controller.Status, field.NewPath("status"))...) 
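
The combination check above is kept separate from the field-value check so that update paths can clear the externalTraffic fields before transitioning between service types. A sketch of the rejection it produces (same assumed imports):

    func externalTrafficCombinationExample() {
    	svc := &core.Service{Spec: core.ServiceSpec{
    		Type:                  core.ServiceTypeClusterIP,
    		ExternalTrafficPolicy: core.ServiceExternalTrafficPolicyTypeLocal,
    	}}
    	// Expect: spec.externalTrafficPolicy: ... can only be set on
    	//   NodePort and LoadBalancer service
    	fmt.Println(validation.ValidateServiceExternalTrafficFieldsCombination(svc))
    }
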
+ return allErrs +} + +func ValidateReplicationControllerStatus(status core.ReplicationControllerStatus, statusPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateNonnegativeField(int64(status.Replicas), statusPath.Child("replicas"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(status.ReadyReplicas), statusPath.Child("readyReplicas"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(status.AvailableReplicas), statusPath.Child("availableReplicas"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(status.ObservedGeneration), statusPath.Child("observedGeneration"))...) + msg := "cannot be greater than status.replicas" + if status.FullyLabeledReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(statusPath.Child("fullyLabeledReplicas"), status.FullyLabeledReplicas, msg)) + } + if status.ReadyReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(statusPath.Child("readyReplicas"), status.ReadyReplicas, msg)) + } + if status.AvailableReplicas > status.Replicas { + allErrs = append(allErrs, field.Invalid(statusPath.Child("availableReplicas"), status.AvailableReplicas, msg)) + } + if status.AvailableReplicas > status.ReadyReplicas { + allErrs = append(allErrs, field.Invalid(statusPath.Child("availableReplicas"), status.AvailableReplicas, "cannot be greater than readyReplicas")) + } + return allErrs +} + +// Validates that the given selector is non-empty. +func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + selector := labels.Set(selectorMap).AsSelector() + if selector.Empty() { + allErrs = append(allErrs, field.Required(fldPath, "")) + } + return allErrs +} + +// Validates the given template and ensures that it is in accordance with the desired selector and replicas. +func ValidatePodTemplateSpecForRC(template *core.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if template == nil { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else { + selector := labels.Set(selectorMap).AsSelector() + if !selector.Empty() { + // Verify that the RC selector matches the labels in template. + labels := labels.Set(template.Labels) + if !selector.Matches(labels) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("metadata", "labels"), template.Labels, "`selector` does not match template `labels`")) + } + } + allErrs = append(allErrs, ValidatePodTemplateSpec(template, fldPath)...) + if replicas > 1 { + allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...) + } + // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). + if template.Spec.RestartPolicy != core.RestartPolicyAlways { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(core.RestartPolicyAlways)})) + } + if template.Spec.ActiveDeadlineSeconds != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "activeDeadlineSeconds"), template.Spec.ActiveDeadlineSeconds, "must not be specified")) + } + } + return allErrs +} + +// ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set. 
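
The status invariants in ValidateReplicationControllerStatus form a small partial order: fullyLabeledReplicas, readyReplicas and availableReplicas may not exceed replicas, and availableReplicas may not exceed readyReplicas. Sketch (same assumed imports):

    func rcStatusExample() {
    	status := core.ReplicationControllerStatus{
    		Replicas:             2,
    		FullyLabeledReplicas: 2,
    		ReadyReplicas:        3, // greater than Replicas: invalid
    		AvailableReplicas:    2,
    	}
    	// Expect: status.readyReplicas: ... cannot be greater than status.replicas
    	fmt.Println(validation.ValidateReplicationControllerStatus(status, field.NewPath("status")))
    }
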
+func ValidateReplicationControllerSpec(spec *core.ReplicationControllerSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) + allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...) + allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.Replicas), fldPath.Child("replicas"))...) + allErrs = append(allErrs, ValidatePodTemplateSpecForRC(spec.Template, spec.Selector, spec.Replicas, fldPath.Child("template"))...) + return allErrs +} + +// ValidatePodTemplateSpec validates the spec of a pod template +func ValidatePodTemplateSpec(spec *core.PodTemplateSpec, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...) + allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, &spec.Spec, fldPath.Child("annotations"))...) + allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...) + return allErrs +} + +func ValidateReadOnlyPersistentDisks(volumes []core.Volume, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i := range volumes { + vol := &volumes[i] + idxPath := fldPath.Index(i) + if vol.GCEPersistentDisk != nil { + if vol.GCEPersistentDisk.ReadOnly == false { + allErrs = append(allErrs, field.Invalid(idxPath.Child("gcePersistentDisk", "readOnly"), false, "must be true for replicated pods > 1; GCE PD can only be mounted on multiple machines if it is read-only")) + } + } + // TODO: What to do for AWS? It doesn't support replicas + } + return allErrs +} + +// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations has valid data +func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + taints, err := helper.GetTaintsFromNodeAnnotations(annotations) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, core.TaintsAnnotationKey, err.Error())) + return allErrs + } + + if len(taints) > 0 { + allErrs = append(allErrs, validateNodeTaints(taints, fldPath.Child(core.TaintsAnnotationKey))...) + } + + return allErrs +} + +// validateNodeTaints tests if given taints have valid data. +func validateNodeTaints(taints []core.Taint, fldPath *field.Path) field.ErrorList { + allErrors := field.ErrorList{} + + uniqueTaints := map[core.TaintEffect]sets.String{} + + for i, currTaint := range taints { + idxPath := fldPath.Index(i) + // validate the taint key + allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(currTaint.Key, idxPath.Child("key"))...) + // validate the taint value + if errs := validation.IsValidLabelValue(currTaint.Value); len(errs) != 0 { + allErrors = append(allErrors, field.Invalid(idxPath.Child("value"), currTaint.Value, strings.Join(errs, ";"))) + } + // validate the taint effect + allErrors = append(allErrors, validateTaintEffect(&currTaint.Effect, false, idxPath.Child("effect"))...) 
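
The per-taint checks above feed a uniqueness check (in the next hunk) that keys on the (key, effect) pair, so the same key may legally appear once per effect. A sketch through the exported annotation entry point; the annotation JSON is illustrative and relies on the internal types unmarshalling by field name:

    func taintsAnnotationExample() {
    	annotations := map[string]string{
    		core.TaintsAnnotationKey: `[
    			{"key": "dedicated", "value": "gpu",   "effect": "NoSchedule"},
    			{"key": "dedicated", "value": "infra", "effect": "NoSchedule"}
    		]`,
    	}
    	// Same key with the same effect twice: expect a field.Duplicate with
    	// detail "taints must be unique by key and effect pair".
    	fmt.Println(validation.ValidateTaintsInNodeAnnotations(annotations, field.NewPath("metadata", "annotations")))
    }
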
+ + // validate if taint is unique by key and effect + if len(uniqueTaints[currTaint.Effect]) > 0 && uniqueTaints[currTaint.Effect].Has(currTaint.Key) { + duplicatedError := field.Duplicate(idxPath, currTaint) + duplicatedError.Detail = "taints must be unique by key and effect pair" + allErrors = append(allErrors, duplicatedError) + continue + } + + // add taint to uniqueTaints for uniqueness check + if len(uniqueTaints[currTaint.Effect]) == 0 { + uniqueTaints[currTaint.Effect] = sets.String{} + } + uniqueTaints[currTaint.Effect].Insert(currTaint.Key) + } + return allErrors +} + +func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if annotations[core.TaintsAnnotationKey] != "" { + allErrs = append(allErrs, ValidateTaintsInNodeAnnotations(annotations, fldPath)...) + } + + if annotations[core.PreferAvoidPodsAnnotationKey] != "" { + allErrs = append(allErrs, ValidateAvoidPodsInNodeAnnotations(annotations, fldPath)...) + } + return allErrs +} + +// ValidateNode tests if required fields in the node are set. +func ValidateNode(node *core.Node) field.ErrorList { + fldPath := field.NewPath("metadata") + allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath) + allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) + if len(node.Spec.Taints) > 0 { + allErrs = append(allErrs, validateNodeTaints(node.Spec.Taints, fldPath.Child("taints"))...) + } + + // Only validate spec. + // All status fields are optional and can be updated later. + // That said, if specified, we need to ensure they are valid. + allErrs = append(allErrs, ValidateNodeResources(node)...) + + // Only allow Spec.ConfigSource and Status.Config to be set if the DynamicKubeletConfig feature gate is enabled + if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { + if node.Spec.ConfigSource != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "configSource"), "configSource may only be set if the DynamicKubeletConfig feature gate is enabled")) + } + if node.Status.Config != nil { + allErrs = append(allErrs, field.Forbidden(field.NewPath("status", "config"), "config may only be set if the DynamicKubeletConfig feature gate is enabled")) + } + } + + if len(node.Spec.PodCIDR) != 0 { + _, err := ValidateCIDR(node.Spec.PodCIDR) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "podCIDR"), node.Spec.PodCIDR, "not a valid CIDR")) + } + } + return allErrs +} + +// ValidateNodeResources is used to make sure a node has valid capacity and allocatable values. +func ValidateNodeResources(node *core.Node) field.ErrorList { + allErrs := field.ErrorList{} + // Validate resource quantities in capacity. + hugePageSizes := sets.NewString() + for k, v := range node.Status.Capacity { + resPath := field.NewPath("status", "capacity", string(k)) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + // track any huge page size that has a positive value + if helper.IsHugePageResourceName(k) && v.Value() > int64(0) { + hugePageSizes.Insert(string(k)) + } + if len(hugePageSizes) > 1 { + allErrs = append(allErrs, field.Invalid(resPath, v, "may not have pre-allocated hugepages for multiple page sizes")) + } + } + // Validate resource quantities in allocatable.
+ hugePageSizes = sets.NewString() + for k, v := range node.Status.Allocatable { + resPath := field.NewPath("status", "allocatable", string(k)) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + // track any huge page size that has a positive value + if helper.IsHugePageResourceName(k) && v.Value() > int64(0) { + hugePageSizes.Insert(string(k)) + } + if len(hugePageSizes) > 1 { + allErrs = append(allErrs, field.Invalid(resPath, v, "may not have pre-allocated hugepages for multiple page sizes")) + } + } + return allErrs +} + +// ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode. +func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList { + fldPath := field.NewPath("metadata") + allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath) + allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) + + // TODO: Enable the code once we have better core object.status update model. Currently, + // anyone can update node status. + // if !apiequality.Semantic.DeepEqual(node.Status, core.NodeStatus{}) { + // allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty")) + // } + + allErrs = append(allErrs, ValidateNodeResources(node)...) + + // Validate no duplicate addresses in node status. + addresses := make(map[core.NodeAddress]bool) + for i, address := range node.Status.Addresses { + if _, ok := addresses[address]; ok { + allErrs = append(allErrs, field.Duplicate(field.NewPath("status", "addresses").Index(i), address)) + } + addresses[address] = true + } + + if len(oldNode.Spec.PodCIDR) == 0 { + // Allow the controller manager to assign a CIDR to a node if it doesn't have one. + oldNode.Spec.PodCIDR = node.Spec.PodCIDR + } else { + if oldNode.Spec.PodCIDR != node.Spec.PodCIDR { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "podCIDR"), "node updates may not change podCIDR except from \"\" to valid")) + } + } + + // Allow controller manager updating provider ID when not set + if len(oldNode.Spec.ProviderID) == 0 { + oldNode.Spec.ProviderID = node.Spec.ProviderID + } else { + if oldNode.Spec.ProviderID != node.Spec.ProviderID { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "providerID"), "node updates may not change providerID except from \"\" to valid")) + } + } + + // Allow and validate updates to Node.Spec.ConfigSource and Node.Status.Config if DynamicKubeletConfig feature gate is enabled + if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) { + if node.Spec.ConfigSource != nil { + allErrs = append(allErrs, validateNodeConfigSourceSpec(node.Spec.ConfigSource, field.NewPath("spec", "configSource"))...) + } + oldNode.Spec.ConfigSource = node.Spec.ConfigSource + if node.Status.Config != nil { + allErrs = append(allErrs, validateNodeConfigStatus(node.Status.Config, field.NewPath("status", "config"))...) 
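
Once the allowed mutations are munged into oldNode, any surviving difference is rejected wholesale, as the next hunk shows. A sketch (assumes metav1 as above; the podCIDR values are illustrative): changing an already-set podCIDR draws both the specific Forbidden error and the catch-all one from the final deep-equal check.

    func nodeUpdateExample() {
    	oldNode := &core.Node{
    		ObjectMeta: metav1.ObjectMeta{Name: "node-1", ResourceVersion: "1"},
    		Spec:       core.NodeSpec{PodCIDR: "10.0.0.0/24"},
    	}
    	newNode := oldNode.DeepCopy()
    	newNode.Spec.PodCIDR = "10.0.1.0/24" // may not change once set
    	fmt.Println(validation.ValidateNodeUpdate(newNode, oldNode))
    }
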
+		}
+		oldNode.Status.Config = node.Status.Config
+	}
+
+	// TODO: move reset function to its own location
+	// Ignore metadata changes now that they have been tested
+	oldNode.ObjectMeta = node.ObjectMeta
+	// Allow users to update capacity
+	oldNode.Status.Capacity = node.Status.Capacity
+	// Allow users to unschedule node
+	oldNode.Spec.Unschedulable = node.Spec.Unschedulable
+	// Clear status
+	oldNode.Status = node.Status
+
+	// update taints
+	if len(node.Spec.Taints) > 0 {
+		allErrs = append(allErrs, validateNodeTaints(node.Spec.Taints, fldPath.Child("taints"))...)
+	}
+	oldNode.Spec.Taints = node.Spec.Taints
+
+	// We made allowed changes to oldNode, and now we compare oldNode to node. Any remaining differences indicate changes to protected fields.
+	// TODO: Add a 'real' error type for this error and print actual diffs.
+	if !apiequality.Semantic.DeepEqual(oldNode, node) {
+		klog.V(4).Infof("Update failed validation %#v vs %#v", oldNode, node)
+		allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "node updates may only change labels, taints, or capacity (or configSource, if the DynamicKubeletConfig feature gate is enabled)"))
+	}
+
+	return allErrs
+}
+
+// validation specific to Node.Spec.ConfigSource
+func validateNodeConfigSourceSpec(source *core.NodeConfigSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	count := int(0)
+	if source.ConfigMap != nil {
+		count++
+		allErrs = append(allErrs, validateConfigMapNodeConfigSourceSpec(source.ConfigMap, fldPath.Child("configMap"))...)
+	}
+	// add more subfields here in the future as they are added to NodeConfigSource
+
+	// exactly one reference subfield must be non-nil
+	if count != 1 {
+		allErrs = append(allErrs, field.Invalid(fldPath, source, "exactly one reference subfield must be non-nil"))
+	}
+	return allErrs
+}
+
+// validation specific to Node.Spec.ConfigSource.ConfigMap
+func validateConfigMapNodeConfigSourceSpec(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	// uid and resourceVersion must not be set in spec
+	if string(source.UID) != "" {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("uid"), "uid must not be set in spec"))
+	}
+	if source.ResourceVersion != "" {
+		allErrs = append(allErrs, field.Forbidden(fldPath.Child("resourceVersion"), "resourceVersion must not be set in spec"))
+	}
+	return append(allErrs, validateConfigMapNodeConfigSource(source, fldPath)...)
+}
+
+// validation specific to Node.Status.Config
+func validateNodeConfigStatus(status *core.NodeConfigStatus, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if status.Assigned != nil {
+		allErrs = append(allErrs, validateNodeConfigSourceStatus(status.Assigned, fldPath.Child("assigned"))...)
+	}
+	if status.Active != nil {
+		allErrs = append(allErrs, validateNodeConfigSourceStatus(status.Active, fldPath.Child("active"))...)
+	}
+	if status.LastKnownGood != nil {
+		allErrs = append(allErrs, validateNodeConfigSourceStatus(status.LastKnownGood, fldPath.Child("lastKnownGood"))...)
+	}
+	return allErrs
+}
+
+// validation specific to Node.Status.Config.(Active|Assigned|LastKnownGood)
+func validateNodeConfigSourceStatus(source *core.NodeConfigSource, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	count := int(0)
+	if source.ConfigMap != nil {
+		count++
+		allErrs = append(allErrs, validateConfigMapNodeConfigSourceStatus(source.ConfigMap, fldPath.Child("configMap"))...)
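+		// ConfigMap is currently the only source subtype, as on the spec side;
+		// the count guard below keeps exactly one reference subfield set.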
+ } + // add more subfields here in the future as they are added to NodeConfigSource + + // exactly one reference subfield must be non-nil + if count != 1 { + allErrs = append(allErrs, field.Invalid(fldPath, source, "exactly one reference subfield must be non-nil")) + } + return allErrs +} + +// validation specific to Node.Status.Config.(Active|Assigned|LastKnownGood).ConfigMap +func validateConfigMapNodeConfigSourceStatus(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // uid and resourceVersion must be set in status + if string(source.UID) == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("uid"), "uid must be set in status")) + } + if source.ResourceVersion == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("resourceVersion"), "resourceVersion must be set in status")) + } + return append(allErrs, validateConfigMapNodeConfigSource(source, fldPath)...) +} + +// common validation +func validateConfigMapNodeConfigSource(source *core.ConfigMapNodeConfigSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // validate target configmap namespace + if source.Namespace == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), "namespace must be set")) + } else { + for _, msg := range ValidateNameFunc(ValidateNamespaceName)(source.Namespace, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), source.Namespace, msg)) + } + } + // validate target configmap name + if source.Name == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name must be set")) + } else { + for _, msg := range ValidateNameFunc(ValidateConfigMapName)(source.Name, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), source.Name, msg)) + } + } + // validate kubeletConfigKey against rules for configMap key names + if source.KubeletConfigKey == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("kubeletConfigKey"), "kubeletConfigKey must be set")) + } else { + for _, msg := range validation.IsConfigMapKey(source.KubeletConfigKey) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("kubeletConfigKey"), source.KubeletConfigKey, msg)) + } + } + return allErrs +} + +// Validate compute resource typename. +// Refer to docs/design/resources.md for more details. +func validateResourceName(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + if len(allErrs) != 0 { + return allErrs + } + + if len(strings.Split(value, "/")) == 1 { + if !helper.IsStandardResourceName(value) { + return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource type or fully qualified")) + } + } + + return allErrs +} + +// Validate container resource name +// Refer to docs/design/resources.md for more details. 
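+// A sketch of the accepted forms (hypothetical calls):
+//
+//	validateContainerResourceName("cpu", fldPath)             // standard container resource
+//	validateContainerResourceName("example.com/gpu", fldPath) // extended (fully qualified) resource
+//	validateContainerResourceName("foo", fldPath)             // rejected: unqualified and not standard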
+func validateContainerResourceName(value string, fldPath *field.Path) field.ErrorList { + allErrs := validateResourceName(value, fldPath) + + if len(strings.Split(value, "/")) == 1 { + if !helper.IsStandardContainerResourceName(value) { + return append(allErrs, field.Invalid(fldPath, value, "must be a standard resource for containers")) + } + } else if !helper.IsNativeResource(core.ResourceName(value)) { + if !helper.IsExtendedResourceName(core.ResourceName(value)) { + return append(allErrs, field.Invalid(fldPath, value, "doesn't follow extended resource name standard")) + } + } + return allErrs +} + +// isLocalStorageResource checks whether the resource is local ephemeral storage +func isLocalStorageResource(name string) bool { + if name == string(core.ResourceEphemeralStorage) || name == string(core.ResourceRequestsEphemeralStorage) || + name == string(core.ResourceLimitsEphemeralStorage) { + return true + } + return false +} + +// Validate resource names that can go in a resource quota +// Refer to docs/design/resources.md for more details. +func ValidateResourceQuotaResourceName(value string, fldPath *field.Path) field.ErrorList { + allErrs := validateResourceName(value, fldPath) + if isLocalStorageResource(value) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + return append(allErrs, field.Forbidden(fldPath, "ResourceEphemeralStorage field disabled by feature-gate for ResourceQuota")) + } + if len(strings.Split(value, "/")) == 1 { + if !helper.IsStandardQuotaResourceName(value) { + return append(allErrs, field.Invalid(fldPath, value, isInvalidQuotaResource)) + } + } + return allErrs +} + +// Validate limit range types +func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsQualifiedName(value) { + allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) + } + if len(allErrs) != 0 { + return allErrs + } + + if len(strings.Split(value, "/")) == 1 { + if !helper.IsStandardLimitRangeType(value) { + return append(allErrs, field.Invalid(fldPath, value, "must be a standard limit type or fully qualified")) + } + } + + return allErrs +} + +// Validate limit range resource name +// limit types (other than Pod/Container) could contain storage not just cpu or memory +func validateLimitRangeResourceName(limitType core.LimitType, value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value == string(core.ResourceEphemeralStorage) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + return append(allErrs, field.Forbidden(fldPath, "ResourceEphemeralStorage field disabled by feature-gate for Resource LimitRange")) + } + switch limitType { + case core.LimitTypePod, core.LimitTypeContainer: + return validateContainerResourceName(value, fldPath) + default: + return validateResourceName(value, fldPath) + } +} + +// ValidateLimitRange tests if required fields in the LimitRange are set. 
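+// A minimal usage sketch (hypothetical caller; metav1 = k8s.io/apimachinery/pkg/apis/meta/v1):
+//
+//	lr := &core.LimitRange{
+//		ObjectMeta: metav1.ObjectMeta{Name: "limits", Namespace: "default"},
+//		Spec:       core.LimitRangeSpec{Limits: []core.LimitRangeItem{{Type: core.LimitTypeContainer}}},
+//	}
+//	errs := ValidateLimitRange(lr) // an empty list means the object passed validation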
+func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList { + allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata")) + + // ensure resource names are properly qualified per docs/design/resources.md + limitTypeSet := map[core.LimitType]bool{} + fldPath := field.NewPath("spec", "limits") + for i := range limitRange.Spec.Limits { + idxPath := fldPath.Index(i) + limit := &limitRange.Spec.Limits[i] + allErrs = append(allErrs, validateLimitRangeTypeName(string(limit.Type), idxPath.Child("type"))...) + + _, found := limitTypeSet[limit.Type] + if found { + allErrs = append(allErrs, field.Duplicate(idxPath.Child("type"), limit.Type)) + } + limitTypeSet[limit.Type] = true + + keys := sets.String{} + min := map[string]resource.Quantity{} + max := map[string]resource.Quantity{} + defaults := map[string]resource.Quantity{} + defaultRequests := map[string]resource.Quantity{} + maxLimitRequestRatios := map[string]resource.Quantity{} + + for k, q := range limit.Max { + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("max").Key(string(k)))...) + keys.Insert(string(k)) + max[string(k)] = q + } + for k, q := range limit.Min { + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("min").Key(string(k)))...) + keys.Insert(string(k)) + min[string(k)] = q + } + + if limit.Type == core.LimitTypePod { + if len(limit.Default) > 0 { + allErrs = append(allErrs, field.Forbidden(idxPath.Child("default"), "may not be specified when `type` is 'Pod'")) + } + if len(limit.DefaultRequest) > 0 { + allErrs = append(allErrs, field.Forbidden(idxPath.Child("defaultRequest"), "may not be specified when `type` is 'Pod'")) + } + } else { + for k, q := range limit.Default { + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("default").Key(string(k)))...) + keys.Insert(string(k)) + defaults[string(k)] = q + } + for k, q := range limit.DefaultRequest { + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("defaultRequest").Key(string(k)))...) + keys.Insert(string(k)) + defaultRequests[string(k)] = q + } + } + + if limit.Type == core.LimitTypePersistentVolumeClaim { + _, minQuantityFound := limit.Min[core.ResourceStorage] + _, maxQuantityFound := limit.Max[core.ResourceStorage] + if !minQuantityFound && !maxQuantityFound { + allErrs = append(allErrs, field.Required(idxPath.Child("limits"), "either minimum or maximum storage value is required, but neither was provided")) + } + } + + for k, q := range limit.MaxLimitRequestRatio { + allErrs = append(allErrs, validateLimitRangeResourceName(limit.Type, string(k), idxPath.Child("maxLimitRequestRatio").Key(string(k)))...) 
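+		// maxLimitRequestRatio keys join the same key set as min/max/default, so
+		// the per-key checks below (min <= defaultRequest <= default <= max,
+		// ratio >= 1) see every resource name.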
+ keys.Insert(string(k)) + maxLimitRequestRatios[string(k)] = q + } + + for k := range keys { + minQuantity, minQuantityFound := min[k] + maxQuantity, maxQuantityFound := max[k] + defaultQuantity, defaultQuantityFound := defaults[k] + defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k] + maxRatio, maxRatioFound := maxLimitRequestRatios[k] + + if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("min").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String()))) + } + + if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String()))) + } + + if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String()))) + } + + if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String()))) + } + + if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String()))) + } + + if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("default").Key(string(k)), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String()))) + } + if maxRatioFound && maxRatio.Cmp(*resource.NewQuantity(1, resource.DecimalSI)) < 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is less than 1", maxRatio.String()))) + } + if maxRatioFound && minQuantityFound && maxQuantityFound { + maxRatioValue := float64(maxRatio.Value()) + minQuantityValue := minQuantity.Value() + maxQuantityValue := maxQuantity.Value() + if maxRatio.Value() < resource.MaxMilliValue && minQuantityValue < resource.MaxMilliValue && maxQuantityValue < resource.MaxMilliValue { + maxRatioValue = float64(maxRatio.MilliValue()) / 1000 + minQuantityValue = minQuantity.MilliValue() + maxQuantityValue = maxQuantity.MilliValue() + } + maxRatioLimit := float64(maxQuantityValue) / float64(minQuantityValue) + if maxRatioValue > maxRatioLimit { + allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is greater than max/min = %f", maxRatio.String(), maxRatioLimit))) + } + } + + // for GPU, hugepages and other resources that are not allowed to overcommit, + // the default value and defaultRequest value must match if both are specified + if 
!helper.IsOvercommitAllowed(core.ResourceName(k)) && defaultQuantityFound && defaultRequestQuantityFound && defaultQuantity.Cmp(defaultRequestQuantity) != 0 {
+				allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default value %s must equal defaultRequest value %s in %s", defaultQuantity.String(), defaultRequestQuantity.String(), k)))
+			}
+		}
+	}
+
+	return allErrs
+}
+
+// ValidateServiceAccount tests if required fields in the ServiceAccount are set.
+func ValidateServiceAccount(serviceAccount *core.ServiceAccount) field.ErrorList {
+	allErrs := ValidateObjectMeta(&serviceAccount.ObjectMeta, true, ValidateServiceAccountName, field.NewPath("metadata"))
+	return allErrs
+}
+
+// ValidateServiceAccountUpdate tests if required fields in the ServiceAccount are set.
+func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *core.ServiceAccount) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&newServiceAccount.ObjectMeta, &oldServiceAccount.ObjectMeta, field.NewPath("metadata"))
+	allErrs = append(allErrs, ValidateServiceAccount(newServiceAccount)...)
+	return allErrs
+}
+
+// ValidateSecret tests if required fields in the Secret are set.
+func ValidateSecret(secret *core.Secret) field.ErrorList {
+	allErrs := ValidateObjectMeta(&secret.ObjectMeta, true, ValidateSecretName, field.NewPath("metadata"))
+
+	dataPath := field.NewPath("data")
+	totalSize := 0
+	for key, value := range secret.Data {
+		for _, msg := range validation.IsConfigMapKey(key) {
+			allErrs = append(allErrs, field.Invalid(dataPath.Key(key), key, msg))
+		}
+		totalSize += len(value)
+	}
+	if totalSize > core.MaxSecretSize {
+		allErrs = append(allErrs, field.TooLong(dataPath, "", core.MaxSecretSize))
+	}
+
+	switch secret.Type {
+	case core.SecretTypeServiceAccountToken:
+		// Only require Annotations[kubernetes.io/service-account.name]
+		// Additional fields (like Annotations[kubernetes.io/service-account.uid] and Data[token]) might be contributed later by a controller loop
+		if value := secret.Annotations[core.ServiceAccountNameKey]; len(value) == 0 {
+			allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(core.ServiceAccountNameKey), ""))
+		}
+	case core.SecretTypeOpaque, "":
+		// no-op
+	case core.SecretTypeDockercfg:
+		dockercfgBytes, exists := secret.Data[core.DockerConfigKey]
+		if !exists {
+			allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigKey), ""))
+			break
+		}
+
+		// make sure that the content is well-formed json.
+		if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil {
+			allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigKey), "", err.Error()))
+		}
+	case core.SecretTypeDockerConfigJson:
+		dockerConfigJsonBytes, exists := secret.Data[core.DockerConfigJsonKey]
+		if !exists {
+			allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigJsonKey), ""))
+			break
+		}
+
+		// make sure that the content is well-formed json.
+		if err := json.Unmarshal(dockerConfigJsonBytes, &map[string]interface{}{}); err != nil {
+			allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigJsonKey), "", err.Error()))
+		}
+	case core.SecretTypeBasicAuth:
+		_, usernameFieldExists := secret.Data[core.BasicAuthUsernameKey]
+		_, passwordFieldExists := secret.Data[core.BasicAuthPasswordKey]
+
+		// username or password might be empty, but the field must be present
+		if !usernameFieldExists && !passwordFieldExists {
+			allErrs = append(allErrs, field.Required(dataPath.Key(core.BasicAuthUsernameKey), ""))
+			allErrs = append(allErrs, field.Required(dataPath.Key(core.BasicAuthPasswordKey), ""))
+			break
+		}
+	case core.SecretTypeSSHAuth:
+		if len(secret.Data[core.SSHAuthPrivateKey]) == 0 {
+			allErrs = append(allErrs, field.Required(dataPath.Key(core.SSHAuthPrivateKey), ""))
+			break
+		}
+
+	case core.SecretTypeTLS:
+		if _, exists := secret.Data[core.TLSCertKey]; !exists {
+			allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSCertKey), ""))
+		}
+		if _, exists := secret.Data[core.TLSPrivateKeyKey]; !exists {
+			allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSPrivateKeyKey), ""))
+		}
+		// TODO: Verify that the key matches the cert.
+	default:
+		// no-op
+	}
+
+	return allErrs
+}
+
+// ValidateSecretUpdate tests if required fields in the Secret are set.
+func ValidateSecretUpdate(newSecret, oldSecret *core.Secret) field.ErrorList {
+	allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata"))
+
+	if len(newSecret.Type) == 0 {
+		newSecret.Type = oldSecret.Type
+	}
+
+	allErrs = append(allErrs, ValidateImmutableField(newSecret.Type, oldSecret.Type, field.NewPath("type"))...)
+
+	allErrs = append(allErrs, ValidateSecret(newSecret)...)
+	return allErrs
+}
+
+// ValidateConfigMapName can be used to check whether the given ConfigMap name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateConfigMapName = apimachineryvalidation.NameIsDNSSubdomain
+
+// ValidateConfigMap tests whether required fields in the ConfigMap are set.
+func ValidateConfigMap(cfg *core.ConfigMap) field.ErrorList {
+	allErrs := field.ErrorList{}
+	allErrs = append(allErrs, ValidateObjectMeta(&cfg.ObjectMeta, true, ValidateConfigMapName, field.NewPath("metadata"))...)
+
+	totalSize := 0
+
+	for key, value := range cfg.Data {
+		for _, msg := range validation.IsConfigMapKey(key) {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
+		}
+		// check if we have a duplicate key in the other bag
+		if _, isValue := cfg.BinaryData[key]; isValue {
+			msg := "duplicate of key present in binaryData"
+			allErrs = append(allErrs, field.Invalid(field.NewPath("data").Key(key), key, msg))
+		}
+		totalSize += len(value)
+	}
+	for key, value := range cfg.BinaryData {
+		for _, msg := range validation.IsConfigMapKey(key) {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("binaryData").Key(key), key, msg))
+		}
+		totalSize += len(value)
+	}
+	if totalSize > core.MaxSecretSize {
+		// pass back "" to indicate that the error refers to the whole object.
+		allErrs = append(allErrs, field.TooLong(field.NewPath(""), cfg, core.MaxSecretSize))
+	}
+
+	return allErrs
+}
+
+// ValidateConfigMapUpdate tests if required fields in the ConfigMap are set.
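+// A minimal usage sketch (hypothetical objects):
+//
+//	updated := existing.DeepCopy()
+//	updated.Data["new-key"] = "new-value"
+//	errs := ValidateConfigMapUpdate(updated, existing)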
+func ValidateConfigMapUpdate(newCfg, oldCfg *core.ConfigMap) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, ValidateConfigMap(newCfg)...) + + return allErrs +} + +func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) field.ErrorList { + if quantity.Value() < 0 { + return field.ErrorList{field.Invalid(fldPath, quantity.Value(), "must be a valid resource quantity")} + } + return field.ErrorList{} +} + +// Validates resource requirement spec. +func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + limPath := fldPath.Child("limits") + reqPath := fldPath.Child("requests") + limContainsCpuOrMemory := false + reqContainsCpuOrMemory := false + limContainsHugePages := false + reqContainsHugePages := false + supportedQoSComputeResources := sets.NewString(string(core.ResourceCPU), string(core.ResourceMemory)) + for resourceName, quantity := range requirements.Limits { + + fldPath := limPath.Key(string(resourceName)) + // Validate resource name. + allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) + + // Validate resource quantity. + allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) + + if resourceName == core.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + allErrs = append(allErrs, field.Forbidden(limPath, "ResourceEphemeralStorage field disabled by feature-gate for ResourceRequirements")) + } + if helper.IsHugePageResourceName(resourceName) { + if !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) { + allErrs = append(allErrs, field.Forbidden(limPath, fmt.Sprintf("%s field disabled by feature-gate for ResourceRequirements", resourceName))) + } else { + limContainsHugePages = true + } + } + + if supportedQoSComputeResources.Has(string(resourceName)) { + limContainsCpuOrMemory = true + } + } + for resourceName, quantity := range requirements.Requests { + fldPath := reqPath.Key(string(resourceName)) + // Validate resource name. + allErrs = append(allErrs, validateContainerResourceName(string(resourceName), fldPath)...) + // Validate resource quantity. + allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) + + // Check that request <= limit. + limitQuantity, exists := requirements.Limits[resourceName] + if exists { + // For non overcommitable resources, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. 
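+			// e.g. (hypothetical): requests["example.com/gpu"] = 1 with
+			// limits["example.com/gpu"] = 2 fails here, since extended
+			// resources may not be overcommitted.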
+			if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) {
+				allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName)))
+			} else if quantity.Cmp(limitQuantity) > 0 {
+				allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName)))
+			}
+		} else if !helper.IsOvercommitAllowed(resourceName) {
+			allErrs = append(allErrs, field.Required(limPath, "Limit must be set for non overcommitable resources"))
+		}
+		if helper.IsHugePageResourceName(resourceName) {
+			reqContainsHugePages = true
+		}
+		if supportedQoSComputeResources.Has(string(resourceName)) {
+			reqContainsCpuOrMemory = true
+		}
+
+	}
+	if !limContainsCpuOrMemory && !reqContainsCpuOrMemory && (reqContainsHugePages || limContainsHugePages) {
+		allErrs = append(allErrs, field.Forbidden(fldPath, "HugePages require cpu or memory"))
+	}
+
+	return allErrs
+}
+
+// validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for the set of scopes
+func validateResourceQuotaScopes(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if len(resourceQuotaSpec.Scopes) == 0 {
+		return allErrs
+	}
+	hardLimits := sets.NewString()
+	for k := range resourceQuotaSpec.Hard {
+		hardLimits.Insert(string(k))
+	}
+	fldPath := fld.Child("scopes")
+	scopeSet := sets.NewString()
+	for _, scope := range resourceQuotaSpec.Scopes {
+		if !helper.IsStandardResourceQuotaScope(string(scope)) {
+			allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope"))
+		}
+		for _, k := range hardLimits.List() {
+			if helper.IsStandardQuotaResourceName(k) && !helper.IsResourceQuotaScopeValidForResource(scope, k) {
+				allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "unsupported scope applied to resource"))
+			}
+		}
+		scopeSet.Insert(string(scope))
+	}
+	invalidScopePairs := []sets.String{
+		sets.NewString(string(core.ResourceQuotaScopeBestEffort), string(core.ResourceQuotaScopeNotBestEffort)),
+		sets.NewString(string(core.ResourceQuotaScopeTerminating), string(core.ResourceQuotaScopeNotTerminating)),
+	}
+	for _, invalidScopePair := range invalidScopePairs {
+		if scopeSet.HasAll(invalidScopePair.List()...)
{
+			allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes"))
+		}
+	}
+	return allErrs
+}
+
+// validateScopedResourceSelectorRequirement tests that the match expressions have valid data
+func validateScopedResourceSelectorRequirement(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	hardLimits := sets.NewString()
+	for k := range resourceQuotaSpec.Hard {
+		hardLimits.Insert(string(k))
+	}
+	fldPath := fld.Child("matchExpressions")
+	scopeSet := sets.NewString()
+	for _, req := range resourceQuotaSpec.ScopeSelector.MatchExpressions {
+		if !helper.IsStandardResourceQuotaScope(string(req.ScopeName)) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("scopeName"), req.ScopeName, "unsupported scope"))
+		}
+		for _, k := range hardLimits.List() {
+			if helper.IsStandardQuotaResourceName(k) && !helper.IsResourceQuotaScopeValidForResource(req.ScopeName, k) {
+				allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.ScopeSelector, "unsupported scope applied to resource"))
+			}
+		}
+		switch req.ScopeName {
+		case core.ResourceQuotaScopeBestEffort, core.ResourceQuotaScopeNotBestEffort, core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating:
+			if req.Operator != core.ScopeSelectorOpExists {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), req.Operator,
+					"operator must be 'Exists' when scope is any of ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeBestEffort and ResourceQuotaScopeNotBestEffort"))
+			}
+		}
+
+		switch req.Operator {
+		case core.ScopeSelectorOpIn, core.ScopeSelectorOpNotIn:
+			if len(req.Values) == 0 {
+				allErrs = append(allErrs, field.Required(fldPath.Child("values"),
+					"at least one value is required when `operator` is 'In' or 'NotIn' for scope selector"))
+			}
+		case core.ScopeSelectorOpExists, core.ScopeSelectorOpDoesNotExist:
+			if len(req.Values) != 0 {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("values"), req.Values,
+					"values must not be specified when `operator` is 'Exists' or 'DoesNotExist' for scope selector"))
+			}
+		default:
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), req.Operator, "not a valid selector operator"))
+		}
+		scopeSet.Insert(string(req.ScopeName))
+	}
+	invalidScopePairs := []sets.String{
+		sets.NewString(string(core.ResourceQuotaScopeBestEffort), string(core.ResourceQuotaScopeNotBestEffort)),
+		sets.NewString(string(core.ResourceQuotaScopeTerminating), string(core.ResourceQuotaScopeNotTerminating)),
+	}
+	for _, invalidScopePair := range invalidScopePairs {
+		if scopeSet.HasAll(invalidScopePair.List()...) {
+			allErrs = append(allErrs, field.Invalid(fldPath, resourceQuotaSpec.Scopes, "conflicting scopes"))
+		}
+	}
+
+	return allErrs
+}
+
+// validateScopeSelector tests that the specified scope selector has valid data
+func validateScopeSelector(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if resourceQuotaSpec.ScopeSelector == nil {
+		return allErrs
+	}
+	if !utilfeature.DefaultFeatureGate.Enabled(features.ResourceQuotaScopeSelectors) && resourceQuotaSpec.ScopeSelector != nil {
+		allErrs = append(allErrs, field.Forbidden(fld.Child("scopeSelector"), "ResourceQuotaScopeSelectors feature-gate is disabled"))
+	}
+	allErrs = append(allErrs, validateScopedResourceSelectorRequirement(resourceQuotaSpec, fld.Child("scopeSelector"))...)
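+	// matchExpressions are held to the same conflicting-scope rules as the
+	// plain scopes list validated in validateResourceQuotaScopes above.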
+ return allErrs +} + +// ValidateResourceQuota tests if required fields in the ResourceQuota are set. +func ValidateResourceQuota(resourceQuota *core.ResourceQuota) field.ErrorList { + allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata")) + + allErrs = append(allErrs, ValidateResourceQuotaSpec(&resourceQuota.Spec, field.NewPath("spec"))...) + allErrs = append(allErrs, ValidateResourceQuotaStatus(&resourceQuota.Status, field.NewPath("status"))...) + + return allErrs +} + +func ValidateResourceQuotaStatus(status *core.ResourceQuotaStatus, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + fldPath := fld.Child("hard") + for k, v := range status.Hard { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + fldPath = fld.Child("used") + for k, v := range status.Used { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + + return allErrs +} + +func ValidateResourceQuotaSpec(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + fldPath := fld.Child("hard") + for k, v := range resourceQuotaSpec.Hard { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + allErrs = append(allErrs, validateResourceQuotaScopes(resourceQuotaSpec, fld)...) + allErrs = append(allErrs, validateScopeSelector(resourceQuotaSpec, fld)...) + + return allErrs +} + +// ValidateResourceQuantityValue enforces that specified quantity is valid for specified resource +func ValidateResourceQuantityValue(resource string, value resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + allErrs = append(allErrs, ValidateNonnegativeQuantity(value, fldPath)...) + if helper.IsIntegerResourceName(resource) { + if value.MilliValue()%int64(1000) != int64(0) { + allErrs = append(allErrs, field.Invalid(fldPath, value, isNotIntegerErrorMsg)) + } + } + return allErrs +} + +// ValidateResourceQuotaUpdate tests to see if the update is legal for an end user to make. +// newResourceQuota is updated with fields that cannot be changed. +func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *core.ResourceQuota) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateResourceQuotaSpec(&newResourceQuota.Spec, field.NewPath("spec"))...) 
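+	// e.g. (hypothetical): adding the BestEffort scope to an existing quota
+	// fails the immutability comparison below.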
+ + // ensure scopes cannot change, and that resources are still valid for scope + fldPath := field.NewPath("spec", "scopes") + oldScopes := sets.NewString() + newScopes := sets.NewString() + for _, scope := range newResourceQuota.Spec.Scopes { + newScopes.Insert(string(scope)) + } + for _, scope := range oldResourceQuota.Spec.Scopes { + oldScopes.Insert(string(scope)) + } + if !oldScopes.Equal(newScopes) { + allErrs = append(allErrs, field.Invalid(fldPath, newResourceQuota.Spec.Scopes, fieldImmutableErrorMsg)) + } + + newResourceQuota.Status = oldResourceQuota.Status + return allErrs +} + +// ValidateResourceQuotaStatusUpdate tests to see if the status update is legal for an end user to make. +// newResourceQuota is updated with fields that cannot be changed. +func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *core.ResourceQuota) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) + if len(newResourceQuota.ResourceVersion) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) + } + fldPath := field.NewPath("status", "hard") + for k, v := range newResourceQuota.Status.Hard { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + fldPath = field.NewPath("status", "used") + for k, v := range newResourceQuota.Status.Used { + resPath := fldPath.Key(string(k)) + allErrs = append(allErrs, ValidateResourceQuotaResourceName(string(k), resPath)...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(k), v, resPath)...) + } + newResourceQuota.Spec = oldResourceQuota.Spec + return allErrs +} + +// ValidateNamespace tests if required fields are set. +func ValidateNamespace(namespace *core.Namespace) field.ErrorList { + allErrs := ValidateObjectMeta(&namespace.ObjectMeta, false, ValidateNamespaceName, field.NewPath("metadata")) + for i := range namespace.Spec.Finalizers { + allErrs = append(allErrs, validateFinalizerName(string(namespace.Spec.Finalizers[i]), field.NewPath("spec", "finalizers"))...) + } + return allErrs +} + +// Validate finalizer names +func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { + allErrs := apimachineryvalidation.ValidateFinalizerName(stringValue, fldPath) + for _, err := range validateKubeFinalizerName(stringValue, fldPath) { + allErrs = append(allErrs, err) + } + + return allErrs +} + +// validateKubeFinalizerName checks for "standard" names of legacy finalizer +func validateKubeFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(strings.Split(stringValue, "/")) == 1 { + if !helper.IsStandardFinalizerName(stringValue) { + return append(allErrs, field.Invalid(fldPath, stringValue, "name is neither a standard finalizer name nor is it fully qualified")) + } + } + + return allErrs +} + +// ValidateNamespaceUpdate tests to make sure a namespace update can be applied. 
+// newNamespace is updated with fields that cannot be changed +func ValidateNamespaceUpdate(newNamespace *core.Namespace, oldNamespace *core.Namespace) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) + newNamespace.Spec.Finalizers = oldNamespace.Spec.Finalizers + newNamespace.Status = oldNamespace.Status + return allErrs +} + +// ValidateNamespaceStatusUpdate tests to see if the update is legal for an end user to make. newNamespace is updated with fields +// that cannot be changed. +func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *core.Namespace) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) + newNamespace.Spec = oldNamespace.Spec + if newNamespace.DeletionTimestamp.IsZero() { + if newNamespace.Status.Phase != core.NamespaceActive { + allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Active' if `deletionTimestamp` is empty")) + } + } else { + if newNamespace.Status.Phase != core.NamespaceTerminating { + allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Terminating' if `deletionTimestamp` is not empty")) + } + } + return allErrs +} + +// ValidateNamespaceFinalizeUpdate tests to see if the update is legal for an end user to make. +// newNamespace is updated with fields that cannot be changed. +func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *core.Namespace) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) + + fldPath := field.NewPath("spec", "finalizers") + for i := range newNamespace.Spec.Finalizers { + idxPath := fldPath.Index(i) + allErrs = append(allErrs, validateFinalizerName(string(newNamespace.Spec.Finalizers[i]), idxPath)...) + } + newNamespace.Status = oldNamespace.Status + return allErrs +} + +// ValidateEndpoints tests if required fields are set. +func ValidateEndpoints(endpoints *core.Endpoints) field.ErrorList { + allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...) + allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, field.NewPath("subsets"))...) + return allErrs +} + +func validateEndpointSubsets(subsets []core.EndpointSubset, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i := range subsets { + ss := &subsets[i] + idxPath := fldPath.Index(i) + + // EndpointSubsets must include endpoint address. For headless service, we allow its endpoints not to have ports. + if len(ss.Addresses) == 0 && len(ss.NotReadyAddresses) == 0 { + //TODO: consider adding a RequiredOneOf() error for this and similar cases + allErrs = append(allErrs, field.Required(idxPath, "must specify `addresses` or `notReadyAddresses`")) + } + for addr := range ss.Addresses { + allErrs = append(allErrs, validateEndpointAddress(&ss.Addresses[addr], idxPath.Child("addresses").Index(addr))...) + } + for addr := range ss.NotReadyAddresses { + allErrs = append(allErrs, validateEndpointAddress(&ss.NotReadyAddresses[addr], idxPath.Child("notReadyAddresses").Index(addr))...) 
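+		// not-ready addresses receive the same per-address validation as ready
+		// ones; the two lists differ only in readiness, not in shape.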
+ } + for port := range ss.Ports { + allErrs = append(allErrs, validateEndpointPort(&ss.Ports[port], len(ss.Ports) > 1, idxPath.Child("ports").Index(port))...) + } + } + + return allErrs +} + +func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for _, msg := range validation.IsValidIP(address.IP) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, msg)) + } + if len(address.Hostname) > 0 { + allErrs = append(allErrs, ValidateDNS1123Label(address.Hostname, fldPath.Child("hostname"))...) + } + // During endpoint update, verify that NodeName is a DNS subdomain and transition rules allow the update + if address.NodeName != nil { + for _, msg := range ValidateNodeName(*address.NodeName, false) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), *address.NodeName, msg)) + } + } + allErrs = append(allErrs, validateNonSpecialIP(address.IP, fldPath.Child("ip"))...) + return allErrs +} + +func validateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList { + // We disallow some IPs as endpoints or external-ips. Specifically, + // unspecified and loopback addresses are nonsensical and link-local + // addresses tend to be used for node-centric purposes (e.g. metadata + // service). + allErrs := field.ErrorList{} + ip := net.ParseIP(ipAddress) + if ip == nil { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "must be a valid IP address")) + return allErrs + } + if ip.IsUnspecified() { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be unspecified (0.0.0.0)")) + } + if ip.IsLoopback() { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the loopback range (127.0.0.0/8)")) + } + if ip.IsLinkLocalUnicast() { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local range (169.254.0.0/16)")) + } + if ip.IsLinkLocalMulticast() { + allErrs = append(allErrs, field.Invalid(fldPath, ipAddress, "may not be in the link-local multicast range (224.0.0.0/24)")) + } + return allErrs +} + +func validateEndpointPort(port *core.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if requireName && len(port.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else if len(port.Name) != 0 { + allErrs = append(allErrs, ValidateDNS1123Label(port.Name, fldPath.Child("name"))...) + } + for _, msg := range validation.IsValidPortNum(int(port.Port)) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("port"), port.Port, msg)) + } + if len(port.Protocol) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("protocol"), "")) + } else if !utilfeature.DefaultFeatureGate.Enabled(features.SCTPSupport) && port.Protocol == core.ProtocolSCTP { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, []string{string(core.ProtocolTCP), string(core.ProtocolUDP)})) + } else if !supportedPortProtocols.Has(string(port.Protocol)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("protocol"), port.Protocol, supportedPortProtocols.List())) + } + return allErrs +} + +// ValidateEndpointsUpdate tests to make sure an endpoints update can be applied. +// NodeName changes are allowed during update to accommodate the case where nodeIP or PodCIDR is reused. +// An existing endpoint ip will have a different nodeName if this happens. 
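+// A minimal usage sketch (hypothetical objects; nodeName is a *string target):
+//
+//	updated := current.DeepCopy()
+//	updated.Subsets[0].Addresses[0].NodeName = &nodeName // allowed, per the note above
+//	errs := ValidateEndpointsUpdate(updated, current)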
+func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *core.Endpoints) field.ErrorList { + allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata")) + allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, field.NewPath("subsets"))...) + allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(newEndpoints.Annotations, field.NewPath("annotations"))...) + return allErrs +} + +// ValidateSecurityContext ensure the security context contains valid settings +func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + //this should only be true for testing since SecurityContext is defaulted by the core + if sc == nil { + return allErrs + } + + if sc.Privileged != nil { + if *sc.Privileged && !capabilities.Get().AllowPrivileged { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("privileged"), "disallowed by cluster policy")) + } + } + + if sc.RunAsUser != nil { + for _, msg := range validation.IsValidUserID(*sc.RunAsUser) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsUser"), *sc.RunAsUser, msg)) + } + } + + if sc.RunAsGroup != nil { + for _, msg := range validation.IsValidGroupID(*sc.RunAsGroup) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("runAsGroup"), *sc.RunAsGroup, msg)) + } + } + + if sc.AllowPrivilegeEscalation != nil && !*sc.AllowPrivilegeEscalation { + if sc.Privileged != nil && *sc.Privileged { + allErrs = append(allErrs, field.Invalid(fldPath, sc, "cannot set `allowPrivilegeEscalation` to false and `privileged` to true")) + } + + if sc.Capabilities != nil { + for _, cap := range sc.Capabilities.Add { + if string(cap) == "CAP_SYS_ADMIN" { + allErrs = append(allErrs, field.Invalid(fldPath, sc, "cannot set `allowPrivilegeEscalation` to false and `capabilities.Add` CAP_SYS_ADMIN")) + } + } + } + } + + return allErrs +} + +func ValidatePodLogOptions(opts *core.PodLogOptions) field.ErrorList { + allErrs := field.ErrorList{} + if opts.TailLines != nil && *opts.TailLines < 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg)) + } + if opts.LimitBytes != nil && *opts.LimitBytes < 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("limitBytes"), *opts.LimitBytes, "must be greater than 0")) + } + switch { + case opts.SinceSeconds != nil && opts.SinceTime != nil: + allErrs = append(allErrs, field.Forbidden(field.NewPath(""), "at most one of `sinceTime` or `sinceSeconds` may be specified")) + case opts.SinceSeconds != nil: + if *opts.SinceSeconds < 1 { + allErrs = append(allErrs, field.Invalid(field.NewPath("sinceSeconds"), *opts.SinceSeconds, "must be greater than 0")) + } + } + return allErrs +} + +// ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus +func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + for i, ingress := range status.Ingress { + idxPath := fldPath.Child("ingress").Index(i) + if len(ingress.IP) > 0 { + if isIP := (net.ParseIP(ingress.IP) != nil); !isIP { + allErrs = append(allErrs, field.Invalid(idxPath.Child("ip"), ingress.IP, "must be a valid IP address")) + } + } + if len(ingress.Hostname) > 0 { + for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg)) + } + if isIP := (net.ParseIP(ingress.Hostname) != nil); 
isIP { + allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address")) + } + } + } + return allErrs +} + +// validateVolumeNodeAffinity tests that the PersistentVolume.NodeAffinity has valid data +// returns: +// - true if volumeNodeAffinity is set +// - errorList if there are validation errors +func validateVolumeNodeAffinity(nodeAffinity *core.VolumeNodeAffinity, fldPath *field.Path) (bool, field.ErrorList) { + allErrs := field.ErrorList{} + + if nodeAffinity == nil { + return false, allErrs + } + + if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) { + allErrs = append(allErrs, field.Forbidden(fldPath, "Volume node affinity is disabled by feature-gate")) + } + + if nodeAffinity.Required != nil { + allErrs = append(allErrs, ValidateNodeSelector(nodeAffinity.Required, fldPath.Child("required"))...) + } else { + allErrs = append(allErrs, field.Required(fldPath.Child("required"), "must specify required node constraints")) + } + + return true, allErrs +} + +// ValidateCIDR validates whether a CIDR matches the conventions expected by net.ParseCIDR +func ValidateCIDR(cidr string) (*net.IPNet, error) { + _, net, err := net.ParseCIDR(cidr) + if err != nil { + return nil, err + } + return net, nil +} + +func IsDecremented(update, old *int32) bool { + if update == nil && old != nil { + return true + } + if update == nil || old == nil { + return false + } + return *update < *old +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go new file mode 100644 index 000000000..a4801c2e3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go @@ -0,0 +1,5415 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package core + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource. +func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource { + if in == nil { + return nil + } + out := new(AWSElasticBlockStoreVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Affinity) DeepCopyInto(out *Affinity) { + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(NodeAffinity) + (*in).DeepCopyInto(*out) + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + (*in).DeepCopyInto(*out) + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. +func (in *Affinity) DeepCopy() *Affinity { + if in == nil { + return nil + } + out := new(Affinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. +func (in *AttachedVolume) DeepCopy() *AttachedVolume { + if in == nil { + return nil + } + out := new(AttachedVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. +func (in *AvoidPods) DeepCopy() *AvoidPods { + if in == nil { + return nil + } + out := new(AvoidPods) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + *out = new(AzureDataDiskCachingMode) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(AzureDataDiskKind) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. +func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { + if in == nil { + return nil + } + out := new(AzureDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) { + *out = *in + if in.SecretNamespace != nil { + in, out := &in.SecretNamespace, &out.SecretNamespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource. 
+func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource { + if in == nil { + return nil + } + out := new(AzureFilePersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource. +func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource { + if in == nil { + return nil + } + out := new(AzureFileVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Binding) DeepCopyInto(out *Binding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Target = in.Target + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. +func (in *Binding) DeepCopy() *Binding { + if in == nil { + return nil + } + out := new(Binding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Binding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { + *out = *in + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ControllerPublishSecretRef != nil { + in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef + *out = new(SecretReference) + **out = **in + } + if in.NodeStageSecretRef != nil { + in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef + *out = new(SecretReference) + **out = **in + } + if in.NodePublishSecretRef != nil { + in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. +func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Capabilities) DeepCopyInto(out *Capabilities) { + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. +func (in *Capabilities) DeepCopy() *Capabilities { + if in == nil { + return nil + } + out := new(Capabilities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource. +func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CephFSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource. +func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource { + if in == nil { + return nil + } + out := new(CephFSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderPersistentVolumeSource. +func (in *CinderPersistentVolumeSource) DeepCopy() *CinderPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CinderPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource. +func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { + if in == nil { + return nil + } + out := new(CinderVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig. +func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { + if in == nil { + return nil + } + out := new(ClientIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition. +func (in *ComponentCondition) DeepCopy() *ComponentCondition { + if in == nil { + return nil + } + out := new(ComponentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. +func (in *ComponentStatus) DeepCopy() *ComponentStatus { + if in == nil { + return nil + } + out := new(ComponentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComponentStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList. +func (in *ComponentStatusList) DeepCopy() *ComponentStatusList { + if in == nil { + return nil + } + out := new(ComponentStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComponentStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.BinaryData != nil { + in, out := &in.BinaryData, &out.BinaryData + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]byte, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap. +func (in *ConfigMap) DeepCopy() *ConfigMap { + if in == nil { + return nil + } + out := new(ConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ConfigMap) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. +func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { + if in == nil { + return nil + } + out := new(ConfigMapEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector. +func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { + if in == nil { + return nil + } + out := new(ConfigMapKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList. +func (in *ConfigMapList) DeepCopy() *ConfigMapList { + if in == nil { + return nil + } + out := new(ConfigMapList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigMapList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapNodeConfigSource) DeepCopyInto(out *ConfigMapNodeConfigSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNodeConfigSource. +func (in *ConfigMapNodeConfigSource) DeepCopy() *ConfigMapNodeConfigSource { + if in == nil { + return nil + } + out := new(ConfigMapNodeConfigSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection. 
+func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection { + if in == nil { + return nil + } + out := new(ConfigMapProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource. +func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource { + if in == nil { + return nil + } + out := new(ConfigMapVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Container) DeepCopyInto(out *Container) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. +func (in *Container) DeepCopy() *Container { + if in == nil { + return nil + } + out := new(Container) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerImage) DeepCopyInto(out *ContainerImage) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage. +func (in *ContainerImage) DeepCopy() *ContainerImage { + if in == nil { + return nil + } + out := new(ContainerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerPort) DeepCopyInto(out *ContainerPort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort. +func (in *ContainerPort) DeepCopy() *ContainerPort { + if in == nil { + return nil + } + out := new(ContainerPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerState) DeepCopyInto(out *ContainerState) { + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + *out = new(ContainerStateWaiting) + **out = **in + } + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(ContainerStateRunning) + (*in).DeepCopyInto(*out) + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + *out = new(ContainerStateTerminated) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState. +func (in *ContainerState) DeepCopy() *ContainerState { + if in == nil { + return nil + } + out := new(ContainerState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning. +func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning { + if in == nil { + return nil + } + out := new(ContainerStateRunning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + in.FinishedAt.DeepCopyInto(&out.FinishedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated. +func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated { + if in == nil { + return nil + } + out := new(ContainerStateTerminated) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting. 
+func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting { + if in == nil { + return nil + } + out := new(ContainerStateWaiting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { + *out = *in + in.State.DeepCopyInto(&out.State) + in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus. +func (in *ContainerStatus) DeepCopy() *ContainerStatus { + if in == nil { + return nil + } + out := new(ContainerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint. +func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint { + if in == nil { + return nil + } + out := new(DaemonEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection. +func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection { + if in == nil { + return nil + } + out := new(DownwardAPIProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) { + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile. +func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource. 
+func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) { + *out = *in + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource. +func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource { + if in == nil { + return nil + } + out := new(EmptyDirVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) { + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress. +func (in *EndpointAddress) DeepCopy() *EndpointAddress { + if in == nil { + return nil + } + out := new(EndpointAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. +func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset. +func (in *EndpointSubset) DeepCopy() *EndpointSubset { + if in == nil { + return nil + } + out := new(EndpointSubset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoints) DeepCopyInto(out *Endpoints) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints. 
+func (in *Endpoints) DeepCopy() *Endpoints { + if in == nil { + return nil + } + out := new(Endpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Endpoints) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointsList) DeepCopyInto(out *EndpointsList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList. +func (in *EndpointsList) DeepCopy() *EndpointsList { + if in == nil { + return nil + } + out := new(EndpointsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) { + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + (*in).DeepCopyInto(*out) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource. +func (in *EnvFromSource) DeepCopy() *EnvFromSource { + if in == nil { + return nil + } + out := new(EnvFromSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVar) DeepCopyInto(out *EnvVar) { + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(EnvVarSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. +func (in *EnvVar) DeepCopy() *EnvVar { + if in == nil { + return nil + } + out := new(EnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) { + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource. 
+func (in *EnvVarSource) DeepCopy() *EnvVarSource { + if in == nil { + return nil + } + out := new(EnvVarSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.InvolvedObject = in.InvolvedObject + out.Source = in.Source + in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) + in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + if in.Related != nil { + in, out := &in.Related, &out.Related + *out = new(ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSource) DeepCopyInto(out *EventSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource. +func (in *EventSource) DeepCopy() *EventSource { + if in == nil { + return nil + } + out := new(EventSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExecAction) DeepCopyInto(out *ExecAction) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction. +func (in *ExecAction) DeepCopy() *ExecAction { + if in == nil { + return nil + } + out := new(ExecAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) { + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(int32) + **out = **in + } + if in.WWIDs != nil { + in, out := &in.WWIDs, &out.WWIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource. +func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { + if in == nil { + return nil + } + out := new(FCVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource. +func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource { + if in == nil { + return nil + } + out := new(FlexPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource. +func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource { + if in == nil { + return nil + } + out := new(FlexVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource. +func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource { + if in == nil { + return nil + } + out := new(FlockerVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource. +func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource { + if in == nil { + return nil + } + out := new(GCEPersistentDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource. +func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { + if in == nil { + return nil + } + out := new(GitRepoVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) { + *out = *in + if in.EndpointsNamespace != nil { + in, out := &in.EndpointsNamespace, &out.EndpointsNamespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource. +func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource. +func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) { + *out = *in + out.Port = in.Port + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction. +func (in *HTTPGetAction) DeepCopy() *HTTPGetAction { + if in == nil { + return nil + } + out := new(HTTPGetAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. +func (in *HTTPHeader) DeepCopy() *HTTPHeader { + if in == nil { + return nil + } + out := new(HTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Handler) DeepCopyInto(out *Handler) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + (*in).DeepCopyInto(*out) + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + (*in).DeepCopyInto(*out) + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler. +func (in *Handler) DeepCopy() *Handler { + if in == nil { + return nil + } + out := new(Handler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostAlias) DeepCopyInto(out *HostAlias) { + *out = *in + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias. +func (in *HostAlias) DeepCopy() *HostAlias { + if in == nil { + return nil + } + out := new(HostAlias) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(HostPathType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource. +func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { + if in == nil { + return nil + } + out := new(HostPathVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource. +func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource. 
+func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath. +func (in *KeyToPath) DeepCopy() *KeyToPath { + if in == nil { + return nil + } + out := new(KeyToPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + *out = new(Handler) + (*in).DeepCopyInto(*out) + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(Handler) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle. +func (in *Lifecycle) DeepCopy() *Lifecycle { + if in == nil { + return nil + } + out := new(Lifecycle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRange) DeepCopyInto(out *LimitRange) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange. +func (in *LimitRange) DeepCopy() *LimitRange { + if in == nil { + return nil + } + out := new(LimitRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LimitRange) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem. 
+func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { + if in == nil { + return nil + } + out := new(LimitRangeItem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList. +func (in *LimitRangeList) DeepCopy() *LimitRangeList { + if in == nil { + return nil + } + out := new(LimitRangeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LimitRangeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec. +func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { + if in == nil { + return nil + } + out := new(LimitRangeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if (*in)[i] != nil { + (*out)[i] = (*in)[i].DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress. +func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress { + if in == nil { + return nil + } + out := new(LoadBalancerIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus. +func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus { + if in == nil { + return nil + } + out := new(LoadBalancerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. +func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { + if in == nil { + return nil + } + out := new(LocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) { + *out = *in + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource. +func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource { + if in == nil { + return nil + } + out := new(LocalVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource. +func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource { + if in == nil { + return nil + } + out := new(NFSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Namespace) DeepCopyInto(out *Namespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace. +func (in *Namespace) DeepCopy() *Namespace { + if in == nil { + return nil + } + out := new(Namespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Namespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList. 
+func (in *NamespaceList) DeepCopy() *NamespaceList { + if in == nil { + return nil + } + out := new(NamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) { + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec. +func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { + if in == nil { + return nil + } + out := new(NamespaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus. +func (in *NamespaceStatus) DeepCopy() *NamespaceStatus { + if in == nil { + return nil + } + out := new(NamespaceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Node) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAddress) DeepCopyInto(out *NodeAddress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress. +func (in *NodeAddress) DeepCopy() *NodeAddress { + if in == nil { + return nil + } + out := new(NodeAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity. +func (in *NodeAffinity) DeepCopy() *NodeAffinity { + if in == nil { + return nil + } + out := new(NodeAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeCondition) DeepCopyInto(out *NodeCondition) { + *out = *in + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition. +func (in *NodeCondition) DeepCopy() *NodeCondition { + if in == nil { + return nil + } + out := new(NodeCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) { + *out = *in + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapNodeConfigSource) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource. +func (in *NodeConfigSource) DeepCopy() *NodeConfigSource { + if in == nil { + return nil + } + out := new(NodeConfigSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) { + *out = *in + if in.Assigned != nil { + in, out := &in.Assigned, &out.Assigned + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + if in.LastKnownGood != nil { + in, out := &in.LastKnownGood, &out.LastKnownGood + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus. +func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus { + if in == nil { + return nil + } + out := new(NodeConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) { + *out = *in + out.KubeletEndpoint = in.KubeletEndpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints. 
+func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeDaemonEndpoints)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeList) DeepCopyInto(out *NodeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Node, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList.
+func (in *NodeList) DeepCopy() *NodeList {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions.
+func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeProxyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NodeProxyOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeResources) DeepCopyInto(out *NodeResources) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources.
+func (in *NodeResources) DeepCopy() *NodeResources {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeResources)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelector) DeepCopyInto(out *NodeSelector) {
+	*out = *in
+	if in.NodeSelectorTerms != nil {
+		in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms
+		*out = make([]NodeSelectorTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector.
+func (in *NodeSelector) DeepCopy() *NodeSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) {
+	*out = *in
+	if in.Values != nil {
+		in, out := &in.Values, &out.Values
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement.
+func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelectorRequirement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) {
+	*out = *in
+	if in.MatchExpressions != nil {
+		in, out := &in.MatchExpressions, &out.MatchExpressions
+		*out = make([]NodeSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.MatchFields != nil {
+		in, out := &in.MatchFields, &out.MatchFields
+		*out = make([]NodeSelectorRequirement, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm.
+func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSelectorTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
+	*out = *in
+	if in.Taints != nil {
+		in, out := &in.Taints, &out.Taints
+		*out = make([]Taint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ConfigSource != nil {
+		in, out := &in.ConfigSource, &out.ConfigSource
+		*out = new(NodeConfigSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
+func (in *NodeSpec) DeepCopy() *NodeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Allocatable != nil {
+		in, out := &in.Allocatable, &out.Allocatable
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]NodeCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Addresses != nil {
+		in, out := &in.Addresses, &out.Addresses
+		*out = make([]NodeAddress, len(*in))
+		copy(*out, *in)
+	}
+	out.DaemonEndpoints = in.DaemonEndpoints
+	out.NodeInfo = in.NodeInfo
+	if in.Images != nil {
+		in, out := &in.Images, &out.Images
+		*out = make([]ContainerImage, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumesInUse != nil {
+		in, out := &in.VolumesInUse, &out.VolumesInUse
+		*out = make([]UniqueVolumeName, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumesAttached != nil {
+		in, out := &in.VolumesAttached, &out.VolumesAttached
+		*out = make([]AttachedVolume, len(*in))
+		copy(*out, *in)
+	}
+	if in.Config != nil {
+		in, out := &in.Config, &out.Config
+		*out = new(NodeConfigStatus)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
+func (in *NodeStatus) DeepCopy() *NodeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo.
+func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeSystemInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector.
+func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectFieldSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
+func (in *ObjectReference) DeepCopy() *ObjectReference {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ObjectReference) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	out.Status = in.Status
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume.
+func (in *PersistentVolume) DeepCopy() *PersistentVolume {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolume)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolume) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim.
+func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaim)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) {
+	*out = *in
+	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition.
+func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PersistentVolumeClaim, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList.
+func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) {
+	*out = *in
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	in.Resources.DeepCopyInto(&out.Resources)
+	if in.StorageClassName != nil {
+		in, out := &in.StorageClassName, &out.StorageClassName
+		*out = new(string)
+		**out = **in
+	}
+	if in.VolumeMode != nil {
+		in, out := &in.VolumeMode, &out.VolumeMode
+		*out = new(PersistentVolumeMode)
+		**out = **in
+	}
+	if in.DataSource != nil {
+		in, out := &in.DataSource, &out.DataSource
+		*out = new(TypedLocalObjectReference)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec.
+func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) {
+	*out = *in
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]PersistentVolumeClaimCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus.
+func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource.
+func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeClaimVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PersistentVolume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList.
+func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PersistentVolumeList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
+	*out = *in
+	if in.GCEPersistentDisk != nil {
+		in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk
+		*out = new(GCEPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.AWSElasticBlockStore != nil {
+		in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore
+		*out = new(AWSElasticBlockStoreVolumeSource)
+		**out = **in
+	}
+	if in.HostPath != nil {
+		in, out := &in.HostPath, &out.HostPath
+		*out = new(HostPathVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Glusterfs != nil {
+		in, out := &in.Glusterfs, &out.Glusterfs
+		*out = new(GlusterfsPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NFS != nil {
+		in, out := &in.NFS, &out.NFS
+		*out = new(NFSVolumeSource)
+		**out = **in
+	}
+	if in.RBD != nil {
+		in, out := &in.RBD, &out.RBD
+		*out = new(RBDPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Quobyte != nil {
+		in, out := &in.Quobyte, &out.Quobyte
+		*out = new(QuobyteVolumeSource)
+		**out = **in
+	}
+	if in.ISCSI != nil {
+		in, out := &in.ISCSI, &out.ISCSI
+		*out = new(ISCSIPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FlexVolume != nil {
+		in, out := &in.FlexVolume, &out.FlexVolume
+		*out = new(FlexPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Cinder != nil {
+		in, out := &in.Cinder, &out.Cinder
+		*out = new(CinderPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CephFS != nil {
+		in, out := &in.CephFS, &out.CephFS
+		*out = new(CephFSPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.FC != nil {
+		in, out := &in.FC, &out.FC
+		*out = new(FCVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Flocker != nil {
+		in, out := &in.Flocker, &out.Flocker
+		*out = new(FlockerVolumeSource)
+		**out = **in
+	}
+	if in.AzureFile != nil {
+		in, out := &in.AzureFile, &out.AzureFile
+		*out = new(AzureFilePersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.VsphereVolume != nil {
+		in, out := &in.VsphereVolume, &out.VsphereVolume
+		*out = new(VsphereVirtualDiskVolumeSource)
+		**out = **in
+	}
+	if in.AzureDisk != nil {
+		in, out := &in.AzureDisk, &out.AzureDisk
+		*out = new(AzureDiskVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.PhotonPersistentDisk != nil {
+		in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk
+		*out = new(PhotonPersistentDiskVolumeSource)
+		**out = **in
+	}
+	if in.PortworxVolume != nil {
+		in, out := &in.PortworxVolume, &out.PortworxVolume
+		*out = new(PortworxVolumeSource)
+		**out = **in
+	}
+	if in.ScaleIO != nil {
+		in, out := &in.ScaleIO, &out.ScaleIO
+		*out = new(ScaleIOPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Local != nil {
+		in, out := &in.Local, &out.Local
+		*out = new(LocalVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.StorageOS != nil {
+		in, out := &in.StorageOS, &out.StorageOS
+		*out = new(StorageOSPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.CSI != nil {
+		in, out := &in.CSI, &out.CSI
+		*out = new(CSIPersistentVolumeSource)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource.
+func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) {
+	*out = *in
+	if in.Capacity != nil {
+		in, out := &in.Capacity, &out.Capacity
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource)
+	if in.AccessModes != nil {
+		in, out := &in.AccessModes, &out.AccessModes
+		*out = make([]PersistentVolumeAccessMode, len(*in))
+		copy(*out, *in)
+	}
+	if in.ClaimRef != nil {
+		in, out := &in.ClaimRef, &out.ClaimRef
+		*out = new(ObjectReference)
+		**out = **in
+	}
+	if in.MountOptions != nil {
+		in, out := &in.MountOptions, &out.MountOptions
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.VolumeMode != nil {
+		in, out := &in.VolumeMode, &out.VolumeMode
+		*out = new(PersistentVolumeMode)
+		**out = **in
+	}
+	if in.NodeAffinity != nil {
+		in, out := &in.NodeAffinity, &out.NodeAffinity
+		*out = new(VolumeNodeAffinity)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec.
+func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus.
+func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PersistentVolumeStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource.
+func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PhotonPersistentDiskVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Pod) DeepCopyInto(out *Pod) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod.
+func (in *Pod) DeepCopy() *Pod {
+	if in == nil {
+		return nil
+	}
+	out := new(Pod)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Pod) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAffinity) DeepCopyInto(out *PodAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]WeightedPodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity.
+func (in *PodAffinity) DeepCopy() *PodAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) {
+	*out = *in
+	if in.LabelSelector != nil {
+		in, out := &in.LabelSelector, &out.LabelSelector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Namespaces != nil {
+		in, out := &in.Namespaces, &out.Namespaces
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm.
+func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAffinityTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) {
+	*out = *in
+	if in.RequiredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution
+		*out = make([]PodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.PreferredDuringSchedulingIgnoredDuringExecution != nil {
+		in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution
+		*out = make([]WeightedPodAffinityTerm, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity.
+func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAntiAffinity)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions.
+func (in *PodAttachOptions) DeepCopy() *PodAttachOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodAttachOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodAttachOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCondition) DeepCopyInto(out *PodCondition) {
+	*out = *in
+	in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition.
+func (in *PodCondition) DeepCopy() *PodCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(PodCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) {
+	*out = *in
+	if in.Nameservers != nil {
+		in, out := &in.Nameservers, &out.Nameservers
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Searches != nil {
+		in, out := &in.Searches, &out.Searches
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Options != nil {
+		in, out := &in.Options, &out.Options
+		*out = make([]PodDNSConfigOption, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig.
+func (in *PodDNSConfig) DeepCopy() *PodDNSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDNSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) {
+	*out = *in
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption.
+func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption {
+	if in == nil {
+		return nil
+	}
+	out := new(PodDNSConfigOption)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Command != nil {
+		in, out := &in.Command, &out.Command
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions.
+func (in *PodExecOptions) DeepCopy() *PodExecOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodExecOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodExecOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodList) DeepCopyInto(out *PodList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Pod, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList.
+func (in *PodList) DeepCopy() *PodList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.SinceSeconds != nil {
+		in, out := &in.SinceSeconds, &out.SinceSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.SinceTime != nil {
+		in, out := &in.SinceTime, &out.SinceTime
+		*out = (*in).DeepCopy()
+	}
+	if in.TailLines != nil {
+		in, out := &in.TailLines, &out.TailLines
+		*out = new(int64)
+		**out = **in
+	}
+	if in.LimitBytes != nil {
+		in, out := &in.LimitBytes, &out.LimitBytes
+		*out = new(int64)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions.
+func (in *PodLogOptions) DeepCopy() *PodLogOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodLogOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodLogOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Ports != nil {
+		in, out := &in.Ports, &out.Ports
+		*out = make([]int32, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions.
+func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodPortForwardOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions.
+func (in *PodProxyOptions) DeepCopy() *PodProxyOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(PodProxyOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodProxyOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate.
+func (in *PodReadinessGate) DeepCopy() *PodReadinessGate {
+	if in == nil {
+		return nil
+	}
+	out := new(PodReadinessGate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
+	*out = *in
+	if in.ShareProcessNamespace != nil {
+		in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SELinuxOptions != nil {
+		in, out := &in.SELinuxOptions, &out.SELinuxOptions
+		*out = new(SELinuxOptions)
+		**out = **in
+	}
+	if in.RunAsUser != nil {
+		in, out := &in.RunAsUser, &out.RunAsUser
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsGroup != nil {
+		in, out := &in.RunAsGroup, &out.RunAsGroup
+		*out = new(int64)
+		**out = **in
+	}
+	if in.RunAsNonRoot != nil {
+		in, out := &in.RunAsNonRoot, &out.RunAsNonRoot
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SupplementalGroups != nil {
+		in, out := &in.SupplementalGroups, &out.SupplementalGroups
+		*out = make([]int64, len(*in))
+		copy(*out, *in)
+	}
+	if in.FSGroup != nil {
+		in, out := &in.FSGroup, &out.FSGroup
+		*out = new(int64)
+		**out = **in
+	}
+	if in.Sysctls != nil {
+		in, out := &in.Sysctls, &out.Sysctls
+		*out = make([]Sysctl, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext.
+func (in *PodSecurityContext) DeepCopy() *PodSecurityContext {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSecurityContext)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSignature) DeepCopyInto(out *PodSignature) {
+	*out = *in
+	if in.PodController != nil {
+		in, out := &in.PodController, &out.PodController
+		*out = new(v1.OwnerReference)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature.
+func (in *PodSignature) DeepCopy() *PodSignature {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSignature)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodSpec) DeepCopyInto(out *PodSpec) {
+	*out = *in
+	if in.Volumes != nil {
+		in, out := &in.Volumes, &out.Volumes
+		*out = make([]Volume, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.InitContainers != nil {
+		in, out := &in.InitContainers, &out.InitContainers
+		*out = make([]Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Containers != nil {
+		in, out := &in.Containers, &out.Containers
+		*out = make([]Container, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TerminationGracePeriodSeconds != nil {
+		in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.ActiveDeadlineSeconds != nil {
+		in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
+		*out = new(int64)
+		**out = **in
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.AutomountServiceAccountToken != nil {
+		in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken
+		*out = new(bool)
+		**out = **in
+	}
+	if in.SecurityContext != nil {
+		in, out := &in.SecurityContext, &out.SecurityContext
+		*out = new(PodSecurityContext)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ImagePullSecrets != nil {
+		in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+		*out = make([]LocalObjectReference, len(*in))
+		copy(*out, *in)
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.HostAliases != nil {
+		in, out := &in.HostAliases, &out.HostAliases
+		*out = make([]HostAlias, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Priority != nil {
+		in, out := &in.Priority, &out.Priority
+		*out = new(int32)
+		**out = **in
+	}
+	if in.DNSConfig != nil {
+		in, out := &in.DNSConfig, &out.DNSConfig
+		*out = new(PodDNSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ReadinessGates != nil {
+		in, out := &in.ReadinessGates, &out.ReadinessGates
+		*out = make([]PodReadinessGate, len(*in))
+		copy(*out, *in)
+	}
+	if in.RuntimeClassName != nil {
+		in, out := &in.RuntimeClassName, &out.RuntimeClassName
+		*out = new(string)
+		**out = **in
+	}
+	if in.EnableServiceLinks != nil {
+		in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
+		*out = new(bool)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec.
+func (in *PodSpec) DeepCopy() *PodSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodStatus) DeepCopyInto(out *PodStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]PodCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.StartTime != nil {
+		in, out := &in.StartTime, &out.StartTime
+		*out = (*in).DeepCopy()
+	}
+	if in.InitContainerStatuses != nil {
+		in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
+		*out = make([]ContainerStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ContainerStatuses != nil {
+		in, out := &in.ContainerStatuses, &out.ContainerStatuses
+		*out = make([]ContainerStatus, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus.
+func (in *PodStatus) DeepCopy() *PodStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PodStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult.
+func (in *PodStatusResult) DeepCopy() *PodStatusResult {
+	if in == nil {
+		return nil
+	}
+	out := new(PodStatusResult)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodStatusResult) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplate) DeepCopyInto(out *PodTemplate) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Template.DeepCopyInto(&out.Template)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate.
+func (in *PodTemplate) DeepCopy() *PodTemplate {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplate)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodTemplate) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]PodTemplate, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList.
+func (in *PodTemplateList) DeepCopy() *PodTemplateList {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplateList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PodTemplateList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec.
+func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(PodTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource.
+func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(PortworxVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Preconditions) DeepCopyInto(out *Preconditions) {
+	*out = *in
+	if in.UID != nil {
+		in, out := &in.UID, &out.UID
+		*out = new(types.UID)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions.
+func (in *Preconditions) DeepCopy() *Preconditions {
+	if in == nil {
+		return nil
+	}
+	out := new(Preconditions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) {
+	*out = *in
+	in.PodSignature.DeepCopyInto(&out.PodSignature)
+	in.EvictionTime.DeepCopyInto(&out.EvictionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry.
+func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry {
+	if in == nil {
+		return nil
+	}
+	out := new(PreferAvoidPodsEntry)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) {
+	*out = *in
+	in.Preference.DeepCopyInto(&out.Preference)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm.
+func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm {
+	if in == nil {
+		return nil
+	}
+	out := new(PreferredSchedulingTerm)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Probe) DeepCopyInto(out *Probe) {
+	*out = *in
+	in.Handler.DeepCopyInto(&out.Handler)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe.
+func (in *Probe) DeepCopy() *Probe {
+	if in == nil {
+		return nil
+	}
+	out := new(Probe)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) {
+	*out = *in
+	if in.Sources != nil {
+		in, out := &in.Sources, &out.Sources
+		*out = make([]VolumeProjection, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DefaultMode != nil {
+		in, out := &in.DefaultMode, &out.DefaultMode
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource.
+func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ProjectedVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource.
+func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(QuobyteVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) {
+	*out = *in
+	if in.CephMonitors != nil {
+		in, out := &in.CephMonitors, &out.CephMonitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource.
+func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(RBDPersistentVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) {
+	*out = *in
+	if in.CephMonitors != nil {
+		in, out := &in.CephMonitors, &out.CephMonitors
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(LocalObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource.
+func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource {
+	if in == nil {
+		return nil
+	}
+	out := new(RBDVolumeSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Data != nil {
+		in, out := &in.Data, &out.Data
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation.
+func (in *RangeAllocation) DeepCopy() *RangeAllocation {
+	if in == nil {
+		return nil
+	}
+	out := new(RangeAllocation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *RangeAllocation) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationController) DeepCopyInto(out *ReplicationController) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController.
+func (in *ReplicationController) DeepCopy() *ReplicationController {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationController)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicationController) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) {
+	*out = *in
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition.
+func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ReplicationController, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList.
+func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ReplicationControllerList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) {
+	*out = *in
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Template != nil {
+		in, out := &in.Template, &out.Template
+		*out = new(PodTemplateSpec)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec.
+func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ReplicationControllerCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus.
+func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ReplicationControllerStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) {
+	*out = *in
+	out.Divisor = in.Divisor.DeepCopy()
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector.
+func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceFieldSelector)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ResourceList) DeepCopyInto(out *ResourceList) {
+	{
+		in := &in
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+		return
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList.
+func (in ResourceList) DeepCopy() ResourceList {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceList)
+	in.DeepCopyInto(out)
+	return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota.
+func (in *ResourceQuota) DeepCopy() *ResourceQuota {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuota)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceQuota) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	out.ListMeta = in.ListMeta
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ResourceQuota, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList.
+func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotaList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ResourceQuotaList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) {
+	*out = *in
+	if in.Hard != nil {
+		in, out := &in.Hard, &out.Hard
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Scopes != nil {
+		in, out := &in.Scopes, &out.Scopes
+		*out = make([]ResourceQuotaScope, len(*in))
+		copy(*out, *in)
+	}
+	if in.ScopeSelector != nil {
+		in, out := &in.ScopeSelector, &out.ScopeSelector
+		*out = new(ScopeSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec.
+func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotaSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) {
+	*out = *in
+	if in.Hard != nil {
+		in, out := &in.Hard, &out.Hard
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Used != nil {
+		in, out := &in.Used, &out.Used
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus.
+func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceQuotaStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) {
+	*out = *in
+	if in.Limits != nil {
+		in, out := &in.Limits, &out.Limits
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	if in.Requests != nil {
+		in, out := &in.Requests, &out.Requests
+		*out = make(ResourceList, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val.DeepCopy()
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements.
+func (in *ResourceRequirements) DeepCopy() *ResourceRequirements {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceRequirements)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions.
+func (in *SELinuxOptions) DeepCopy() *SELinuxOptions { + if in == nil { + return nil + } + out := new(SELinuxOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource. +func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource. +func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeSelector) DeepCopyInto(out *ScopeSelector) { + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]ScopedResourceSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSelector. +func (in *ScopeSelector) DeepCopy() *ScopeSelector { + if in == nil { + return nil + } + out := new(ScopeSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopedResourceSelectorRequirement) DeepCopyInto(out *ScopedResourceSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopedResourceSelectorRequirement. +func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorRequirement { + if in == nil { + return nil + } + out := new(ScopedResourceSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Secret) DeepCopyInto(out *Secret) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]byte, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret. 
+func (in *Secret) DeepCopy() *Secret { + if in == nil { + return nil + } + out := new(Secret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Secret) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource. +func (in *SecretEnvSource) DeepCopy() *SecretEnvSource { + if in == nil { + return nil + } + out := new(SecretEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. +func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { + if in == nil { + return nil + } + out := new(SecretKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretList) DeepCopyInto(out *SecretList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. +func (in *SecretList) DeepCopy() *SecretList { + if in == nil { + return nil + } + out := new(SecretList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection. +func (in *SecretProjection) DeepCopy() *SecretProjection { + if in == nil { + return nil + } + out := new(SecretProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
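The practical reason these copies exist: objects handed out by client-go listers and informer caches are shared, and mutating them in place corrupts the cache for every other consumer. The conventional pattern is to DeepCopy before any mutation, sketched here with a hypothetical rotateToken helper (the "token"/"rotated" data is illustrative):

    package main

    import (
        "k8s.io/client-go/kubernetes"
        listersv1 "k8s.io/client-go/listers/core/v1"
    )

    // rotateToken sketches the canonical lister-then-DeepCopy pattern.
    func rotateToken(lister listersv1.SecretLister, client kubernetes.Interface, ns, name string) error {
        cached, err := lister.Secrets(ns).Get(name)
        if err != nil {
            return err
        }
        // `cached` is shared with the informer cache; copy before mutating.
        secret := cached.DeepCopy()
        if secret.Data == nil {
            secret.Data = map[string][]byte{}
        }
        secret.Data["token"] = []byte("rotated") // illustrative value
        _, err = client.CoreV1().Secrets(ns).Update(secret)
        return err
    }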
+func (in *SecretReference) DeepCopyInto(out *SecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. +func (in *SecretReference) DeepCopy() *SecretReference { + if in == nil { + return nil + } + out := new(SecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. +func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { + if in == nil { + return nil + } + out := new(SecretVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + (*in).DeepCopyInto(*out) + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + *out = new(bool) + **out = **in + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + if in.ProcMount != nil { + in, out := &in.ProcMount, &out.ProcMount + *out = new(ProcMountType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. +func (in *SecurityContext) DeepCopy() *SecurityContext { + if in == nil { + return nil + } + out := new(SecurityContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializedReference) DeepCopyInto(out *SerializedReference) { + *out = *in + out.TypeMeta = in.TypeMeta + out.Reference = in.Reference + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference. +func (in *SerializedReference) DeepCopy() *SerializedReference { + if in == nil { + return nil + } + out := new(SerializedReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SerializedReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Service) DeepCopyInto(out *Service) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Service) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount. +func (in *ServiceAccount) DeepCopy() *ServiceAccount { + if in == nil { + return nil + } + out := new(ServiceAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. +func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { + if in == nil { + return nil + } + out := new(ServiceAccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection. 
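DeepCopy returns the concrete type, while DeepCopyObject returns runtime.Object; the latter is the method that satisfies the runtime.Object interface, letting type-agnostic machinery (schemes, caches, event recorders) clone objects without knowing what they are. A small sketch, again using the public v1 types for illustration:

    package main

    import (
        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/runtime"
    )

    // snapshot clones any registered API object generically; the generated
    // DeepCopyObject implementations above do the real work.
    func snapshot(obj runtime.Object) runtime.Object {
        return obj.DeepCopyObject()
    }

    func main() {
        svc := &v1.Service{}
        copied := snapshot(svc)  // static type is runtime.Object
        _ = copied.(*v1.Service) // the concrete type is preserved
    }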
+func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection { + if in == nil { + return nil + } + out := new(ServiceAccountTokenProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceList) DeepCopyInto(out *ServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. +func (in *ServiceList) DeepCopy() *ServiceList { + if in == nil { + return nil + } + out := new(ServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePort) DeepCopyInto(out *ServicePort) { + *out = *in + out.TargetPort = in.TargetPort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. +func (in *ServicePort) DeepCopy() *ServicePort { + if in == nil { + return nil + } + out := new(ServicePort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions. +func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions { + if in == nil { + return nil + } + out := new(ServiceProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SessionAffinityConfig != nil { + in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig + *out = new(SessionAffinityConfig) + (*in).DeepCopyInto(*out) + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. 
+func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. +func (in *ServiceStatus) DeepCopy() *ServiceStatus { + if in == nil { + return nil + } + out := new(ServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { + *out = *in + if in.ClientIP != nil { + in, out := &in.ClientIP, &out.ClientIP + *out = new(ClientIPConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig. +func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { + if in == nil { + return nil + } + out := new(SessionAffinityConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource. +func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource. +func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sysctl) DeepCopyInto(out *Sysctl) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl. +func (in *Sysctl) DeepCopy() *Sysctl { + if in == nil { + return nil + } + out := new(Sysctl) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) { + *out = *in + out.Port = in.Port + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction. 
+func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { + if in == nil { + return nil + } + out := new(TCPSocketAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Taint) DeepCopyInto(out *Taint) { + *out = *in + if in.TimeAdded != nil { + in, out := &in.TimeAdded, &out.TimeAdded + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. +func (in *Taint) DeepCopy() *Taint { + if in == nil { + return nil + } + out := new(Taint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Toleration) DeepCopyInto(out *Toleration) { + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. +func (in *Toleration) DeepCopy() *Toleration { + if in == nil { + return nil + } + out := new(Toleration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySelectorLabelRequirement) DeepCopyInto(out *TopologySelectorLabelRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorLabelRequirement. +func (in *TopologySelectorLabelRequirement) DeepCopy() *TopologySelectorLabelRequirement { + if in == nil { + return nil + } + out := new(TopologySelectorLabelRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySelectorTerm) DeepCopyInto(out *TopologySelectorTerm) { + *out = *in + if in.MatchLabelExpressions != nil { + in, out := &in.MatchLabelExpressions, &out.MatchLabelExpressions + *out = make([]TopologySelectorLabelRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorTerm. +func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { + if in == nil { + return nil + } + out := new(TopologySelectorTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { + *out = *in + if in.APIGroup != nil { + in, out := &in.APIGroup, &out.APIGroup + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference. +func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference { + if in == nil { + return nil + } + out := new(TypedLocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + in.VolumeSource.DeepCopyInto(&out.VolumeSource) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. +func (in *VolumeDevice) DeepCopy() *VolumeDevice { + if in == nil { + return nil + } + out := new(VolumeDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { + *out = *in + if in.MountPropagation != nil { + in, out := &in.MountPropagation, &out.MountPropagation + *out = new(MountPropagationMode) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. +func (in *VolumeMount) DeepCopy() *VolumeMount { + if in == nil { + return nil + } + out := new(VolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) { + *out = *in + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity. +func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity { + if in == nil { + return nil + } + out := new(VolumeNodeAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + (*in).DeepCopyInto(*out) + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountToken != nil { + in, out := &in.ServiceAccountToken, &out.ServiceAccountToken + *out = new(ServiceAccountTokenProjection) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection. +func (in *VolumeProjection) DeepCopy() *VolumeProjection { + if in == nil { + return nil + } + out := new(VolumeProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + *out = new(GitRepoVolumeSource) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(ProjectedVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.StorageOS != 
nil { + in, out := &in.StorageOS, &out.StorageOS + *out = new(StorageOSVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource. +func (in *VolumeSource) DeepCopy() *VolumeSource { + if in == nil { + return nil + } + out := new(VolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource. +func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource { + if in == nil { + return nil + } + out := new(VsphereVirtualDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) { + *out = *in + in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm. +func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { + if in == nil { + return nil + } + out := new(WeightedPodAffinityTerm) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go new file mode 100644 index 000000000..bab0ae332 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=scheduling.k8s.io + +package scheduling // import "k8s.io/kubernetes/pkg/apis/scheduling" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/helpers.go new file mode 100644 index 000000000..58b379975 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/helpers.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scheduling + +import ( + "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SystemPriorityClasses define system priority classes that are auto-created at cluster bootstrapping. +// Our API validation logic ensures that any priority class that has a system prefix or its value +// is higher than HighestUserDefinablePriority is equal to one of these SystemPriorityClasses. +var systemPriorityClasses = []*PriorityClass{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: SystemNodeCritical, + }, + Value: SystemCriticalPriority + 1000, + Description: "Used for system critical pods that must not be moved from their current node.", + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: SystemClusterCritical, + }, + Value: SystemCriticalPriority, + Description: "Used for system critical pods that must run in the cluster, but can be moved to another node if necessary.", + }, +} + +// SystemPriorityClasses returns the list of system priority classes. +// NOTE: be careful not to modify any of elements of the returned array directly. +func SystemPriorityClasses() []*PriorityClass { + return systemPriorityClasses +} + +// IsKnownSystemPriorityClass checks that "pc" is equal to one of the system PriorityClasses. +// It ignores "description", labels, annotations, etc. of the PriorityClass. +func IsKnownSystemPriorityClass(pc *PriorityClass) (bool, error) { + for _, spc := range systemPriorityClasses { + if spc.Name == pc.Name { + if spc.Value != pc.Value { + return false, fmt.Errorf("value of %v PriorityClass must be %v", spc.Name, spc.Value) + } + if spc.GlobalDefault != pc.GlobalDefault { + return false, fmt.Errorf("globalDefault of %v PriorityClass must be %v", spc.Name, spc.GlobalDefault) + } + return true, nil + } + } + return false, fmt.Errorf("%v is not a known system priority class", pc.Name) +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/register.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/register.go new file mode 100644 index 000000000..c3a611049 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
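IsKnownSystemPriorityClass above deliberately compares only name, value, and globalDefault, ignoring description, labels, and annotations, so cosmetic edits to a system class still validate. A sketch of calling it, with field values mirroring the system defaults defined above:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/kubernetes/pkg/apis/scheduling"
    )

    func main() {
        pc := &scheduling.PriorityClass{
            ObjectMeta: metav1.ObjectMeta{Name: scheduling.SystemNodeCritical},
            Value:      scheduling.SystemCriticalPriority + 1000,
        }
        if ok, err := scheduling.IsKnownSystemPriorityClass(pc); !ok {
            fmt.Println("rejected:", err)
        } else {
            fmt.Println("accepted as a system priority class")
        }
    }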
+*/ + +package scheduling + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "scheduling.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PriorityClass{}, + &PriorityClassList{}, + ) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/types.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/types.go new file mode 100644 index 000000000..29c950157 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/types.go @@ -0,0 +1,81 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduling + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +const ( + // DefaultPriorityWhenNoDefaultClassExists is used to set priority of pods + // that do not specify any priority class and there is no priority class + // marked as default. + DefaultPriorityWhenNoDefaultClassExists = 0 + // HighestUserDefinablePriority is the highest priority for user defined priority classes. Priority values larger than 1 billion are reserved for Kubernetes system use. + HighestUserDefinablePriority = int32(1000000000) + // SystemCriticalPriority is the beginning of the range of priority values for critical system components. + SystemCriticalPriority = 2 * HighestUserDefinablePriority + // SystemPriorityClassPrefix is the prefix reserved for system priority class names. Other priority + // classes are not allowed to start with this prefix. + SystemPriorityClassPrefix = "system-" + // NOTE: In order to avoid conflict of names with user-defined priority classes, all the names must + // start with SystemPriorityClassPrefix. + SystemClusterCritical = SystemPriorityClassPrefix + "cluster-critical" + SystemNodeCritical = SystemPriorityClassPrefix + "node-critical" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClass defines the mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +type PriorityClass struct { + metav1.TypeMeta + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. 
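Kind and Resource simply qualify bare names with this package's group, and AddToScheme is the hook a caller uses to make the internal PriorityClass types known to a runtime.Scheme. For instance:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/kubernetes/pkg/apis/scheduling"
    )

    func main() {
        // Group-qualify an unversioned kind and resource name.
        fmt.Println(scheduling.Kind("PriorityClass"))       // GroupKind in scheduling.k8s.io
        fmt.Println(scheduling.Resource("priorityclasses")) // GroupResource in scheduling.k8s.io

        // Register the internal types with a scheme.
        scheme := runtime.NewScheme()
        if err := scheduling.AddToScheme(scheme); err != nil {
            panic(err)
        }
    }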
+ // +optional + metav1.ObjectMeta + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + Value int32 + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + GlobalDefault bool + + // Description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + Description string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClassList is a collection of priority classes. +type PriorityClassList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta + + // Items is the list of PriorityClasses. + Items []PriorityClass +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/scheduling/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/zz_generated.deepcopy.go new file mode 100644 index 000000000..8584caa58 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/scheduling/zz_generated.deepcopy.go @@ -0,0 +1,84 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package scheduling + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass. +func (in *PriorityClass) DeepCopy() *PriorityClass { + if in == nil { + return nil + } + out := new(PriorityClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
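Tying the constants to the type: user classes live at or below HighestUserDefinablePriority, the two system classes sit above it, and only names carrying the system- prefix may cross that line. A hypothetical user-defined class (the name, value, and description here are illustrative) would look like:

    package main

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/kubernetes/pkg/apis/scheduling"
    )

    // A user-defined class: it must avoid the reserved "system-" name prefix
    // and stay at or below HighestUserDefinablePriority (one billion).
    var prodCritical = &scheduling.PriorityClass{
        ObjectMeta:  metav1.ObjectMeta{Name: "prod-critical"},
        Value:       1000000,
        Description: "Pods that should preempt best-effort batch work.",
    }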
+func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList. +func (in *PriorityClassList) DeepCopy() *PriorityClassList { + if in == nil { + return nil + } + out := new(PriorityClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go b/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go new file mode 100644 index 000000000..0da7b9c8b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/capabilities/capabilities.go @@ -0,0 +1,95 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package capabilities + +import ( + "sync" +) + +// Capabilities defines the set of capabilities available within the system. +// For now these are global. Eventually they may be per-user +type Capabilities struct { + AllowPrivileged bool + + // Pod sources from which to allow privileged capabilities like host networking, sharing the host + // IPC namespace, and sharing the host PID namespace. + PrivilegedSources PrivilegedSources + + // PerConnectionBandwidthLimitBytesPerSec limits the throughput of each connection (currently only used for proxy, exec, attach) + PerConnectionBandwidthLimitBytesPerSec int64 +} + +// PrivilegedSources defines the pod sources allowed to make privileged requests for certain types +// of capabilities like host networking, sharing the host IPC namespace, and sharing the host PID namespace. +type PrivilegedSources struct { + // List of pod sources for which using host network is allowed. + HostNetworkSources []string + + // List of pod sources for which using host pid namespace is allowed. + HostPIDSources []string + + // List of pod sources for which using host ipc is allowed. + HostIPCSources []string +} + +var capInstance struct { + once sync.Once + lock sync.Mutex + capabilities *Capabilities +} + +// Initialize the capability set. This can only be done once per binary, subsequent calls are ignored. +func Initialize(c Capabilities) { + // Only do this once + capInstance.once.Do(func() { + capInstance.capabilities = &c + }) +} + +// Setup the capability set. It wraps Initialize for improving usability. 
+func Setup(allowPrivileged bool, privilegedSources PrivilegedSources, perConnectionBytesPerSec int64) { + Initialize(Capabilities{ + AllowPrivileged: allowPrivileged, + PrivilegedSources: privilegedSources, + PerConnectionBandwidthLimitBytesPerSec: perConnectionBytesPerSec, + }) +} + +// SetForTests sets capabilities for tests. Convenience method for testing. This should only be called from tests. +func SetForTests(c Capabilities) { + capInstance.lock.Lock() + defer capInstance.lock.Unlock() + capInstance.capabilities = &c +} + +// Returns a read-only copy of the system capabilities. +func Get() Capabilities { + capInstance.lock.Lock() + defer capInstance.lock.Unlock() + // This check prevents clobbering of capabilities that might've been set via SetForTests + if capInstance.capabilities == nil { + Initialize(Capabilities{ + AllowPrivileged: false, + PrivilegedSources: PrivilegedSources{ + HostNetworkSources: []string{}, + HostPIDSources: []string{}, + HostIPCSources: []string{}, + }, + }) + } + return *capInstance.capabilities +} diff --git a/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go b/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go new file mode 100644 index 000000000..bbdc89d50 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/capabilities/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package capabilities manages system level capabilities +package capabilities // import "k8s.io/kubernetes/pkg/capabilities" diff --git a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go new file mode 100644 index 000000000..caac5649d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go @@ -0,0 +1,261 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
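Initialize is guarded by a sync.Once, so the first caller wins and later calls are silently ignored, while Get falls back to a locked-down default if nothing has initialized the set. A sketch of the once-only behavior:

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/capabilities"
    )

    func main() {
        capabilities.Setup(true, capabilities.PrivilegedSources{}, 0)

        // A second call is a no-op: the sync.Once inside Initialize has fired.
        capabilities.Setup(false, capabilities.PrivilegedSources{}, 0)

        fmt.Println(capabilities.Get().AllowPrivileged) // true: the first call won
    }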
+*/ + +package controller + +import ( + "fmt" + "time" + + v1authenticationapi "k8s.io/api/authentication/v1" + "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" + clientset "k8s.io/client-go/kubernetes" + v1authentication "k8s.io/client-go/kubernetes/typed/authentication/v1" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/serviceaccount" + + "k8s.io/klog" +) + +// ControllerClientBuilder allows you to get clients and configs for controllers +// Please note a copy also exists in staging/src/k8s.io/cloud-provider/cloud.go +// TODO: Extract this into a separate controller utilities repo (issues/68947) +type ControllerClientBuilder interface { + Config(name string) (*restclient.Config, error) + ConfigOrDie(name string) *restclient.Config + Client(name string) (clientset.Interface, error) + ClientOrDie(name string) clientset.Interface +} + +// SimpleControllerClientBuilder returns a fixed client with different user agents +type SimpleControllerClientBuilder struct { + // ClientConfig is a skeleton config to clone and use as the basis for each controller client + ClientConfig *restclient.Config +} + +func (b SimpleControllerClientBuilder) Config(name string) (*restclient.Config, error) { + clientConfig := *b.ClientConfig + return restclient.AddUserAgent(&clientConfig, name), nil +} + +func (b SimpleControllerClientBuilder) ConfigOrDie(name string) *restclient.Config { + clientConfig, err := b.Config(name) + if err != nil { + klog.Fatal(err) + } + return clientConfig +} + +func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface, error) { + clientConfig, err := b.Config(name) + if err != nil { + return nil, err + } + return clientset.NewForConfig(clientConfig) +} + +func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interface { + client, err := b.Client(name) + if err != nil { + klog.Fatal(err) + } + return client +} + +// SAControllerClientBuilder is a ControllerClientBuilder that returns clients identifying as +// service accounts +type SAControllerClientBuilder struct { + // ClientConfig is a skeleton config to clone and use as the basis for each controller client + ClientConfig *restclient.Config + + // CoreClient is used to provision service accounts if needed and watch for their associated tokens + // to construct a controller client + CoreClient v1core.CoreV1Interface + + // AuthenticationClient is used to check API tokens to make sure they are valid before + // building a controller client from them + AuthenticationClient v1authentication.AuthenticationV1Interface + + // Namespace is the namespace used to host the service accounts that will back the + // controllers. It must be highly privileged namespace which normal users cannot inspect. + Namespace string +} + +// config returns a complete clientConfig for constructing clients. 
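SimpleControllerClientBuilder clones the skeleton config once per caller and stamps a per-controller user agent, which is what makes individual controllers distinguishable in apiserver request and audit logs. A sketch, with an illustrative kubeconfig path:

    package main

    import (
        "k8s.io/client-go/tools/clientcmd"
        "k8s.io/kubernetes/pkg/controller"
    )

    func main() {
        config, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubeconfig") // illustrative path
        if err != nil {
            panic(err)
        }
        builder := controller.SimpleControllerClientBuilder{ClientConfig: config}

        // Each named client shares credentials but carries its own user agent.
        nodeClient := builder.ClientOrDie("node-controller")
        svcClient := builder.ClientOrDie("service-controller")
        _, _ = nodeClient, svcClient
    }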
This is separate in anticipation of composition +// which means that not all clientsets are known here +func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, error) { + sa, err := b.getOrCreateServiceAccount(name) + if err != nil { + return nil, err + } + + var clientConfig *restclient.Config + + lw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String() + return b.CoreClient.Secrets(b.Namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String() + return b.CoreClient.Secrets(b.Namespace).Watch(options) + }, + } + _, err = watchtools.ListWatchUntil(30*time.Second, lw, + func(event watch.Event) (bool, error) { + switch event.Type { + case watch.Deleted: + return false, nil + case watch.Error: + return false, fmt.Errorf("error watching") + + case watch.Added, watch.Modified: + secret, ok := event.Object.(*v1.Secret) + if !ok { + return false, fmt.Errorf("unexpected object type: %T", event.Object) + } + if !serviceaccount.IsServiceAccountToken(secret, sa) { + return false, nil + } + if len(secret.Data[v1.ServiceAccountTokenKey]) == 0 { + return false, nil + } + validConfig, valid, err := b.getAuthenticatedConfig(sa, string(secret.Data[v1.ServiceAccountTokenKey])) + if err != nil { + klog.Warningf("error validating API token for %s/%s in secret %s: %v", sa.Name, sa.Namespace, secret.Name, err) + // continue watching for good tokens + return false, nil + } + if !valid { + klog.Warningf("secret %s contained an invalid API token for %s/%s", secret.Name, sa.Name, sa.Namespace) + // try to delete the secret containing the invalid token + if err := b.CoreClient.Secrets(secret.Namespace).Delete(secret.Name, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + klog.Warningf("error deleting secret %s containing invalid API token for %s/%s: %v", secret.Name, sa.Name, sa.Namespace, err) + } + // continue watching for good tokens + return false, nil + } + clientConfig = validConfig + return true, nil + + default: + return false, fmt.Errorf("unexpected event type: %v", event.Type) + } + }) + if err != nil { + return nil, fmt.Errorf("unable to get token for service account: %v", err) + } + + return clientConfig, nil +} + +func (b SAControllerClientBuilder) getOrCreateServiceAccount(name string) (*v1.ServiceAccount, error) { + sa, err := b.CoreClient.ServiceAccounts(b.Namespace).Get(name, metav1.GetOptions{}) + if err == nil { + return sa, nil + } + if !apierrors.IsNotFound(err) { + return nil, err + } + + // Create the namespace if we can't verify it exists. + // Tolerate errors, since we don't know whether this component has namespace creation permissions. 
+ if _, err := b.CoreClient.Namespaces().Get(b.Namespace, metav1.GetOptions{}); err != nil { + b.CoreClient.Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: b.Namespace}}) + } + + // Create the service account + sa, err = b.CoreClient.ServiceAccounts(b.Namespace).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: b.Namespace, Name: name}}) + if apierrors.IsAlreadyExists(err) { + // If we're racing to init and someone else already created it, re-fetch + return b.CoreClient.ServiceAccounts(b.Namespace).Get(name, metav1.GetOptions{}) + } + return sa, err +} + +func (b SAControllerClientBuilder) getAuthenticatedConfig(sa *v1.ServiceAccount, token string) (*restclient.Config, bool, error) { + username := apiserverserviceaccount.MakeUsername(sa.Namespace, sa.Name) + + clientConfig := restclient.AnonymousClientConfig(b.ClientConfig) + clientConfig.BearerToken = token + restclient.AddUserAgent(clientConfig, username) + + // Try token review first + tokenReview := &v1authenticationapi.TokenReview{Spec: v1authenticationapi.TokenReviewSpec{Token: token}} + if tokenResult, err := b.AuthenticationClient.TokenReviews().Create(tokenReview); err == nil { + if !tokenResult.Status.Authenticated { + klog.Warningf("Token for %s/%s did not authenticate correctly", sa.Name, sa.Namespace) + return nil, false, nil + } + if tokenResult.Status.User.Username != username { + klog.Warningf("Token for %s/%s authenticated as unexpected username: %s", sa.Name, sa.Namespace, tokenResult.Status.User.Username) + return nil, false, nil + } + klog.V(4).Infof("Verified credential for %s/%s", sa.Name, sa.Namespace) + return clientConfig, true, nil + } + + // If we couldn't run the token review, the API might be disabled or we might not have permission. + // Try to make a request to /apis with the token. If we get a 401 we should consider the token invalid. + clientConfigCopy := *clientConfig + clientConfigCopy.NegotiatedSerializer = legacyscheme.Codecs + client, err := restclient.UnversionedRESTClientFor(&clientConfigCopy) + if err != nil { + return nil, false, err + } + err = client.Get().AbsPath("/apis").Do().Error() + if apierrors.IsUnauthorized(err) { + klog.Warningf("Token for %s/%s did not authenticate correctly: %v", sa.Name, sa.Namespace, err) + return nil, false, nil + } + + return clientConfig, true, nil +} + +func (b SAControllerClientBuilder) ConfigOrDie(name string) *restclient.Config { + clientConfig, err := b.Config(name) + if err != nil { + klog.Fatal(err) + } + return clientConfig +} + +func (b SAControllerClientBuilder) Client(name string) (clientset.Interface, error) { + clientConfig, err := b.Config(name) + if err != nil { + return nil, err + } + return clientset.NewForConfig(clientConfig) +} + +func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface { + client, err := b.Client(name) + if err != nil { + klog.Fatal(err) + } + return client +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go new file mode 100644 index 000000000..f63afaca6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_ref_manager.go @@ -0,0 +1,501 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
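Assembled, the service-account flavor needs a root config plus core and authentication clients, and a privileged namespace to host the per-controller accounts. A hedged construction sketch, roughly how kube-controller-manager wires it up (rootConfig and rootClient are assumed to be built elsewhere):

    package main

    import (
        clientset "k8s.io/client-go/kubernetes"
        restclient "k8s.io/client-go/rest"
        "k8s.io/kubernetes/pkg/controller"
    )

    // newSABuilder returns a builder whose per-controller clients authenticate
    // as dedicated service accounts rather than with the root credentials.
    func newSABuilder(rootConfig *restclient.Config, rootClient clientset.Interface) controller.ControllerClientBuilder {
        return controller.SAControllerClientBuilder{
            ClientConfig:         restclient.AnonymousClientConfig(rootConfig),
            CoreClient:           rootClient.CoreV1(),
            AuthenticationClient: rootClient.AuthenticationV1(),
            Namespace:            "kube-system",
        }
    }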
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "fmt" + "sync" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog" +) + +type BaseControllerRefManager struct { + Controller metav1.Object + Selector labels.Selector + + canAdoptErr error + canAdoptOnce sync.Once + CanAdoptFunc func() error +} + +func (m *BaseControllerRefManager) CanAdopt() error { + m.canAdoptOnce.Do(func() { + if m.CanAdoptFunc != nil { + m.canAdoptErr = m.CanAdoptFunc() + } + }) + return m.canAdoptErr +} + +// ClaimObject tries to take ownership of an object for this controller. +// +// It will reconcile the following: +// * Adopt orphans if the match function returns true. +// * Release owned objects if the match function returns false. +// +// A non-nil error is returned if some form of reconciliation was attempted and +// failed. Usually, controllers should try again later in case reconciliation +// is still needed. +// +// If the error is nil, either the reconciliation succeeded, or no +// reconciliation was necessary. The returned boolean indicates whether you now +// own the object. +// +// No reconciliation will be attempted if the controller is being deleted. +func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) { + controllerRef := metav1.GetControllerOf(obj) + if controllerRef != nil { + if controllerRef.UID != m.Controller.GetUID() { + // Owned by someone else. Ignore. + return false, nil + } + if match(obj) { + // We already own it and the selector matches. + // Return true (successfully claimed) before checking deletion timestamp. + // We're still allowed to claim things we already own while being deleted + // because doing so requires taking no actions. + return true, nil + } + // Owned by us but selector doesn't match. + // Try to release, unless we're being deleted. + if m.Controller.GetDeletionTimestamp() != nil { + return false, nil + } + if err := release(obj); err != nil { + // If the pod no longer exists, ignore the error. + if errors.IsNotFound(err) { + return false, nil + } + // Either someone else released it, or there was a transient error. + // The controller should requeue and try again if it's still stale. + return false, err + } + // Successfully released. + return false, nil + } + + // It's an orphan. + if m.Controller.GetDeletionTimestamp() != nil || !match(obj) { + // Ignore if we're being deleted or selector doesn't match. + return false, nil + } + if obj.GetDeletionTimestamp() != nil { + // Ignore if the object is being deleted + return false, nil + } + // Selector matches. Try to adopt. + if err := adopt(obj); err != nil { + // If the pod no longer exists, ignore the error. + if errors.IsNotFound(err) { + return false, nil + } + // Either someone else claimed it first, or there was a transient error. + // The controller should requeue and try again if it's still orphaned. 
+ return false, err + } + // Successfully adopted. + return true, nil +} + +type PodControllerRefManager struct { + BaseControllerRefManager + controllerKind schema.GroupVersionKind + podControl PodControlInterface +} + +// NewPodControllerRefManager returns a PodControllerRefManager that exposes +// methods to manage the controllerRef of pods. +// +// The CanAdopt() function can be used to perform a potentially expensive check +// (such as a live GET from the API server) prior to the first adoption. +// It will only be called (at most once) if an adoption is actually attempted. +// If CanAdopt() returns a non-nil error, all adoptions will fail. +// +// NOTE: Once CanAdopt() is called, it will not be called again by the same +// PodControllerRefManager instance. Create a new instance if it makes +// sense to check CanAdopt() again (e.g. in a different sync pass). +func NewPodControllerRefManager( + podControl PodControlInterface, + controller metav1.Object, + selector labels.Selector, + controllerKind schema.GroupVersionKind, + canAdopt func() error, +) *PodControllerRefManager { + return &PodControllerRefManager{ + BaseControllerRefManager: BaseControllerRefManager{ + Controller: controller, + Selector: selector, + CanAdoptFunc: canAdopt, + }, + controllerKind: controllerKind, + podControl: podControl, + } +} + +// ClaimPods tries to take ownership of a list of Pods. +// +// It will reconcile the following: +// * Adopt orphans if the selector matches. +// * Release owned objects if the selector no longer matches. +// +// Optional: If one or more filters are specified, a Pod will only be claimed if +// all filters return true. +// +// A non-nil error is returned if some form of reconciliation was attempted and +// failed. Usually, controllers should try again later in case reconciliation +// is still needed. +// +// If the error is nil, either the reconciliation succeeded, or no +// reconciliation was necessary. The list of Pods that you now own is returned. +func (m *PodControllerRefManager) ClaimPods(pods []*v1.Pod, filters ...func(*v1.Pod) bool) ([]*v1.Pod, error) { + var claimed []*v1.Pod + var errlist []error + + match := func(obj metav1.Object) bool { + pod := obj.(*v1.Pod) + // Check selector first so filters only run on potentially matching Pods. + if !m.Selector.Matches(labels.Set(pod.Labels)) { + return false + } + for _, filter := range filters { + if !filter(pod) { + return false + } + } + return true + } + adopt := func(obj metav1.Object) error { + return m.AdoptPod(obj.(*v1.Pod)) + } + release := func(obj metav1.Object) error { + return m.ReleasePod(obj.(*v1.Pod)) + } + + for _, pod := range pods { + ok, err := m.ClaimObject(pod, match, adopt, release) + if err != nil { + errlist = append(errlist, err) + continue + } + if ok { + claimed = append(claimed, pod) + } + } + return claimed, utilerrors.NewAggregate(errlist) +} + +// AdoptPod sends a patch to take control of the pod. It returns the error if +// the patching fails. +func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error { + if err := m.CanAdopt(); err != nil { + return fmt.Errorf("can't adopt Pod %v/%v (%v): %v", pod.Namespace, pod.Name, pod.UID, err) + } + // Note that ValidateOwnerReferences() will reject this patch if another + // OwnerReference exists with controller=true. 
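+	// For illustration, a hypothetical ReplicaSet "frontend" adopting pod "frontend-x7k2p"
+	// renders a patch like the following (names and UIDs invented for this sketch):
+	//
+	//	{"metadata":{"ownerReferences":[{"apiVersion":"apps/v1","kind":"ReplicaSet",
+	//	 "name":"frontend","uid":"d9607e19-f88f-11e6-a518-42010a800195","controller":true,
+	//	 "blockOwnerDeletion":true}],"uid":"9b8d6f1c-f88f-11e6-a518-42010a800195"}}
+	//
+	// Embedding the pod's own uid makes the patch conditional: the API server rejects it
+	// if the pod was deleted and recreated under the same name in the meantime.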
+ addControllerPatch := fmt.Sprintf( + `{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`, + m.controllerKind.GroupVersion(), m.controllerKind.Kind, + m.Controller.GetName(), m.Controller.GetUID(), pod.UID) + return m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(addControllerPatch)) +} + +// ReleasePod sends a patch to free the pod from the control of the controller. +// It returns the error if the patching fails. 404 and 422 errors are ignored. +func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error { + klog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s", + pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) + deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), pod.UID) + err := m.podControl.PatchPod(pod.Namespace, pod.Name, []byte(deleteOwnerRefPatch)) + if err != nil { + if errors.IsNotFound(err) { + // If the pod no longer exists, ignore it. + return nil + } + if errors.IsInvalid(err) { + // Invalid error will be returned in two cases: 1. the pod + // has no owner reference, 2. the uid of the pod doesn't + // match, which means the pod is deleted and then recreated. + // In both cases, the error can be ignored. + + // TODO: If the pod has owner references, but none of them + // has the owner.UID, server will silently ignore the patch. + // Investigate why. + return nil + } + } + return err +} + +// ReplicaSetControllerRefManager is used to manage controllerRef of ReplicaSets. +// Three methods are defined on this object 1: Classify 2: AdoptReplicaSet and +// 3: ReleaseReplicaSet which are used to classify the ReplicaSets into appropriate +// categories and accordingly adopt or release them. See comments on these functions +// for more details. +type ReplicaSetControllerRefManager struct { + BaseControllerRefManager + controllerKind schema.GroupVersionKind + rsControl RSControlInterface +} + +// NewReplicaSetControllerRefManager returns a ReplicaSetControllerRefManager that exposes +// methods to manage the controllerRef of ReplicaSets. +// +// The CanAdopt() function can be used to perform a potentially expensive check +// (such as a live GET from the API server) prior to the first adoption. +// It will only be called (at most once) if an adoption is actually attempted. +// If CanAdopt() returns a non-nil error, all adoptions will fail. +// +// NOTE: Once CanAdopt() is called, it will not be called again by the same +// ReplicaSetControllerRefManager instance. Create a new instance if it +// makes sense to check CanAdopt() again (e.g. in a different sync pass). +func NewReplicaSetControllerRefManager( + rsControl RSControlInterface, + controller metav1.Object, + selector labels.Selector, + controllerKind schema.GroupVersionKind, + canAdopt func() error, +) *ReplicaSetControllerRefManager { + return &ReplicaSetControllerRefManager{ + BaseControllerRefManager: BaseControllerRefManager{ + Controller: controller, + Selector: selector, + CanAdoptFunc: canAdopt, + }, + controllerKind: controllerKind, + rsControl: rsControl, + } +} + +// ClaimReplicaSets tries to take ownership of a list of ReplicaSets. +// +// It will reconcile the following: +// * Adopt orphans if the selector matches. +// * Release owned objects if the selector no longer matches. 
+// +// A non-nil error is returned if some form of reconciliation was attempted and +// failed. Usually, controllers should try again later in case reconciliation +// is still needed. +// +// If the error is nil, either the reconciliation succeeded, or no +// reconciliation was necessary. The list of ReplicaSets that you now own is +// returned. +func (m *ReplicaSetControllerRefManager) ClaimReplicaSets(sets []*apps.ReplicaSet) ([]*apps.ReplicaSet, error) { + var claimed []*apps.ReplicaSet + var errlist []error + + match := func(obj metav1.Object) bool { + return m.Selector.Matches(labels.Set(obj.GetLabels())) + } + adopt := func(obj metav1.Object) error { + return m.AdoptReplicaSet(obj.(*apps.ReplicaSet)) + } + release := func(obj metav1.Object) error { + return m.ReleaseReplicaSet(obj.(*apps.ReplicaSet)) + } + + for _, rs := range sets { + ok, err := m.ClaimObject(rs, match, adopt, release) + if err != nil { + errlist = append(errlist, err) + continue + } + if ok { + claimed = append(claimed, rs) + } + } + return claimed, utilerrors.NewAggregate(errlist) +} + +// AdoptReplicaSet sends a patch to take control of the ReplicaSet. It returns +// the error if the patching fails. +func (m *ReplicaSetControllerRefManager) AdoptReplicaSet(rs *apps.ReplicaSet) error { + if err := m.CanAdopt(); err != nil { + return fmt.Errorf("can't adopt ReplicaSet %v/%v (%v): %v", rs.Namespace, rs.Name, rs.UID, err) + } + // Note that ValidateOwnerReferences() will reject this patch if another + // OwnerReference exists with controller=true. + addControllerPatch := fmt.Sprintf( + `{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`, + m.controllerKind.GroupVersion(), m.controllerKind.Kind, + m.Controller.GetName(), m.Controller.GetUID(), rs.UID) + return m.rsControl.PatchReplicaSet(rs.Namespace, rs.Name, []byte(addControllerPatch)) +} + +// ReleaseReplicaSet sends a patch to free the ReplicaSet from the control of the Deployment controller. +// It returns the error if the patching fails. 404 and 422 errors are ignored. +func (m *ReplicaSetControllerRefManager) ReleaseReplicaSet(replicaSet *apps.ReplicaSet) error { + klog.V(2).Infof("patching ReplicaSet %s_%s to remove its controllerRef to %s/%s:%s", + replicaSet.Namespace, replicaSet.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) + deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), replicaSet.UID) + err := m.rsControl.PatchReplicaSet(replicaSet.Namespace, replicaSet.Name, []byte(deleteOwnerRefPatch)) + if err != nil { + if errors.IsNotFound(err) { + // If the ReplicaSet no longer exists, ignore it. + return nil + } + if errors.IsInvalid(err) { + // Invalid error will be returned in two cases: 1. the ReplicaSet + // has no owner reference, 2. the uid of the ReplicaSet doesn't + // match, which means the ReplicaSet is deleted and then recreated. + // In both cases, the error can be ignored. + return nil + } + } + return err +} + +// RecheckDeletionTimestamp returns a CanAdopt() function to recheck deletion. +// +// The CanAdopt() function calls getObject() to fetch the latest value, +// and denies adoption attempts if that object has a non-nil DeletionTimestamp. 
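+//
+// A sketch of typical wiring, assuming a client and a ReplicaSet rs that are not
+// part of this file:
+//
+//	canAdopt := RecheckDeletionTimestamp(func() (metav1.Object, error) {
+//		fresh, err := client.AppsV1().ReplicaSets(rs.Namespace).Get(rs.Name, metav1.GetOptions{})
+//		if err != nil {
+//			return nil, err
+//		}
+//		return fresh, nil
+//	})
+//	cm := NewPodControllerRefManager(podControl, rs, selector, controllerKind, canAdopt)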
+func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() error { + return func() error { + obj, err := getObject() + if err != nil { + return fmt.Errorf("can't recheck DeletionTimestamp: %v", err) + } + if obj.GetDeletionTimestamp() != nil { + return fmt.Errorf("%v/%v has just been deleted at %v", obj.GetNamespace(), obj.GetName(), obj.GetDeletionTimestamp()) + } + return nil + } +} + +// ControllerRevisionControllerRefManager is used to manage controllerRef of ControllerRevisions. +// Three methods are defined on this object 1: Classify 2: AdoptControllerRevision and +// 3: ReleaseControllerRevision which are used to classify the ControllerRevisions into appropriate +// categories and accordingly adopt or release them. See comments on these functions +// for more details. +type ControllerRevisionControllerRefManager struct { + BaseControllerRefManager + controllerKind schema.GroupVersionKind + crControl ControllerRevisionControlInterface +} + +// NewControllerRevisionControllerRefManager returns a ControllerRevisionControllerRefManager that exposes +// methods to manage the controllerRef of ControllerRevisions. +// +// The canAdopt() function can be used to perform a potentially expensive check +// (such as a live GET from the API server) prior to the first adoption. +// It will only be called (at most once) if an adoption is actually attempted. +// If canAdopt() returns a non-nil error, all adoptions will fail. +// +// NOTE: Once canAdopt() is called, it will not be called again by the same +// ControllerRevisionControllerRefManager instance. Create a new instance if it +// makes sense to check canAdopt() again (e.g. in a different sync pass). +func NewControllerRevisionControllerRefManager( + crControl ControllerRevisionControlInterface, + controller metav1.Object, + selector labels.Selector, + controllerKind schema.GroupVersionKind, + canAdopt func() error, +) *ControllerRevisionControllerRefManager { + return &ControllerRevisionControllerRefManager{ + BaseControllerRefManager: BaseControllerRefManager{ + Controller: controller, + Selector: selector, + CanAdoptFunc: canAdopt, + }, + controllerKind: controllerKind, + crControl: crControl, + } +} + +// ClaimControllerRevisions tries to take ownership of a list of ControllerRevisions. +// +// It will reconcile the following: +// * Adopt orphans if the selector matches. +// * Release owned objects if the selector no longer matches. +// +// A non-nil error is returned if some form of reconciliation was attempted and +// failed. Usually, controllers should try again later in case reconciliation +// is still needed. +// +// If the error is nil, either the reconciliation succeeded, or no +// reconciliation was necessary. The list of ControllerRevisions that you now own is +// returned. 
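+//
+// For instance, a history-keeping controller might claim its revisions with
+// (crControl, ds, selector, controllerKind and canAdopt are illustrative):
+//
+//	cm := NewControllerRevisionControllerRefManager(crControl, ds, selector, controllerKind, canAdopt)
+//	revisions, err := cm.ClaimControllerRevisions(allRevisions)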
+func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) { + var claimed []*apps.ControllerRevision + var errlist []error + + match := func(obj metav1.Object) bool { + return m.Selector.Matches(labels.Set(obj.GetLabels())) + } + adopt := func(obj metav1.Object) error { + return m.AdoptControllerRevision(obj.(*apps.ControllerRevision)) + } + release := func(obj metav1.Object) error { + return m.ReleaseControllerRevision(obj.(*apps.ControllerRevision)) + } + + for _, h := range histories { + ok, err := m.ClaimObject(h, match, adopt, release) + if err != nil { + errlist = append(errlist, err) + continue + } + if ok { + claimed = append(claimed, h) + } + } + return claimed, utilerrors.NewAggregate(errlist) +} + +// AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if +// the patching fails. +func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *apps.ControllerRevision) error { + if err := m.CanAdopt(); err != nil { + return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err) + } + // Note that ValidateOwnerReferences() will reject this patch if another + // OwnerReference exists with controller=true. + addControllerPatch := fmt.Sprintf( + `{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`, + m.controllerKind.GroupVersion(), m.controllerKind.Kind, + m.Controller.GetName(), m.Controller.GetUID(), history.UID) + return m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(addControllerPatch)) +} + +// ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller. +// It returns the error if the patching fails. 404 and 422 errors are ignored. +func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error { + klog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s", + history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName()) + deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID) + err := m.crControl.PatchControllerRevision(history.Namespace, history.Name, []byte(deleteOwnerRefPatch)) + if err != nil { + if errors.IsNotFound(err) { + // If the ControllerRevision no longer exists, ignore it. + return nil + } + if errors.IsInvalid(err) { + // Invalid error will be returned in two cases: 1. the ControllerRevision + // has no owner reference, 2. the uid of the ControllerRevision doesn't + // match, which means the ControllerRevision is deleted and then recreated. + // In both cases, the error can be ignored. + return nil + } + } + return err +} diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go new file mode 100644 index 000000000..6ccc32aed --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -0,0 +1,1053 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "encoding/binary" + "encoding/json" + "fmt" + "hash/fnv" + "sync" + "sync/atomic" + "time" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/rand" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/integer" + clientretry "k8s.io/client-go/util/retry" + podutil "k8s.io/kubernetes/pkg/api/v1/pod" + _ "k8s.io/kubernetes/pkg/apis/core/install" + "k8s.io/kubernetes/pkg/apis/core/validation" + schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" + hashutil "k8s.io/kubernetes/pkg/util/hash" + taintutils "k8s.io/kubernetes/pkg/util/taints" + + "k8s.io/klog" +) + +const ( + // If a watch drops a delete event for a pod, it'll take this long + // before a dormant controller waiting for those packets is woken up anyway. It is + // specifically targeted at the case where some problem prevents an update + // of expectations, without it the controller could stay asleep forever. This should + // be set based on the expected latency of watch events. + // + // Currently a controller can service (create *and* observe the watch events for said + // creation) about 10 pods a second, so it takes about 1 min to service + // 500 pods. Just creation is limited to 20qps, and watching happens with ~10-30s + // latency/pod at the scale of 3000 pods over 100 nodes. + ExpectationsTimeout = 5 * time.Minute + // When batching pod creates, SlowStartInitialBatchSize is the size of the + // initial batch. The size of each successive batch is twice the size of + // the previous batch. For example, for a value of 1, batch sizes would be + // 1, 2, 4, 8, ... and for a value of 10, batch sizes would be + // 10, 20, 40, 80, ... Setting the value higher means that quota denials + // will result in more doomed API calls and associated event spam. Setting + // the value lower will result in more API call round trip periods for + // large batches. + // + // Given a number of pods to start "N": + // The number of doomed calls per sync once quota is exceeded is given by: + // min(N,SlowStartInitialBatchSize) + // The number of batches is given by: + // 1+floor(log_2(ceil(N/SlowStartInitialBatchSize))) + SlowStartInitialBatchSize = 1 +) + +var UpdateTaintBackoff = wait.Backoff{ + Steps: 5, + Duration: 100 * time.Millisecond, + Jitter: 1.0, +} + +var ShutdownTaint = &v1.Taint{ + Key: schedulerapi.TaintNodeShutdown, + Effect: v1.TaintEffectNoSchedule, +} + +var ( + KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc +) + +type ResyncPeriodFunc func() time.Duration + +// Returns 0 for resyncPeriod in case resyncing is not needed. 
+func NoResyncPeriodFunc() time.Duration { + return 0 +} + +// StaticResyncPeriodFunc returns the resync period specified +func StaticResyncPeriodFunc(resyncPeriod time.Duration) ResyncPeriodFunc { + return func() time.Duration { + return resyncPeriod + } +} + +// Expectations are a way for controllers to tell the controller manager what they expect. eg: +// ControllerExpectations: { +// controller1: expects 2 adds in 2 minutes +// controller2: expects 2 dels in 2 minutes +// controller3: expects -1 adds in 2 minutes => controller3's expectations have already been met +// } +// +// Implementation: +// ControlleeExpectation = pair of atomic counters to track controllee's creation/deletion +// ControllerExpectationsStore = TTLStore + a ControlleeExpectation per controller +// +// * Once set expectations can only be lowered +// * A controller isn't synced till its expectations are either fulfilled, or expire +// * Controllers that don't set expectations will get woken up for every matching controllee + +// ExpKeyFunc to parse out the key from a ControlleeExpectation +var ExpKeyFunc = func(obj interface{}) (string, error) { + if e, ok := obj.(*ControlleeExpectations); ok { + return e.key, nil + } + return "", fmt.Errorf("Could not find key for obj %#v", obj) +} + +// ControllerExpectationsInterface is an interface that allows users to set and wait on expectations. +// Only abstracted out for testing. +// Warning: if using KeyFunc it is not safe to use a single ControllerExpectationsInterface with different +// types of controllers, because the keys might conflict across types. +type ControllerExpectationsInterface interface { + GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error) + SatisfiedExpectations(controllerKey string) bool + DeleteExpectations(controllerKey string) + SetExpectations(controllerKey string, add, del int) error + ExpectCreations(controllerKey string, adds int) error + ExpectDeletions(controllerKey string, dels int) error + CreationObserved(controllerKey string) + DeletionObserved(controllerKey string) + RaiseExpectations(controllerKey string, add, del int) + LowerExpectations(controllerKey string, add, del int) +} + +// ControllerExpectations is a cache mapping controllers to what they expect to see before being woken up for a sync. +type ControllerExpectations struct { + cache.Store +} + +// GetExpectations returns the ControlleeExpectations of the given controller. +func (r *ControllerExpectations) GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error) { + if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists { + return exp.(*ControlleeExpectations), true, nil + } else { + return nil, false, err + } +} + +// DeleteExpectations deletes the expectations of the given controller from the TTLStore. +func (r *ControllerExpectations) DeleteExpectations(controllerKey string) { + if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists { + if err := r.Delete(exp); err != nil { + klog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err) + } + } +} + +// SatisfiedExpectations returns true if the required adds/dels for the given controller have been observed. +// Add/del counts are established by the controller at sync time, and updated as controllees are observed by the controller +// manager. 
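+//
+// A minimal lifecycle sketch, assuming key is the controller's "namespace/name"
+// string and error handling is elided:
+//
+//	exp := NewControllerExpectations()
+//	if exp.SatisfiedExpectations(key) {
+//		// Decide to create 3 pods; record that before issuing the creates.
+//		exp.ExpectCreations(key, 3)
+//	}
+//	// In the pod informer's Add handler:
+//	exp.CreationObserved(key)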
+func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) bool {
+	if exp, exists, err := r.GetExpectations(controllerKey); exists {
+		if exp.Fulfilled() {
+			klog.V(4).Infof("Controller expectations fulfilled %#v", exp)
+			return true
+		} else if exp.isExpired() {
+			klog.V(4).Infof("Controller expectations expired %#v", exp)
+			return true
+		} else {
+			klog.V(4).Infof("Controller still waiting on expectations %#v", exp)
+			return false
+		}
+	} else if err != nil {
+		klog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err)
+	} else {
+		// When a new controller is created, it doesn't have expectations.
+		// When it doesn't see expected watch events for > TTL, the expectations expire.
+		//	- In this case it wakes up, creates/deletes controllees, and sets expectations again.
+		// When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire.
+		//	- In this case it continues without setting expectations till it needs to create/delete controllees.
+		klog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey)
+	}
+	// Trigger a sync if we either encountered an error (which shouldn't happen since we're
+	// getting from local store) or this controller hasn't established expectations.
+	return true
+}
+
+// TODO: Extend ExpirationCache to support explicit expiration.
+// TODO: Make this possible to disable in tests.
+// TODO: Support injection of clock.
+func (exp *ControlleeExpectations) isExpired() bool {
+	return clock.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout
+}
+
+// SetExpectations registers new expectations for the given controller. Forgets existing expectations.
+func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error {
+	exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: clock.RealClock{}.Now()}
+	klog.V(4).Infof("Setting expectations %#v", exp)
+	return r.Add(exp)
+}
+
+func (r *ControllerExpectations) ExpectCreations(controllerKey string, adds int) error {
+	return r.SetExpectations(controllerKey, adds, 0)
+}
+
+func (r *ControllerExpectations) ExpectDeletions(controllerKey string, dels int) error {
+	return r.SetExpectations(controllerKey, 0, dels)
+}
+
+// Decrements the expectation counts of the given controller.
+func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, del int) {
+	if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
+		exp.Add(int64(-add), int64(-del))
+		// The expectations might've been modified since the update on the previous line.
+		klog.V(4).Infof("Lowered expectations %#v", exp)
+	}
+}
+
+// Increments the expectation counts of the given controller.
+func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, del int) {
+	if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
+		exp.Add(int64(add), int64(del))
+		// The expectations might've been modified since the update on the previous line.
+		klog.V(4).Infof("Raised expectations %#v", exp)
+	}
+}
+
+// CreationObserved atomically decrements the `add` expectation count of the given controller.
+func (r *ControllerExpectations) CreationObserved(controllerKey string) {
+	r.LowerExpectations(controllerKey, 1, 0)
+}
+
+// DeletionObserved atomically decrements the `del` expectation count of the given controller.
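+// It is typically called from a controller's pod-delete event handler, so that a sync
+// blocked in SatisfiedExpectations can proceed once the expected deletes are observed.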
+func (r *ControllerExpectations) DeletionObserved(controllerKey string) { + r.LowerExpectations(controllerKey, 0, 1) +} + +// Expectations are either fulfilled, or expire naturally. +type Expectations interface { + Fulfilled() bool +} + +// ControlleeExpectations track controllee creates/deletes. +type ControlleeExpectations struct { + // Important: Since these two int64 fields are using sync/atomic, they have to be at the top of the struct due to a bug on 32-bit platforms + // See: https://golang.org/pkg/sync/atomic/ for more information + add int64 + del int64 + key string + timestamp time.Time +} + +// Add increments the add and del counters. +func (e *ControlleeExpectations) Add(add, del int64) { + atomic.AddInt64(&e.add, add) + atomic.AddInt64(&e.del, del) +} + +// Fulfilled returns true if this expectation has been fulfilled. +func (e *ControlleeExpectations) Fulfilled() bool { + // TODO: think about why this line being atomic doesn't matter + return atomic.LoadInt64(&e.add) <= 0 && atomic.LoadInt64(&e.del) <= 0 +} + +// GetExpectations returns the add and del expectations of the controllee. +func (e *ControlleeExpectations) GetExpectations() (int64, int64) { + return atomic.LoadInt64(&e.add), atomic.LoadInt64(&e.del) +} + +// NewControllerExpectations returns a store for ControllerExpectations. +func NewControllerExpectations() *ControllerExpectations { + return &ControllerExpectations{cache.NewStore(ExpKeyFunc)} +} + +// UIDSetKeyFunc to parse out the key from a UIDSet. +var UIDSetKeyFunc = func(obj interface{}) (string, error) { + if u, ok := obj.(*UIDSet); ok { + return u.key, nil + } + return "", fmt.Errorf("Could not find key for obj %#v", obj) +} + +// UIDSet holds a key and a set of UIDs. Used by the +// UIDTrackingControllerExpectations to remember which UID it has seen/still +// waiting for. +type UIDSet struct { + sets.String + key string +} + +// UIDTrackingControllerExpectations tracks the UID of the pods it deletes. +// This cache is needed over plain old expectations to safely handle graceful +// deletion. The desired behavior is to treat an update that sets the +// DeletionTimestamp on an object as a delete. To do so consistently, one needs +// to remember the expected deletes so they aren't double counted. +// TODO: Track creates as well (#22599) +type UIDTrackingControllerExpectations struct { + ControllerExpectationsInterface + // TODO: There is a much nicer way to do this that involves a single store, + // a lock per entry, and a ControlleeExpectationsInterface type. + uidStoreLock sync.Mutex + // Store used for the UIDs associated with any expectation tracked via the + // ControllerExpectationsInterface. + uidStore cache.Store +} + +// GetUIDs is a convenience method to avoid exposing the set of expected uids. +// The returned set is not thread safe, all modifications must be made holding +// the uidStoreLock. +func (u *UIDTrackingControllerExpectations) GetUIDs(controllerKey string) sets.String { + if uid, exists, err := u.uidStore.GetByKey(controllerKey); err == nil && exists { + return uid.(*UIDSet).String + } + return nil +} + +// ExpectDeletions records expectations for the given deleteKeys, against the given controller. 
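+// The deleted keys are typically "namespace/name" strings such as those produced by
+// PodKey, e.g. (uidExp and rsKey are illustrative):
+//
+//	uidExp.ExpectDeletions(rsKey, []string{PodKey(pod1), PodKey(pod2)})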
+func (u *UIDTrackingControllerExpectations) ExpectDeletions(rcKey string, deletedKeys []string) error {
+	u.uidStoreLock.Lock()
+	defer u.uidStoreLock.Unlock()
+
+	if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 {
+		klog.Errorf("Clobbering existing delete keys: %+v", existing)
+	}
+	expectedUIDs := sets.NewString()
+	for _, k := range deletedKeys {
+		expectedUIDs.Insert(k)
+	}
+	klog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys)
+	if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil {
+		return err
+	}
+	return u.ControllerExpectationsInterface.ExpectDeletions(rcKey, expectedUIDs.Len())
+}
+
+// DeletionObserved records the given deleteKey as a deletion, for the given rc.
+func (u *UIDTrackingControllerExpectations) DeletionObserved(rcKey, deleteKey string) {
+	u.uidStoreLock.Lock()
+	defer u.uidStoreLock.Unlock()
+
+	uids := u.GetUIDs(rcKey)
+	if uids != nil && uids.Has(deleteKey) {
+		klog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey)
+		u.ControllerExpectationsInterface.DeletionObserved(rcKey)
+		uids.Delete(deleteKey)
+	}
+}
+
+// DeleteExpectations deletes the UID set and invokes DeleteExpectations on the
+// underlying ControllerExpectationsInterface.
+func (u *UIDTrackingControllerExpectations) DeleteExpectations(rcKey string) {
+	u.uidStoreLock.Lock()
+	defer u.uidStoreLock.Unlock()
+
+	u.ControllerExpectationsInterface.DeleteExpectations(rcKey)
+	if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists {
+		if err := u.uidStore.Delete(uidExp); err != nil {
+			klog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err)
+		}
+	}
+}
+
+// NewUIDTrackingControllerExpectations returns a wrapper around
+// ControllerExpectations that is aware of deleteKeys.
+func NewUIDTrackingControllerExpectations(ce ControllerExpectationsInterface) *UIDTrackingControllerExpectations {
+	return &UIDTrackingControllerExpectations{ControllerExpectationsInterface: ce, uidStore: cache.NewStore(UIDSetKeyFunc)}
+}
+
+// Reasons for pod events
+const (
+	// FailedCreatePodReason is added in an event and in a replica set condition
+	// when a pod for a replica set fails to be created.
+	FailedCreatePodReason = "FailedCreate"
+	// SuccessfulCreatePodReason is added in an event when a pod for a replica set
+	// is successfully created.
+	SuccessfulCreatePodReason = "SuccessfulCreate"
+	// FailedDeletePodReason is added in an event and in a replica set condition
+	// when a pod for a replica set fails to be deleted.
+	FailedDeletePodReason = "FailedDelete"
+	// SuccessfulDeletePodReason is added in an event when a pod for a replica set
+	// is successfully deleted.
+	SuccessfulDeletePodReason = "SuccessfulDelete"
+)
+
+// RSControlInterface is an interface that knows how to add or delete
+// ReplicaSets, as well as increment or decrement them. It is used
+// by the deployment controller to ease testing of actions that it takes.
+type RSControlInterface interface {
+	PatchReplicaSet(namespace, name string, data []byte) error
+}
+
+// RealRSControl is the default implementation of RSControlInterface.
+type RealRSControl struct { + KubeClient clientset.Interface + Recorder record.EventRecorder +} + +var _ RSControlInterface = &RealRSControl{} + +func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error { + _, err := r.KubeClient.ExtensionsV1beta1().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data) + return err +} + +// TODO: merge the controller revision interface in controller_history.go with this one +// ControllerRevisionControlInterface is an interface that knows how to patch +// ControllerRevisions, as well as increment or decrement them. It is used +// by the daemonset controller to ease testing of actions that it takes. +type ControllerRevisionControlInterface interface { + PatchControllerRevision(namespace, name string, data []byte) error +} + +// RealControllerRevisionControl is the default implementation of ControllerRevisionControlInterface. +type RealControllerRevisionControl struct { + KubeClient clientset.Interface +} + +var _ ControllerRevisionControlInterface = &RealControllerRevisionControl{} + +func (r RealControllerRevisionControl) PatchControllerRevision(namespace, name string, data []byte) error { + _, err := r.KubeClient.AppsV1beta1().ControllerRevisions(namespace).Patch(name, types.StrategicMergePatchType, data) + return err +} + +// PodControlInterface is an interface that knows how to add or delete pods +// created as an interface to allow testing. +type PodControlInterface interface { + // CreatePods creates new pods according to the spec. + CreatePods(namespace string, template *v1.PodTemplateSpec, object runtime.Object) error + // CreatePodsOnNode creates a new pod according to the spec on the specified node, + // and sets the ControllerRef. + CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error + // CreatePodsWithControllerRef creates new pods according to the spec, and sets object as the pod's controller. + CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error + // DeletePod deletes the pod identified by podID. + DeletePod(namespace string, podID string, object runtime.Object) error + // PatchPod patches the pod. + PatchPod(namespace, name string, data []byte) error +} + +// RealPodControl is the default implementation of PodControlInterface. 
+type RealPodControl struct { + KubeClient clientset.Interface + Recorder record.EventRecorder +} + +var _ PodControlInterface = &RealPodControl{} + +func getPodsLabelSet(template *v1.PodTemplateSpec) labels.Set { + desiredLabels := make(labels.Set) + for k, v := range template.Labels { + desiredLabels[k] = v + } + return desiredLabels +} + +func getPodsFinalizers(template *v1.PodTemplateSpec) []string { + desiredFinalizers := make([]string, len(template.Finalizers)) + copy(desiredFinalizers, template.Finalizers) + return desiredFinalizers +} + +func getPodsAnnotationSet(template *v1.PodTemplateSpec) labels.Set { + desiredAnnotations := make(labels.Set) + for k, v := range template.Annotations { + desiredAnnotations[k] = v + } + return desiredAnnotations +} + +func getPodsPrefix(controllerName string) string { + // use the dash (if the name isn't too long) to make the pod name a bit prettier + prefix := fmt.Sprintf("%s-", controllerName) + if len(validation.ValidatePodName(prefix, true)) != 0 { + prefix = controllerName + } + return prefix +} + +func validateControllerRef(controllerRef *metav1.OwnerReference) error { + if controllerRef == nil { + return fmt.Errorf("controllerRef is nil") + } + if len(controllerRef.APIVersion) == 0 { + return fmt.Errorf("controllerRef has empty APIVersion") + } + if len(controllerRef.Kind) == 0 { + return fmt.Errorf("controllerRef has empty Kind") + } + if controllerRef.Controller == nil || *controllerRef.Controller != true { + return fmt.Errorf("controllerRef.Controller is not set to true") + } + if controllerRef.BlockOwnerDeletion == nil || *controllerRef.BlockOwnerDeletion != true { + return fmt.Errorf("controllerRef.BlockOwnerDeletion is not set") + } + return nil +} + +func (r RealPodControl) CreatePods(namespace string, template *v1.PodTemplateSpec, object runtime.Object) error { + return r.createPods("", namespace, template, object, nil) +} + +func (r RealPodControl) CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *metav1.OwnerReference) error { + if err := validateControllerRef(controllerRef); err != nil { + return err + } + return r.createPods("", namespace, template, controllerObject, controllerRef) +} + +func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error { + if err := validateControllerRef(controllerRef); err != nil { + return err + } + return r.createPods(nodeName, namespace, template, object, controllerRef) +} + +func (r RealPodControl) PatchPod(namespace, name string, data []byte) error { + _, err := r.KubeClient.CoreV1().Pods(namespace).Patch(name, types.StrategicMergePatchType, data) + return err +} + +func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Object, controllerRef *metav1.OwnerReference) (*v1.Pod, error) { + desiredLabels := getPodsLabelSet(template) + desiredFinalizers := getPodsFinalizers(template) + desiredAnnotations := getPodsAnnotationSet(template) + accessor, err := meta.Accessor(parentObject) + if err != nil { + return nil, fmt.Errorf("parentObject does not have ObjectMeta, %v", err) + } + prefix := getPodsPrefix(accessor.GetName()) + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: desiredLabels, + Annotations: desiredAnnotations, + GenerateName: prefix, + Finalizers: desiredFinalizers, + }, + } + if controllerRef != nil { + pod.OwnerReferences = append(pod.OwnerReferences, *controllerRef) + } + 
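+	// Deep-copy the template's spec so later mutations of the new Pod (for example,
+	// createPods setting Spec.NodeName below) never write back into the shared template.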
pod.Spec = *template.Spec.DeepCopy() + return pod, nil +} + +func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error { + pod, err := GetPodFromTemplate(template, object, controllerRef) + if err != nil { + return err + } + if len(nodeName) != 0 { + pod.Spec.NodeName = nodeName + } + if labels.Set(pod.Labels).AsSelectorPreValidated().Empty() { + return fmt.Errorf("unable to create pods, no labels") + } + if newPod, err := r.KubeClient.CoreV1().Pods(namespace).Create(pod); err != nil { + r.Recorder.Eventf(object, v1.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err) + return err + } else { + accessor, err := meta.Accessor(object) + if err != nil { + klog.Errorf("parentObject does not have ObjectMeta, %v", err) + return nil + } + klog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name) + r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name) + } + return nil +} + +func (r RealPodControl) DeletePod(namespace string, podID string, object runtime.Object) error { + accessor, err := meta.Accessor(object) + if err != nil { + return fmt.Errorf("object does not have ObjectMeta, %v", err) + } + klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) + if err := r.KubeClient.CoreV1().Pods(namespace).Delete(podID, nil); err != nil && !apierrors.IsNotFound(err) { + r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err) + return fmt.Errorf("unable to delete pods: %v", err) + } else { + r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulDeletePodReason, "Deleted pod: %v", podID) + } + return nil +} + +type FakePodControl struct { + sync.Mutex + Templates []v1.PodTemplateSpec + ControllerRefs []metav1.OwnerReference + DeletePodName []string + Patches [][]byte + Err error + CreateLimit int + CreateCallCount int +} + +var _ PodControlInterface = &FakePodControl{} + +func (f *FakePodControl) PatchPod(namespace, name string, data []byte) error { + f.Lock() + defer f.Unlock() + f.Patches = append(f.Patches, data) + if f.Err != nil { + return f.Err + } + return nil +} + +func (f *FakePodControl) CreatePods(namespace string, spec *v1.PodTemplateSpec, object runtime.Object) error { + f.Lock() + defer f.Unlock() + f.CreateCallCount++ + if f.CreateLimit != 0 && f.CreateCallCount > f.CreateLimit { + return fmt.Errorf("Not creating pod, limit %d already reached (create call %d)", f.CreateLimit, f.CreateCallCount) + } + f.Templates = append(f.Templates, *spec) + if f.Err != nil { + return f.Err + } + return nil +} + +func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error { + f.Lock() + defer f.Unlock() + f.CreateCallCount++ + if f.CreateLimit != 0 && f.CreateCallCount > f.CreateLimit { + return fmt.Errorf("Not creating pod, limit %d already reached (create call %d)", f.CreateLimit, f.CreateCallCount) + } + f.Templates = append(f.Templates, *spec) + f.ControllerRefs = append(f.ControllerRefs, *controllerRef) + if f.Err != nil { + return f.Err + } + return nil +} + +func (f *FakePodControl) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error { + f.Lock() + defer f.Unlock() + f.CreateCallCount++ + if f.CreateLimit != 0 && f.CreateCallCount > f.CreateLimit { 
+ return fmt.Errorf("Not creating pod, limit %d already reached (create call %d)", f.CreateLimit, f.CreateCallCount) + } + f.Templates = append(f.Templates, *template) + f.ControllerRefs = append(f.ControllerRefs, *controllerRef) + if f.Err != nil { + return f.Err + } + return nil +} + +func (f *FakePodControl) DeletePod(namespace string, podID string, object runtime.Object) error { + f.Lock() + defer f.Unlock() + f.DeletePodName = append(f.DeletePodName, podID) + if f.Err != nil { + return f.Err + } + return nil +} + +func (f *FakePodControl) Clear() { + f.Lock() + defer f.Unlock() + f.DeletePodName = []string{} + f.Templates = []v1.PodTemplateSpec{} + f.ControllerRefs = []metav1.OwnerReference{} + f.Patches = [][]byte{} + f.CreateLimit = 0 + f.CreateCallCount = 0 +} + +// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs. +type ByLogging []*v1.Pod + +func (s ByLogging) Len() int { return len(s) } +func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s ByLogging) Less(i, j int) bool { + // 1. assigned < unassigned + if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { + return len(s[i].Spec.NodeName) > 0 + } + // 2. PodRunning < PodUnknown < PodPending + m := map[v1.PodPhase]int{v1.PodRunning: 0, v1.PodUnknown: 1, v1.PodPending: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // 3. ready < not ready + if podutil.IsPodReady(s[i]) != podutil.IsPodReady(s[j]) { + return podutil.IsPodReady(s[i]) + } + // TODO: take availability into account when we push minReadySeconds information from deployment into pods, + // see https://github.com/kubernetes/kubernetes/issues/22065 + // 4. Been ready for more time < less time < empty time + if podutil.IsPodReady(s[i]) && podutil.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { + return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i])) + } + // 5. Pods with containers with higher restart counts < lower restart counts + if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) { + return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j]) + } + // 6. older pods < newer pods < empty timestamp pods + if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) { + return afterOrZero(&s[j].CreationTimestamp, &s[i].CreationTimestamp) + } + return false +} + +// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete. +type ActivePods []*v1.Pod + +func (s ActivePods) Len() int { return len(s) } +func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s ActivePods) Less(i, j int) bool { + // 1. Unassigned < assigned + // If only one of the pods is unassigned, the unassigned one is smaller + if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) { + return len(s[i].Spec.NodeName) == 0 + } + // 2. PodPending < PodUnknown < PodRunning + m := map[v1.PodPhase]int{v1.PodPending: 0, v1.PodUnknown: 1, v1.PodRunning: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // 3. 
Not ready < ready
+	// If only one of the pods is not ready, the not ready one is smaller
+	if podutil.IsPodReady(s[i]) != podutil.IsPodReady(s[j]) {
+		return !podutil.IsPodReady(s[i])
+	}
+	// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
+	// see https://github.com/kubernetes/kubernetes/issues/22065
+	// 4. Been ready for empty time < less time < more time
+	// If both pods are ready, the latest ready one is smaller
+	if podutil.IsPodReady(s[i]) && podutil.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
+		return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
+	}
+	// 5. Pods with containers with higher restart counts < lower restart counts
+	if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
+		return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
+	}
+	// 6. Empty creation time pods < newer pods < older pods
+	if !s[i].CreationTimestamp.Equal(&s[j].CreationTimestamp) {
+		return afterOrZero(&s[i].CreationTimestamp, &s[j].CreationTimestamp)
+	}
+	return false
+}
+
+// afterOrZero checks if time t1 is after time t2; if one of them
+// is zero, the zero time is seen as after non-zero time.
+func afterOrZero(t1, t2 *metav1.Time) bool {
+	if t1.Time.IsZero() || t2.Time.IsZero() {
+		return t1.Time.IsZero()
+	}
+	return t1.After(t2.Time)
+}
+
+func podReadyTime(pod *v1.Pod) *metav1.Time {
+	if podutil.IsPodReady(pod) {
+		for _, c := range pod.Status.Conditions {
+			// we only care about pod ready conditions
+			if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
+				return &c.LastTransitionTime
+			}
+		}
+	}
+	return &metav1.Time{}
+}
+
+func maxContainerRestarts(pod *v1.Pod) int {
+	maxRestarts := 0
+	for _, c := range pod.Status.ContainerStatuses {
+		maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount))
+	}
+	return maxRestarts
+}
+
+// FilterActivePods returns pods that have not terminated.
+func FilterActivePods(pods []*v1.Pod) []*v1.Pod {
+	var result []*v1.Pod
+	for _, p := range pods {
+		if IsPodActive(p) {
+			result = append(result, p)
+		} else {
+			klog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v",
+				p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp)
+		}
+	}
+	return result
+}
+
+func IsPodActive(p *v1.Pod) bool {
+	return v1.PodSucceeded != p.Status.Phase &&
+		v1.PodFailed != p.Status.Phase &&
+		p.DeletionTimestamp == nil
+}
+
+// FilterActiveReplicaSets returns replica sets that have (or at least ought to have) pods.
+func FilterActiveReplicaSets(replicaSets []*apps.ReplicaSet) []*apps.ReplicaSet {
+	activeFilter := func(rs *apps.ReplicaSet) bool {
+		return rs != nil && *(rs.Spec.Replicas) > 0
+	}
+	return FilterReplicaSets(replicaSets, activeFilter)
+}
+
+type filterRS func(rs *apps.ReplicaSet) bool
+
+// FilterReplicaSets returns replica sets that are filtered by filterFn (all returned ones should match filterFn).
+func FilterReplicaSets(RSes []*apps.ReplicaSet, filterFn filterRS) []*apps.ReplicaSet {
+	var filtered []*apps.ReplicaSet
+	for i := range RSes {
+		if filterFn(RSes[i]) {
+			filtered = append(filtered, RSes[i])
+		}
+	}
+	return filtered
+}
+
+// PodKey returns a key unique to the given pod within a cluster.
+// It's used so we consistently use the same key scheme in this module.
+// It does exactly what cache.MetaNamespaceKeyFunc would have done
+// except there's no possibility for error since we know the exact type.
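+// For example, a pod named "coredns-5c98db65d4" in namespace "kube-system" yields
+// the key "kube-system/coredns-5c98db65d4".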
+func PodKey(pod *v1.Pod) string {
+	return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
+}
+
+// ControllersByCreationTimestamp sorts a list of ReplicationControllers by creation timestamp, using their names as a tie breaker.
+type ControllersByCreationTimestamp []*v1.ReplicationController
+
+func (o ControllersByCreationTimestamp) Len() int      { return len(o) }
+func (o ControllersByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
+func (o ControllersByCreationTimestamp) Less(i, j int) bool {
+	if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
+		return o[i].Name < o[j].Name
+	}
+	return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
+}
+
+// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSets by creation timestamp, using their names as a tie breaker.
+type ReplicaSetsByCreationTimestamp []*apps.ReplicaSet
+
+func (o ReplicaSetsByCreationTimestamp) Len() int      { return len(o) }
+func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
+func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {
+	if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
+		return o[i].Name < o[j].Name
+	}
+	return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
+}
+
+// ReplicaSetsBySizeOlder sorts a list of ReplicaSets by size in descending order, using their creation timestamp or name as a tie breaker.
+// By using the creation timestamp, this sorts from old to new replica sets.
+type ReplicaSetsBySizeOlder []*apps.ReplicaSet
+
+func (o ReplicaSetsBySizeOlder) Len() int      { return len(o) }
+func (o ReplicaSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
+func (o ReplicaSetsBySizeOlder) Less(i, j int) bool {
+	if *(o[i].Spec.Replicas) == *(o[j].Spec.Replicas) {
+		return ReplicaSetsByCreationTimestamp(o).Less(i, j)
+	}
+	return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas)
+}
+
+// ReplicaSetsBySizeNewer sorts a list of ReplicaSets by size in descending order, using their creation timestamp or name as a tie breaker.
+// By using the creation timestamp, this sorts from new to old replica sets.
+type ReplicaSetsBySizeNewer []*apps.ReplicaSet
+
+func (o ReplicaSetsBySizeNewer) Len() int      { return len(o) }
+func (o ReplicaSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
+func (o ReplicaSetsBySizeNewer) Less(i, j int) bool {
+	if *(o[i].Spec.Replicas) == *(o[j].Spec.Replicas) {
+		return ReplicaSetsByCreationTimestamp(o).Less(j, i)
+	}
+	return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas)
+}
+
+// AddOrUpdateTaintOnNode adds taints to the node. If a taint was added, it issues API calls
+// to update the node; otherwise, no API calls are made. Returns an error if any occurs.
+func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taints ...*v1.Taint) error {
+	if len(taints) == 0 {
+		return nil
+	}
+	firstTry := true
+	return clientretry.RetryOnConflict(UpdateTaintBackoff, func() error {
+		var err error
+		var oldNode *v1.Node
+		// First we try getting node from the API server cache, as it's cheaper. If it fails
+		// we get it from etcd to be sure to have fresh data.
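+		// ResourceVersion: "0" lets the API server answer from its watch cache, which can
+		// be slightly stale; if the taint patch then conflicts, the retry re-reads fresh.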
+ if firstTry { + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + firstTry = false + } else { + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + } + if err != nil { + return err + } + + var newNode *v1.Node + oldNodeCopy := oldNode + updated := false + for _, taint := range taints { + curNewNode, ok, err := taintutils.AddOrUpdateTaint(oldNodeCopy, taint) + if err != nil { + return fmt.Errorf("Failed to update taint of node!") + } + updated = updated || ok + newNode = curNewNode + oldNodeCopy = curNewNode + } + if !updated { + return nil + } + return PatchNodeTaints(c, nodeName, oldNode, newNode) + }) +} + +// RemoveTaintOffNode is for cleaning up taints temporarily added to node, +// won't fail if target taint doesn't exist or has been removed. +// If passed a node it'll check if there's anything to be done, if taint is not present it won't issue +// any API calls. +func RemoveTaintOffNode(c clientset.Interface, nodeName string, node *v1.Node, taints ...*v1.Taint) error { + if len(taints) == 0 { + return nil + } + // Short circuit for limiting amount of API calls. + if node != nil { + match := false + for _, taint := range taints { + if taintutils.TaintExists(node.Spec.Taints, taint) { + match = true + break + } + } + if !match { + return nil + } + } + + firstTry := true + return clientretry.RetryOnConflict(UpdateTaintBackoff, func() error { + var err error + var oldNode *v1.Node + // First we try getting node from the API server cache, as it's cheaper. If it fails + // we get it from etcd to be sure to have fresh data. + if firstTry { + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{ResourceVersion: "0"}) + firstTry = false + } else { + oldNode, err = c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + } + if err != nil { + return err + } + + var newNode *v1.Node + oldNodeCopy := oldNode + updated := false + for _, taint := range taints { + curNewNode, ok, err := taintutils.RemoveTaint(oldNodeCopy, taint) + if err != nil { + return fmt.Errorf("Failed to remove taint of node!") + } + updated = updated || ok + newNode = curNewNode + oldNodeCopy = curNewNode + } + if !updated { + return nil + } + return PatchNodeTaints(c, nodeName, oldNode, newNode) + }) +} + +// PatchNodeTaints patches node's taints. +func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error { + oldData, err := json.Marshal(oldNode) + if err != nil { + return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err) + } + + newTaints := newNode.Spec.Taints + newNodeClone := oldNode.DeepCopy() + newNodeClone.Spec.Taints = newTaints + newData, err := json.Marshal(newNodeClone) + if err != nil { + return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err) + } + + patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) + if err != nil { + return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) + } + + _, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes) + return err +} + +// WaitForCacheSync is a wrapper around cache.WaitForCacheSync that generates log messages +// indicating that the controller identified by controllerName is waiting for syncs, followed by +// either a successful or failed sync. 
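+//
+// Typical use at the top of a controller's Run method (the name and informer are
+// illustrative):
+//
+//	if !WaitForCacheSync("strategy", stopCh, podInformer.Informer().HasSynced) {
+//		return
+//	}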
+func WaitForCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) bool {
+ klog.Infof("Waiting for caches to sync for %s controller", controllerName)
+
+ if !cache.WaitForCacheSync(stopCh, cacheSyncs...) {
+ utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s controller", controllerName))
+ return false
+ }
+
+ klog.Infof("Caches are synced for %s controller", controllerName)
+ return true
+}
+
+// ComputeHash returns a hash value calculated from the pod template and
+// a collisionCount, which is used to avoid hash collisions. The hash is encoded
+// with rand.SafeEncodeString, whose vowel-free alphabet keeps the result from
+// spelling unwanted words.
+func ComputeHash(template *v1.PodTemplateSpec, collisionCount *int32) string {
+ podTemplateSpecHasher := fnv.New32a()
+ hashutil.DeepHashObject(podTemplateSpecHasher, *template)
+
+ // Add collisionCount in the hash if it exists.
+ if collisionCount != nil {
+ collisionCountBytes := make([]byte, 8)
+ binary.LittleEndian.PutUint32(collisionCountBytes, uint32(*collisionCount))
+ podTemplateSpecHasher.Write(collisionCountBytes)
+ }
+
+ return rand.SafeEncodeString(fmt.Sprint(podTemplateSpecHasher.Sum32()))
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/doc.go b/vendor/k8s.io/kubernetes/pkg/controller/doc.go
new file mode 100644
index 000000000..3c5c943da
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/controller/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package controller contains code for controllers (like the replication
+// controller).
+package controller // import "k8s.io/kubernetes/pkg/controller"
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go b/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go
new file mode 100644
index 000000000..160aa6e08
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+ "hash/fnv"
+ "sync"
+
+ "github.com/golang/groupcache/lru"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ hashutil "k8s.io/kubernetes/pkg/util/hash"
+)
+
+type objectWithMeta interface {
+ metav1.Object
+}
+
+// keyFunc returns the key of an object, which is used to look up its matching object in the cache.
+// Since objects are matched by namespace and labels/selector, two objects with the same
+// namespace and labels map to the same key.
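+//
+// Note that the key is a 32-bit FNV-1a hash widened to uint64, so distinct
+// namespace/label combinations can collide; callers must treat a cache hit as a
+// hint and re-verify the match (see GetMatchingObject below).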
+func keyFunc(obj objectWithMeta) uint64 {
+ hash := fnv.New32a()
+ hashutil.DeepHashObject(hash, &equivalenceLabelObj{
+ namespace: obj.GetNamespace(),
+ labels: obj.GetLabels(),
+ })
+ return uint64(hash.Sum32())
+}
+
+type equivalenceLabelObj struct {
+ namespace string
+ labels map[string]string
+}
+
+// MatchingCache saves the label and selector matching relationship.
+type MatchingCache struct {
+ mutex sync.RWMutex
+ cache *lru.Cache
+}
+
+// NewMatchingCache returns a MatchingCache, which saves the label and selector matching relationship.
+func NewMatchingCache(maxCacheEntries int) *MatchingCache {
+ return &MatchingCache{
+ cache: lru.New(maxCacheEntries),
+ }
+}
+
+// Add will add matching information to the cache.
+func (c *MatchingCache) Add(labelObj objectWithMeta, selectorObj objectWithMeta) {
+ key := keyFunc(labelObj)
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ c.cache.Add(key, selectorObj)
+}
+
+// GetMatchingObject looks up the matching object for a given object.
+// Note: the cached information may be invalid because the controller may have been
+// deleted or updated; callers must verify the result externally to ensure the cached
+// data is not stale.
+func (c *MatchingCache) GetMatchingObject(labelObj objectWithMeta) (controller interface{}, exists bool) {
+ key := keyFunc(labelObj)
+ // NOTE: we use Lock() instead of RLock() here because lru's Get() method also mutates
+ // state (it updates the least-recently-used bookkeeping), so it must not be called
+ // concurrently.
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ return c.cache.Get(key)
+}
+
+// Update updates the cached matching information.
+func (c *MatchingCache) Update(labelObj objectWithMeta, selectorObj objectWithMeta) {
+ c.Add(labelObj, selectorObj)
+}
+
+// InvalidateAll invalidates the whole cache.
+func (c *MatchingCache) InvalidateAll() {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+ c.cache = lru.New(c.cache.MaxEntries)
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
new file mode 100644
index 000000000..cd35ecb70
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
@@ -0,0 +1,483 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package features
+
+import (
+ apiextensionsfeatures "k8s.io/apiextensions-apiserver/pkg/features"
+ genericfeatures "k8s.io/apiserver/pkg/features"
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
+)
+
+const (
+ // Every feature gate should add an entry here following this template:
+ //
+ // // owner: @username
+ // // alpha: v1.X
+ // MyFeature utilfeature.Feature = "MyFeature"
+
+ // owner: @tallclair
+ // beta: v1.4
+ AppArmor utilfeature.Feature = "AppArmor"
+
+ // owner: @mtaufen
+ // alpha: v1.4
+ // beta: v1.11
+ DynamicKubeletConfig utilfeature.Feature = "DynamicKubeletConfig"
+
+ // owner: @pweil-
+ // alpha: v1.5
+ //
+ // Default userns=host for containers that use other host namespaces or host mounts, for
+ // pods that contain a privileged container, or for containers with specific non-namespaced
+ // capabilities (MKNOD, SYS_MODULE, SYS_TIME). This should only be enabled if user namespace
+ // remapping is enabled in the docker daemon.
+ ExperimentalHostUserNamespaceDefaultingGate utilfeature.Feature = "ExperimentalHostUserNamespaceDefaulting"
+
+ // owner: @vishh
+ // alpha: v1.5
+ //
+ // DEPRECATED - This feature is deprecated by Pod Priority and Preemption as of Kubernetes 1.13.
+ // Ensures guaranteed scheduling of pods marked with a special pod annotation `scheduler.alpha.kubernetes.io/critical-pod`
+ // and also prevents them from being evicted from a node.
+ // Note: This feature is not supported for `BestEffort` pods.
+ ExperimentalCriticalPodAnnotation utilfeature.Feature = "ExperimentalCriticalPodAnnotation"
+
+ // owner: @jiayingz
+ // beta: v1.10
+ //
+ // Enables support for Device Plugins
+ DevicePlugins utilfeature.Feature = "DevicePlugins"
+
+ // owner: @Huang-Wei
+ // beta: v1.13
+ //
+ // Changes the logic behind evicting Pods from not-ready Nodes
+ // to take advantage of NoExecute Taints and Tolerations.
+ TaintBasedEvictions utilfeature.Feature = "TaintBasedEvictions"
+
+ // owner: @mikedanese
+ // alpha: v1.7
+ // beta: v1.12
+ //
+ // Gets a server certificate for the kubelet from the Certificate Signing
+ // Request API instead of generating one self-signed, and auto rotates the
+ // certificate as expiration approaches.
+ RotateKubeletServerCertificate utilfeature.Feature = "RotateKubeletServerCertificate"
+
+ // owner: @mikedanese
+ // beta: v1.8
+ //
+ // Automatically renews the client certificate used for communicating with
+ // the API server as the certificate approaches expiration.
+ RotateKubeletClientCertificate utilfeature.Feature = "RotateKubeletClientCertificate"
+
+ // owner: @msau42
+ // alpha: v1.7
+ //
+ // A new volume type that supports local disks on a node.
+ PersistentLocalVolumes utilfeature.Feature = "PersistentLocalVolumes"
+
+ // owner: @jinxu
+ // beta: v1.10
+ //
+ // New local storage types to support local storage capacity isolation
+ LocalStorageCapacityIsolation utilfeature.Feature = "LocalStorageCapacityIsolation"
+
+ // owner: @gnufied
+ // beta: v1.11
+ // Ability to expand persistent volumes
+ ExpandPersistentVolumes utilfeature.Feature = "ExpandPersistentVolumes"
+
+ // owner: @mlmhl
+ // alpha: v1.11
+ // Ability to expand persistent volumes' file system without unmounting volumes.
+ ExpandInUsePersistentVolumes utilfeature.Feature = "ExpandInUsePersistentVolumes"
+
+ // owner: @verb
+ // alpha: v1.10
+ //
+ // Allows running a "debug container" in a pod's namespaces to troubleshoot a running pod.
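+ // Disabled by default while in alpha; see defaultKubernetesFeatureGates below.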
+ DebugContainers utilfeature.Feature = "DebugContainers" + + // owner: @verb + // beta: v1.12 + // + // Allows all containers in a pod to share a process namespace. + PodShareProcessNamespace utilfeature.Feature = "PodShareProcessNamespace" + + // owner: @bsalamat + // alpha: v1.8 + // + // Add priority to pods. Priority affects scheduling and preemption of pods. + PodPriority utilfeature.Feature = "PodPriority" + + // owner: @resouer + // alpha: v1.8 + // + // Enable equivalence class cache for scheduler. + EnableEquivalenceClassCache utilfeature.Feature = "EnableEquivalenceClassCache" + + // owner: @k82cn + // beta: v1.12 + // + // Taint nodes based on their condition status for 'NetworkUnavailable', + // 'MemoryPressure', 'OutOfDisk' and 'DiskPressure'. + TaintNodesByCondition utilfeature.Feature = "TaintNodesByCondition" + + // owner: @jsafrane + // GA: v1.12 + // + // Note: This feature gate is unconditionally enabled in v1.13 and will be removed in v1.14. + // Enable mount propagation of volumes. + MountPropagation utilfeature.Feature = "MountPropagation" + + // owner: @sjenning + // alpha: v1.11 + // + // Allows resource reservations at the QoS level preventing pods at lower QoS levels from + // bursting into resources requested at higher QoS levels (memory only for now) + QOSReserved utilfeature.Feature = "QOSReserved" + + // owner: @ConnorDoyle + // alpha: v1.8 + // + // Alternative container-level CPU affinity policies. + CPUManager utilfeature.Feature = "CPUManager" + + // owner: @szuecs + // alpha: v1.12 + // + // Enable nodes to change CPUCFSQuotaPeriod + CPUCFSQuotaPeriod utilfeature.Feature = "CustomCPUCFSQuotaPeriod" + + // owner: @derekwaynecarr + // beta: v1.10 + // + // Enable pods to consume pre-allocated huge pages of varying page sizes + HugePages utilfeature.Feature = "HugePages" + + // owner: @sjenning + // beta: v1.11 + // + // Enable pods to set sysctls on a pod + Sysctls utilfeature.Feature = "Sysctls" + + // owner @brendandburns + // alpha: v1.9 + // + // Enable nodes to exclude themselves from service load balancers + ServiceNodeExclusion utilfeature.Feature = "ServiceNodeExclusion" + + // owner: @jsafrane + // alpha: v1.9 + // + // Enable running mount utilities in containers. + MountContainers utilfeature.Feature = "MountContainers" + + // owner: @msau42 + // GA: v1.13 + // + // Extend the default scheduler to be aware of PV topology and handle PV binding + VolumeScheduling utilfeature.Feature = "VolumeScheduling" + + // owner: @vladimirvivien + // GA: v1.13 + // + // Enable mount/attachment of Container Storage Interface (CSI) backed PVs + CSIPersistentVolume utilfeature.Feature = "CSIPersistentVolume" + + // owner: @saad-ali + // alpha: v1.12 + // Enable all logic related to the CSIDriver API object in csi.storage.k8s.io + CSIDriverRegistry utilfeature.Feature = "CSIDriverRegistry" + + // owner: @verult + // alpha: v1.12 + // Enable all logic related to the CSINodeInfo API object in csi.storage.k8s.io + CSINodeInfo utilfeature.Feature = "CSINodeInfo" + + // owner @MrHohn + // beta: v1.10 + // + // Support configurable pod DNS parameters. + CustomPodDNS utilfeature.Feature = "CustomPodDNS" + + // owner: @screeley44 + // alpha: v1.9 + // beta: v1.13 + // + // Enable Block volume support in containers. 
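+ // When enabled, a raw block PersistentVolumeClaim is consumed through the
+ // container's volumeDevices (devicePath) field rather than volumeMounts.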
+ BlockVolume utilfeature.Feature = "BlockVolume" + + // owner: @pospispa + // GA: v1.11 + // + // Postpone deletion of a PV or a PVC when they are being used + StorageObjectInUseProtection utilfeature.Feature = "StorageObjectInUseProtection" + + // owner: @aveshagarwal + // alpha: v1.9 + // + // Enable resource limits priority function + ResourceLimitsPriorityFunction utilfeature.Feature = "ResourceLimitsPriorityFunction" + + // owner: @m1093782566 + // GA: v1.11 + // + // Implement IPVS-based in-cluster service load balancing + SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode" + + // owner: @dims + // alpha: v1.10 + // + // Implement support for limiting pids in pods + SupportPodPidsLimit utilfeature.Feature = "SupportPodPidsLimit" + + // owner: @feiskyer + // alpha: v1.10 + // + // Enable Hyper-V containers on Windows + HyperVContainer utilfeature.Feature = "HyperVContainer" + + // owner: @k82cn + // beta: v1.12 + // + // Schedule DaemonSet Pods by default scheduler instead of DaemonSet controller + ScheduleDaemonSetPods utilfeature.Feature = "ScheduleDaemonSetPods" + + // owner: @mikedanese + // beta: v1.12 + // + // Implement TokenRequest endpoint on service account resources. + TokenRequest utilfeature.Feature = "TokenRequest" + + // owner: @mikedanese + // beta: v1.12 + // + // Enable ServiceAccountTokenVolumeProjection support in ProjectedVolumes. + TokenRequestProjection utilfeature.Feature = "TokenRequestProjection" + + // owner: @mikedanese + // alpha: v1.13 + // + // Migrate ServiceAccount volumes to use a projected volume consisting of a + // ServiceAccountTokenVolumeProjection. This feature adds new required flags + // to the API server. + BoundServiceAccountTokenVolume utilfeature.Feature = "BoundServiceAccountTokenVolume" + + // owner: @Random-Liu + // beta: v1.11 + // + // Enable container log rotation for cri container runtime + CRIContainerLogRotation utilfeature.Feature = "CRIContainerLogRotation" + + // owner: @verult + // GA: v1.13 + // + // Enables the regional PD feature on GCE. + GCERegionalPersistentDisk utilfeature.Feature = "GCERegionalPersistentDisk" + + // owner: @krmayankk + // alpha: v1.10 + // + // Enables control over the primary group ID of containers' init processes. + RunAsGroup utilfeature.Feature = "RunAsGroup" + + // owner: @saad-ali + // ga + // + // Allow mounting a subpath of a volume in a container + // Do not remove this feature gate even though it's GA + VolumeSubpath utilfeature.Feature = "VolumeSubpath" + + // owner: @gnufied + // beta : v1.12 + // + // Add support for volume plugins to report node specific + // volume limits + AttachVolumeLimit utilfeature.Feature = "AttachVolumeLimit" + + // owner: @ravig + // alpha: v1.11 + // + // Include volume count on node to be considered for balanced resource allocation while scheduling. + // A node which has closer cpu,memory utilization and volume count is favoured by scheduler + // while making decisions. 
+ BalanceAttachedNodeVolumes utilfeature.Feature = "BalanceAttachedNodeVolumes" + + // owner @freehan + // beta: v1.11 + // + // Support Pod Ready++ + PodReadinessGates utilfeature.Feature = "PodReadinessGates" + + // owner: @kevtaylor + // alpha: v1.11 + // + // Allow subpath environment variable substitution + // Only applicable if the VolumeSubpath feature is also enabled + VolumeSubpathEnvExpansion utilfeature.Feature = "VolumeSubpathEnvExpansion" + + // owner: @vikaschoudhary16 + // GA: v1.13 + // + // + // Enable probe based plugin watcher utility for discovering Kubelet plugins + KubeletPluginsWatcher utilfeature.Feature = "KubeletPluginsWatcher" + + // owner: @vikaschoudhary16 + // beta: v1.12 + // + // + // Enable resource quota scope selectors + ResourceQuotaScopeSelectors utilfeature.Feature = "ResourceQuotaScopeSelectors" + + // owner: @vladimirvivien + // alpha: v1.11 + // + // Enables CSI to use raw block storage volumes + CSIBlockVolume utilfeature.Feature = "CSIBlockVolume" + + // owner: @tallclair + // alpha: v1.12 + // + // Enables RuntimeClass, for selecting between multiple runtimes to run a pod. + RuntimeClass utilfeature.Feature = "RuntimeClass" + + // owner: @mtaufen + // alpha: v1.12 + // + // Kubelet uses the new Lease API to report node heartbeats, + // (Kube) Node Lifecycle Controller uses these heartbeats as a node health signal. + NodeLease utilfeature.Feature = "NodeLease" + + // owner: @janosi + // alpha: v1.12 + // + // Enables SCTP as new protocol for Service ports, NetworkPolicy, and ContainerPort in Pod/Containers definition + SCTPSupport utilfeature.Feature = "SCTPSupport" + + // owner: @xing-yang + // alpha: v1.12 + // + // Enable volume snapshot data source support. + VolumeSnapshotDataSource utilfeature.Feature = "VolumeSnapshotDataSource" + + // owner: @jessfraz + // alpha: v1.12 + // + // Enables control over ProcMountType for containers. + ProcMountType utilfeature.Feature = "ProcMountType" + + // owner: @janetkuo + // alpha: v1.12 + // + // Allow TTL controller to clean up Pods and Jobs after they finish. + TTLAfterFinished utilfeature.Feature = "TTLAfterFinished" + + // owner: @dashpole + // alpha: v1.13 + // + // Enables the kubelet's pod resources grpc endpoint + KubeletPodResources utilfeature.Feature = "KubeletPodResources" +) + +func init() { + utilfeature.DefaultFeatureGate.Add(defaultKubernetesFeatureGates) +} + +// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. +// To add a new feature, define a key for it above and add it here. The features will be +// available throughout Kubernetes binaries. 
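+//
+// At run time, individual gates are toggled with each component's --feature-gates
+// flag, for example: --feature-gates=TaintBasedEvictions=true,ProcMountType=true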
+var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ + AppArmor: {Default: true, PreRelease: utilfeature.Beta}, + DynamicKubeletConfig: {Default: true, PreRelease: utilfeature.Beta}, + ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta}, + ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha}, + DevicePlugins: {Default: true, PreRelease: utilfeature.Beta}, + TaintBasedEvictions: {Default: true, PreRelease: utilfeature.Beta}, + RotateKubeletServerCertificate: {Default: true, PreRelease: utilfeature.Beta}, + RotateKubeletClientCertificate: {Default: true, PreRelease: utilfeature.Beta}, + PersistentLocalVolumes: {Default: true, PreRelease: utilfeature.Beta}, + LocalStorageCapacityIsolation: {Default: true, PreRelease: utilfeature.Beta}, + HugePages: {Default: true, PreRelease: utilfeature.Beta}, + Sysctls: {Default: true, PreRelease: utilfeature.Beta}, + DebugContainers: {Default: false, PreRelease: utilfeature.Alpha}, + PodShareProcessNamespace: {Default: true, PreRelease: utilfeature.Beta}, + PodPriority: {Default: true, PreRelease: utilfeature.Beta}, + EnableEquivalenceClassCache: {Default: false, PreRelease: utilfeature.Alpha}, + TaintNodesByCondition: {Default: true, PreRelease: utilfeature.Beta}, + MountPropagation: {Default: true, PreRelease: utilfeature.GA}, + QOSReserved: {Default: false, PreRelease: utilfeature.Alpha}, + ExpandPersistentVolumes: {Default: true, PreRelease: utilfeature.Beta}, + ExpandInUsePersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha}, + AttachVolumeLimit: {Default: true, PreRelease: utilfeature.Beta}, + CPUManager: {Default: true, PreRelease: utilfeature.Beta}, + CPUCFSQuotaPeriod: {Default: false, PreRelease: utilfeature.Alpha}, + ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, + MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeScheduling: {Default: true, PreRelease: utilfeature.GA}, + CSIPersistentVolume: {Default: true, PreRelease: utilfeature.GA}, + CSIDriverRegistry: {Default: false, PreRelease: utilfeature.Alpha}, + CSINodeInfo: {Default: false, PreRelease: utilfeature.Alpha}, + CustomPodDNS: {Default: true, PreRelease: utilfeature.Beta}, + BlockVolume: {Default: true, PreRelease: utilfeature.Beta}, + StorageObjectInUseProtection: {Default: true, PreRelease: utilfeature.GA}, + ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha}, + SupportIPVSProxyMode: {Default: true, PreRelease: utilfeature.GA}, + SupportPodPidsLimit: {Default: false, PreRelease: utilfeature.Alpha}, + HyperVContainer: {Default: false, PreRelease: utilfeature.Alpha}, + ScheduleDaemonSetPods: {Default: true, PreRelease: utilfeature.Beta}, + TokenRequest: {Default: true, PreRelease: utilfeature.Beta}, + TokenRequestProjection: {Default: true, PreRelease: utilfeature.Beta}, + BoundServiceAccountTokenVolume: {Default: false, PreRelease: utilfeature.Alpha}, + CRIContainerLogRotation: {Default: true, PreRelease: utilfeature.Beta}, + GCERegionalPersistentDisk: {Default: true, PreRelease: utilfeature.GA}, + RunAsGroup: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeSubpath: {Default: true, PreRelease: utilfeature.GA}, + BalanceAttachedNodeVolumes: {Default: false, PreRelease: utilfeature.Alpha}, + PodReadinessGates: {Default: true, PreRelease: utilfeature.Beta}, + VolumeSubpathEnvExpansion: {Default: false, PreRelease: utilfeature.Alpha}, + KubeletPluginsWatcher: {Default: true, PreRelease: 
utilfeature.GA}, + ResourceQuotaScopeSelectors: {Default: true, PreRelease: utilfeature.Beta}, + CSIBlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, + RuntimeClass: {Default: false, PreRelease: utilfeature.Alpha}, + NodeLease: {Default: false, PreRelease: utilfeature.Alpha}, + SCTPSupport: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeSnapshotDataSource: {Default: false, PreRelease: utilfeature.Alpha}, + ProcMountType: {Default: false, PreRelease: utilfeature.Alpha}, + TTLAfterFinished: {Default: false, PreRelease: utilfeature.Alpha}, + KubeletPodResources: {Default: false, PreRelease: utilfeature.Alpha}, + + // inherited features from generic apiserver, relisted here to get a conflict if it is changed + // unintentionally on either side: + genericfeatures.StreamingProxyRedirects: {Default: true, PreRelease: utilfeature.Beta}, + genericfeatures.AdvancedAuditing: {Default: true, PreRelease: utilfeature.GA}, + genericfeatures.DynamicAuditing: {Default: false, PreRelease: utilfeature.Alpha}, + genericfeatures.APIResponseCompression: {Default: false, PreRelease: utilfeature.Alpha}, + genericfeatures.Initializers: {Default: false, PreRelease: utilfeature.Alpha}, + genericfeatures.APIListChunking: {Default: true, PreRelease: utilfeature.Beta}, + genericfeatures.DryRun: {Default: true, PreRelease: utilfeature.Beta}, + + // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed + // unintentionally on either side: + apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, + apiextensionsfeatures.CustomResourceSubresources: {Default: true, PreRelease: utilfeature.Beta}, + apiextensionsfeatures.CustomResourceWebhookConversion: {Default: false, PreRelease: utilfeature.Alpha}, + + // features that enable backwards compatibility but are scheduled to be removed + // ... +} diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go new file mode 100644 index 000000000..400d001e7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fieldpath supplies methods for extracting fields from objects +// given a path to a field. +package fieldpath // import "k8s.io/kubernetes/pkg/fieldpath" diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go new file mode 100644 index 000000000..b997751ec --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go @@ -0,0 +1,109 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fieldpath
+
+import (
+ "fmt"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apimachinery/pkg/util/validation"
+)
+
+// FormatMap formats map[string]string to a string.
+func FormatMap(m map[string]string) (fmtStr string) {
+ // output with keys in sorted order to provide stable output
+ keys := sets.NewString()
+ for key := range m {
+ keys.Insert(key)
+ }
+ for _, key := range keys.List() {
+ fmtStr += fmt.Sprintf("%v=%q\n", key, m[key])
+ }
+ fmtStr = strings.TrimSuffix(fmtStr, "\n")
+
+ return
+}
+
+// ExtractFieldPathAsString extracts the field from the given object
+// and returns it as a string. The object must be a pointer to an
+// API type.
+func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) {
+ accessor, err := meta.Accessor(obj)
+ if err != nil {
+ return "", err
+ }
+
+ if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok {
+ switch path {
+ case "metadata.annotations":
+ if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 {
+ return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";"))
+ }
+ return accessor.GetAnnotations()[subscript], nil
+ case "metadata.labels":
+ if errs := validation.IsQualifiedName(subscript); len(errs) != 0 {
+ return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";"))
+ }
+ return accessor.GetLabels()[subscript], nil
+ default:
+ return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath)
+ }
+ }
+
+ switch fieldPath {
+ case "metadata.annotations":
+ return FormatMap(accessor.GetAnnotations()), nil
+ case "metadata.labels":
+ return FormatMap(accessor.GetLabels()), nil
+ case "metadata.name":
+ return accessor.GetName(), nil
+ case "metadata.namespace":
+ return accessor.GetNamespace(), nil
+ case "metadata.uid":
+ return string(accessor.GetUID()), nil
+ }
+
+ return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath)
+}
+
+// SplitMaybeSubscriptedPath checks whether the specified fieldPath is
+// subscripted, and
+// - if yes, this function splits the fieldPath into path and subscript, and
+// returns (path, subscript, true).
+// - if no, this function returns (fieldPath, "", false).
+// +// Example inputs and outputs: +// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) +// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) +// - "metadata.labels['']" --> ("metadata.labels", "", true) +// - "metadata.labels" --> ("metadata.labels", "", false) +func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { + if !strings.HasSuffix(fieldPath, "']") { + return fieldPath, "", false + } + s := strings.TrimSuffix(fieldPath, "']") + parts := strings.SplitN(s, "['", 2) + if len(parts) < 2 { + return fieldPath, "", false + } + if len(parts[0]) == 0 { + return fieldPath, "", false + } + return parts[0], parts[1], true +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/constants.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/constants.go new file mode 100644 index 000000000..f2f703a8d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/types/constants.go @@ -0,0 +1,32 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +const ( + // system default DNS resolver configuration + ResolvConfDefault = "/etc/resolv.conf" + + // different container runtimes + DockerContainerRuntime = "docker" + RemoteContainerRuntime = "remote" + + // User visible keys for managing node allocatable enforcement on the node. + NodeAllocatableEnforcementKey = "pods" + SystemReservedEnforcementKey = "system-reserved" + KubeReservedEnforcementKey = "kube-reserved" + NodeAllocatableNoneKey = "none" +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go new file mode 100644 index 000000000..88e345636 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/types/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Common types in the Kubelet. +package types // import "k8s.io/kubernetes/pkg/kubelet/types" diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/labels.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/labels.go new file mode 100644 index 000000000..67c84f6d6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/types/labels.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+const (
+ KubernetesPodNameLabel = "io.kubernetes.pod.name"
+ KubernetesPodNamespaceLabel = "io.kubernetes.pod.namespace"
+ KubernetesPodUIDLabel = "io.kubernetes.pod.uid"
+ KubernetesContainerNameLabel = "io.kubernetes.container.name"
+ KubernetesContainerTypeLabel = "io.kubernetes.container.type"
+)
+
+// GetContainerName returns the container name from the given runtime labels.
+func GetContainerName(labels map[string]string) string {
+ return labels[KubernetesContainerNameLabel]
+}
+
+// GetPodName returns the pod name from the given runtime labels.
+func GetPodName(labels map[string]string) string {
+ return labels[KubernetesPodNameLabel]
+}
+
+// GetPodUID returns the pod UID from the given runtime labels.
+func GetPodUID(labels map[string]string) string {
+ return labels[KubernetesPodUIDLabel]
+}
+
+// GetPodNamespace returns the pod namespace from the given runtime labels.
+func GetPodNamespace(labels map[string]string) string {
+ return labels[KubernetesPodNamespaceLabel]
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_status.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_status.go
new file mode 100644
index 000000000..8c46ba39e
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_status.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+import (
+ "k8s.io/api/core/v1"
+)
+
+// PodConditionsByKubelet is the list of pod conditions owned by kubelet
+var PodConditionsByKubelet = []v1.PodConditionType{
+ v1.PodScheduled,
+ v1.PodReady,
+ v1.PodInitialized,
+ v1.PodReasonUnschedulable,
+ v1.ContainersReady,
+}
+
+// PodConditionByKubelet returns true if the pod condition type is owned by the kubelet.
+func PodConditionByKubelet(conditionType v1.PodConditionType) bool {
+ for _, c := range PodConditionsByKubelet {
+ if c == conditionType {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go
new file mode 100644
index 000000000..bdc5ee794
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go
@@ -0,0 +1,199 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+import (
+ "fmt"
+
+ "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ utilfeature "k8s.io/apiserver/pkg/util/feature"
+ kubeapi "k8s.io/kubernetes/pkg/apis/core"
+ "k8s.io/kubernetes/pkg/apis/scheduling"
+ "k8s.io/kubernetes/pkg/features"
+)
+
+const (
+ ConfigSourceAnnotationKey = "kubernetes.io/config.source"
+ ConfigMirrorAnnotationKey = v1.MirrorPodAnnotationKey
+ ConfigFirstSeenAnnotationKey = "kubernetes.io/config.seen"
+ ConfigHashAnnotationKey = "kubernetes.io/config.hash"
+ CriticalPodAnnotationKey = "scheduler.alpha.kubernetes.io/critical-pod"
+)
+
+// PodOperation defines what changes will be made on a pod configuration.
+type PodOperation int
+
+const (
+ // This is the current pod configuration
+ SET PodOperation = iota
+ // Pods with the given ids are new to this source
+ ADD
+ // Pods with the given ids are gracefully deleted from this source
+ DELETE
+ // Pods with the given ids have been removed from this source
+ REMOVE
+ // Pods with the given ids have been updated in this source
+ UPDATE
+ // Pods with the given ids have unexpected status in this source,
+ // kubelet should reconcile status with this source
+ RECONCILE
+ // Pods with the given ids have been restored from a checkpoint.
+ RESTORE
+
+ // These constants identify the sources of pods
+ // Updates from a file
+ FileSource = "file"
+ // Updates from querying a web page
+ HTTPSource = "http"
+ // Updates from Kubernetes API Server
+ ApiserverSource = "api"
+ // Updates from all sources
+ AllSource = "*"
+
+ NamespaceDefault = metav1.NamespaceDefault
+)
+
+// PodUpdate defines an operation sent on the channel. You can add or remove single pods by
+// sending an array of size one and Op == ADD|REMOVE (with REMOVE, only the ID is required).
+// For setting the state of the system to a given state for this source configuration, set
+// Pods as desired and Op to SET, which will reset the system state to that specified in this
+// operation for this source channel. To remove all pods, set Pods to an empty slice and Op to SET.
+//
+// Additionally, Pods should never be nil - it should always point to an empty slice. While
+// functionally similar, this helps our unit tests properly check that the correct PodUpdates
+// are generated.
+type PodUpdate struct {
+ Pods []*v1.Pod
+ Op PodOperation
+ Source string
+}
+
+// GetValidatedSources returns all validated sources derived from the specified sources.
+func GetValidatedSources(sources []string) ([]string, error) {
+ validated := make([]string, 0, len(sources))
+ for _, source := range sources {
+ switch source {
+ case AllSource:
+ return []string{FileSource, HTTPSource, ApiserverSource}, nil
+ case FileSource, HTTPSource, ApiserverSource:
+ validated = append(validated, source)
+ case "":
+ // Skip empty sources.
+ default:
+ return []string{}, fmt.Errorf("unknown pod source %q", source)
+ }
+ }
+ return validated, nil
+}
+
+// GetPodSource returns the source of the pod based on the annotation.
+func GetPodSource(pod *v1.Pod) (string, error) {
+ if pod.Annotations != nil {
+ if source, ok := pod.Annotations[ConfigSourceAnnotationKey]; ok {
+ return source, nil
+ }
+ }
+ return "", fmt.Errorf("cannot get source of pod %q", pod.UID)
+}
+
+// SyncPodType classifies pod updates, e.g. create, update.
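+// The zero value is SyncPodSync, so an unset SyncPodType is interpreted as a plain sync.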
+type SyncPodType int
+
+const (
+ // SyncPodSync is when the pod is synced to ensure desired state
+ SyncPodSync SyncPodType = iota
+ // SyncPodUpdate is when the pod is updated from source
+ SyncPodUpdate
+ // SyncPodCreate is when the pod is created from source
+ SyncPodCreate
+ // SyncPodKill is when the pod is killed based on a trigger internal to the kubelet for eviction.
+ // If a SyncPodKill request is made to pod workers, the request is never dropped, and will always be processed.
+ SyncPodKill
+)
+
+func (sp SyncPodType) String() string {
+ switch sp {
+ case SyncPodCreate:
+ return "create"
+ case SyncPodUpdate:
+ return "update"
+ case SyncPodSync:
+ return "sync"
+ case SyncPodKill:
+ return "kill"
+ default:
+ return "unknown"
+ }
+}
+
+// IsCriticalPod returns true if the pod bears the critical pod annotation key or if pod's priority is greater than
+// or equal to SystemCriticalPriority. Both the default scheduler and the kubelet use this function
+// to make admission and scheduling decisions.
+func IsCriticalPod(pod *v1.Pod) bool {
+ if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
+ if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
+ return true
+ }
+ }
+ if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) {
+ if IsCritical(pod.Namespace, pod.Annotations) {
+ return true
+ }
+ }
+ return false
+}
+
+// Preemptable returns true if the preemptor pod can preempt the preemptee pod:
+// either the preemptor is critical and the preemptee is not, or the preemptor's
+// priority is greater than the preemptee's priority.
+func Preemptable(preemptor, preemptee *v1.Pod) bool {
+ if IsCriticalPod(preemptor) && !IsCriticalPod(preemptee) {
+ return true
+ }
+ if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
+ if (preemptor != nil && preemptor.Spec.Priority != nil) &&
+ (preemptee != nil && preemptee.Spec.Priority != nil) {
+ return *(preemptor.Spec.Priority) > *(preemptee.Spec.Priority)
+ }
+ }
+
+ return false
+}
+
+// IsCritical returns true if the parameters bear the critical pod annotation
+// key. The DaemonSetController uses this key directly to make scheduling decisions.
+// TODO: @ravig - Deprecated. Remove this when we move to resolving critical pods based on priorityClassName.
+func IsCritical(ns string, annotations map[string]string) bool {
+ // Critical pods are restricted to "kube-system" namespace as of now.
+ if ns != kubeapi.NamespaceSystem {
+ return false
+ }
+ val, ok := annotations[CriticalPodAnnotationKey]
+ return ok && val == ""
+}
+
+// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
+func IsCriticalPodBasedOnPriority(priority int32) bool {
+ return priority >= scheduling.SystemCriticalPriority
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/types.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/types.go
new file mode 100644
index 000000000..b0dff97a7
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/types/types.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "net/http" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +// TODO: Reconcile custom types in kubelet/types and this subpackage + +type HttpGetter interface { + Get(url string) (*http.Response, error) +} + +// Timestamp wraps around time.Time and offers utilities to format and parse +// the time using RFC3339Nano +type Timestamp struct { + time time.Time +} + +// NewTimestamp returns a Timestamp object using the current time. +func NewTimestamp() *Timestamp { + return &Timestamp{time.Now()} +} + +// ConvertToTimestamp takes a string, parses it using the RFC3339Nano layout, +// and converts it to a Timestamp object. +func ConvertToTimestamp(timeString string) *Timestamp { + parsed, _ := time.Parse(time.RFC3339Nano, timeString) + return &Timestamp{parsed} +} + +// Get returns the time as time.Time. +func (t *Timestamp) Get() time.Time { + return t.time +} + +// GetString returns the time in the string format using the RFC3339Nano +// layout. +func (t *Timestamp) GetString() string { + return t.time.Format(time.RFC3339Nano) +} + +// A type to help sort container statuses based on container names. +type SortedContainerStatuses []v1.ContainerStatus + +func (s SortedContainerStatuses) Len() int { return len(s) } +func (s SortedContainerStatuses) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s SortedContainerStatuses) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +// SortInitContainerStatuses ensures that statuses are in the order that their +// init container appears in the pod spec +func SortInitContainerStatuses(p *v1.Pod, statuses []v1.ContainerStatus) { + containers := p.Spec.InitContainers + current := 0 + for _, container := range containers { + for j := current; j < len(statuses); j++ { + if container.Name == statuses[j].Name { + statuses[current], statuses[j] = statuses[j], statuses[current] + current++ + break + } + } + } +} + +// Reservation represents reserved resources for non-pod components. +type Reservation struct { + // System represents resources reserved for non-kubernetes components. + System v1.ResourceList + // Kubernetes represents resources reserved for kubernetes system components. + Kubernetes v1.ResourceList +} + +// A pod UID which has been translated/resolved to the representation known to kubelets. +type ResolvedPodUID types.UID + +// A pod UID for a mirror pod. +type MirrorPodUID types.UID diff --git a/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go b/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go new file mode 100644 index 000000000..5e14f82e7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/master/ports/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package ports defines ports used by various pieces of the kubernetes +// infrastructure. +package ports // import "k8s.io/kubernetes/pkg/master/ports" diff --git a/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go b/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go new file mode 100644 index 000000000..23faba1d3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/master/ports/ports.go @@ -0,0 +1,57 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ports + +const ( + // ProxyStatusPort is the default port for the proxy metrics server. + // May be overridden by a flag at startup. + ProxyStatusPort = 10249 + // KubeletPort is the default port for the kubelet server on each host machine. + // May be overridden by a flag at startup. + KubeletPort = 10250 + // InsecureSchedulerPort is the default port for the scheduler status server. + // May be overridden by a flag at startup. + // Deprecated: use the secure KubeSchedulerPort instead. + InsecureSchedulerPort = 10251 + // InsecureKubeControllerManagerPort is the default port for the controller manager status server. + // May be overridden by a flag at startup. + // Deprecated: use the secure KubeControllerManagerPort instead. + InsecureKubeControllerManagerPort = 10252 + // InsecureCloudControllerManagerPort is the default port for the cloud controller manager server. + // This value may be overridden by a flag at startup. + // Deprecated: use the secure CloudControllerManagerPort instead. + InsecureCloudControllerManagerPort = 10253 + // KubeletReadOnlyPort exposes basic read-only services from the kubelet. + // May be overridden by a flag at startup. + // This is necessary for heapster to collect monitoring stats from the kubelet + // until heapster can transition to using the SSL endpoint. + // TODO(roberthbailey): Remove this once we have a better solution for heapster. + KubeletReadOnlyPort = 10255 + // ProxyHealthzPort is the default port for the proxy healthz server. + // May be overridden by a flag at startup. + ProxyHealthzPort = 10256 + // KubeControllerManagerPort is the default port for the controller manager status server. + // May be overridden by a flag at startup. + KubeControllerManagerPort = 10257 + // CloudControllerManagerPort is the default port for the cloud controller manager server. + // This value may be overridden by a flag at startup. + CloudControllerManagerPort = 10258 + + // KubeSchedulerPort is the default port for the scheduler status server. + // May be overridden by a flag at startup. 
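+ // It is the secure counterpart of the deprecated InsecureSchedulerPort above,
+ // mirroring the controller manager's 10257/10252 pairing.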
+ KubeSchedulerPort = 10259 +) diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/api/doc.go b/vendor/k8s.io/kubernetes/pkg/scheduler/api/doc.go new file mode 100644 index 000000000..c768a8c92 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/api/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package api contains scheduler API objects. +package api // import "k8s.io/kubernetes/pkg/scheduler/api" diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/api/register.go b/vendor/k8s.io/kubernetes/pkg/scheduler/api/register.go new file mode 100644 index 000000000..4852cd559 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/api/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. +// TODO: remove this, scheduler should not have its own scheme. +var Scheme = runtime.NewScheme() + +// SchemeGroupVersion is group version used to register these objects +// TODO this should be in the "scheduler" group +var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal} + +var ( + // SchemeBuilder defines a SchemeBuilder object. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is used to add stored functions to scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +func init() { + if err := addKnownTypes(Scheme); err != nil { + // Programmer error. + panic(err) + } +} + +func addKnownTypes(scheme *runtime.Scheme) error { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { + return err + } + scheme.AddKnownTypes(SchemeGroupVersion, + &Policy{}, + ) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/api/types.go b/vendor/k8s.io/kubernetes/pkg/scheduler/api/types.go new file mode 100644 index 000000000..3bffc2163 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/api/types.go @@ -0,0 +1,328 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "time" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + restclient "k8s.io/client-go/rest" +) + +const ( + // MaxUint defines the max unsigned int value. + MaxUint = ^uint(0) + // MaxInt defines the max signed int value. + MaxInt = int(MaxUint >> 1) + // MaxTotalPriority defines the max total priority value. + MaxTotalPriority = MaxInt + // MaxPriority defines the max priority value. + MaxPriority = 10 + // MaxWeight defines the max weight value. + MaxWeight = MaxInt / MaxPriority + // DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes + // that once found feasible, the scheduler stops looking for more nodes. + DefaultPercentageOfNodesToScore = 50 +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Policy describes a struct of a policy resource in api. +type Policy struct { + metav1.TypeMeta + // Holds the information to configure the fit predicate functions. + // If unspecified, the default predicate functions will be applied. + // If empty list, all predicates (except the mandatory ones) will be + // bypassed. + Predicates []PredicatePolicy + // Holds the information to configure the priority functions. + // If unspecified, the default priority functions will be applied. + // If empty list, all priority functions will be bypassed. + Priorities []PriorityPolicy + // Holds the information to communicate with the extender(s) + ExtenderConfigs []ExtenderConfig + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100. + HardPodAffinitySymmetricWeight int32 + + // When AlwaysCheckAllPredicates is set to true, scheduler checks all + // the configured predicates even after one or more of them fails. + // When the flag is set to false, scheduler skips checking the rest + // of the predicates after it finds one predicate that failed. + AlwaysCheckAllPredicates bool +} + +// PredicatePolicy describes a struct of a predicate policy. +type PredicatePolicy struct { + // Identifier of the predicate policy + // For a custom predicate, the name can be user-defined + // For the Kubernetes provided predicates, the name is the identifier of the pre-defined predicate + Name string + // Holds the parameters to configure the given predicate + Argument *PredicateArgument +} + +// PriorityPolicy describes a struct of a priority policy. 
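+// In a scheduler policy file, each priority corresponds to an entry such as the
+// following (illustrative; the serialized field names come from the versioned
+// API, not from this internal type):
+//
+//	{"name": "LeastRequestedPriority", "weight": 1}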
+type PriorityPolicy struct {
+	// Identifier of the priority policy
+	// For a custom priority, the name can be user-defined
+	// For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function
+	Name string
+	// The numeric multiplier for the node scores that the priority function generates
+	// The weight should be a positive integer
+	Weight int
+	// Holds the parameters to configure the given priority function
+	Argument *PriorityArgument
+}
+
+// PredicateArgument represents the arguments to configure predicate functions in scheduler policy configuration.
+// Only one of its members may be specified
+type PredicateArgument struct {
+	// The predicate that provides affinity for pods belonging to a service
+	// It uses a label to identify nodes that belong to the same "group"
+	ServiceAffinity *ServiceAffinity
+	// The predicate that checks whether a particular node has a certain label
+	// defined or not, regardless of value
+	LabelsPresence *LabelsPresence
+}
+
+// PriorityArgument represents the arguments to configure priority functions in scheduler policy configuration.
+// Only one of its members may be specified
+type PriorityArgument struct {
+	// The priority function that ensures a good spread (anti-affinity) for pods belonging to a service
+	// It uses a label to identify nodes that belong to the same "group"
+	ServiceAntiAffinity *ServiceAntiAffinity
+	// The priority function that checks whether a particular node has a certain label
+	// defined or not, regardless of value
+	LabelPreference *LabelPreference
+	// The RequestedToCapacityRatio priority function is parameterized with a function shape.
+	RequestedToCapacityRatioArguments *RequestedToCapacityRatioArguments
+}
+
+// ServiceAffinity holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration.
+type ServiceAffinity struct {
+	// The list of labels that identify node "groups"
+	// All of the labels should match for the node to be considered a fit for hosting the pod
+	Labels []string
+}
+
+// LabelsPresence holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration.
+type LabelsPresence struct {
+	// The list of labels that identify node "groups"
+	// All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod
+	Labels []string
+	// The boolean flag that indicates whether the labels should be present or absent from the node
+	Presence bool
+}
+
+// ServiceAntiAffinity holds the parameters that are used to configure the corresponding priority function
+type ServiceAntiAffinity struct {
+	// Used to identify node "groups"
+	Label string
+}
+
+// LabelPreference holds the parameters that are used to configure the corresponding priority function
+type LabelPreference struct {
+	// Used to identify node "groups"
+	Label string
+	// This is a boolean flag
+	// If true, higher priority is given to nodes that have the label
+	// If false, higher priority is given to nodes that do not have the label
+	Presence bool
+}
+
+// RequestedToCapacityRatioArguments holds arguments specific to RequestedToCapacityRatio priority function
+type RequestedToCapacityRatioArguments struct {
+	// Array of points defining the priority function shape
+	UtilizationShape []UtilizationShapePoint
+}
+
+// UtilizationShapePoint represents a single point of the priority function shape
+type UtilizationShapePoint struct {
+	// Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100.
+	Utilization int
+	// Score assigned to given utilization (y axis). Valid values are 0 to 10.
+	Score int
+}
+
+// ExtenderManagedResource describes the arguments of extended resources
+// managed by an extender.
+type ExtenderManagedResource struct {
+	// Name is the extended resource name.
+	Name v1.ResourceName
+	// IgnoredByScheduler indicates whether kube-scheduler should ignore this
+	// resource when applying predicates.
+	IgnoredByScheduler bool
+}
+
+// ExtenderConfig holds the parameters used to communicate with the extender. If a verb is unspecified/empty,
+// it is assumed that the extender chose not to provide that extension.
+type ExtenderConfig struct {
+	// URLPrefix at which the extender is available
+	URLPrefix string
+	// Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender.
+	FilterVerb string
+	// Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender.
+	PreemptVerb string
+	// Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender.
+	PrioritizeVerb string
+	// The numeric multiplier for the node scores that the prioritize call generates.
+	// The weight should be a positive integer
+	Weight int
+	// Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender.
+	// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
+	// can implement this function.
+	BindVerb string
+	// EnableHTTPS specifies whether https should be used to communicate with the extender
+	EnableHTTPS bool
+	// TLSConfig specifies the transport layer security config
+	TLSConfig *restclient.TLSClientConfig
+	// HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize
+	// timeout is ignored, k8s/other extenders' priorities are used to select the node.
+	HTTPTimeout time.Duration
+	// NodeCacheCapable specifies that the extender is capable of caching node information,
+	// so the scheduler should only send minimal information about the eligible nodes
+	// assuming that the extender already cached full details of all nodes in the cluster
+	NodeCacheCapable bool
+	// ManagedResources is a list of extended resources that are managed by
+	// this extender.
+	// - A pod will be sent to the extender on the Filter, Prioritize and Bind
+	//   (if the extender is the binder) phases iff the pod requests at least
+	//   one of the extended resources in this list. If empty or unspecified,
+	//   all pods will be sent to this extender.
+	// - If IgnoredByScheduler is set to true for a resource, kube-scheduler
+	//   will skip checking the resource in predicates.
+	// +optional
+	ManagedResources []ExtenderManagedResource
+	// Ignorable specifies if the extender is ignorable, i.e. scheduling should not
+	// fail when the extender returns an error or is not reachable.
+	Ignorable bool
+}
+
+// ExtenderPreemptionResult represents the result returned by preemption phase of extender.
+type ExtenderPreemptionResult struct {
+	NodeNameToMetaVictims map[string]*MetaVictims
+}
+
+// ExtenderPreemptionArgs represents the arguments needed by the extender to preempt pods on nodes.
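+// A hedged illustration (editorial, not upstream documentation): with
+// NodeCacheCapable == true, the JSON body sent to the extender's preempt
+// verb looks roughly like:
+//
+//	{"Pod": {...}, "NodeNameToMetaVictims": {"node-a": {"Pods": [{"UID": "..."}], "NumPDBViolations": 0}}}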
+type ExtenderPreemptionArgs struct {
+	// Pod being scheduled
+	Pod *v1.Pod
+	// Victims map generated by scheduler preemption phase
+	// Only set NodeNameToMetaVictims if ExtenderConfig.NodeCacheCapable == true. Otherwise, only set NodeNameToVictims.
+	NodeNameToVictims     map[string]*Victims
+	NodeNameToMetaVictims map[string]*MetaVictims
+}
+
+// Victims represents:
+//   pods: a group of pods expected to be preempted.
+//   numPDBViolations: the count of violations of PodDisruptionBudget
+type Victims struct {
+	Pods             []*v1.Pod
+	NumPDBViolations int
+}
+
+// MetaPod represents an identifier for a v1.Pod
+type MetaPod struct {
+	UID string
+}
+
+// MetaVictims represents:
+//   pods: a group of pods expected to be preempted.
+//     Only Pod identifiers will be sent, and users are expected to fetch the v1.Pod themselves.
+//   numPDBViolations: the count of violations of PodDisruptionBudget
+type MetaVictims struct {
+	Pods             []*MetaPod
+	NumPDBViolations int
+}
+
+// ExtenderArgs represents the arguments needed by the extender to filter/prioritize
+// nodes for a pod.
+type ExtenderArgs struct {
+	// Pod being scheduled
+	Pod *v1.Pod
+	// List of candidate nodes where the pod can be scheduled; to be populated
+	// only if ExtenderConfig.NodeCacheCapable == false
+	Nodes *v1.NodeList
+	// List of candidate node names where the pod can be scheduled; to be
+	// populated only if ExtenderConfig.NodeCacheCapable == true
+	NodeNames *[]string
+}
+
+// FailedNodesMap represents the filtered out nodes, with node names and failure messages
+type FailedNodesMap map[string]string
+
+// ExtenderFilterResult represents the results of a filter call to an extender
+type ExtenderFilterResult struct {
+	// Filtered set of nodes where the pod can be scheduled; to be populated
+	// only if ExtenderConfig.NodeCacheCapable == false
+	Nodes *v1.NodeList
+	// Filtered set of nodes where the pod can be scheduled; to be populated
+	// only if ExtenderConfig.NodeCacheCapable == true
+	NodeNames *[]string
+	// Filtered out nodes where the pod can't be scheduled and the failure messages
+	FailedNodes FailedNodesMap
+	// Error message indicating failure
+	Error string
+}
+
+// ExtenderBindingArgs represents the arguments to an extender for binding a pod to a node.
+type ExtenderBindingArgs struct {
+	// PodName is the name of the pod being bound
+	PodName string
+	// PodNamespace is the namespace of the pod being bound
+	PodNamespace string
+	// PodUID is the UID of the pod being bound
+	PodUID types.UID
+	// Node selected by the scheduler
+	Node string
+}
+
+// ExtenderBindingResult represents the result of binding of a pod to a node from an extender.
+type ExtenderBindingResult struct {
+	// Error message indicating failure
+	Error string
+}
+
+// HostPriority represents the priority of scheduling to a particular host, higher priority is better.
+type HostPriority struct {
+	// Name of the host
+	Host string
+	// Score associated with the host
+	Score int
+}
+
+// HostPriorityList declares a []HostPriority type.
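+// It implements sort.Interface (Len/Less/Swap below); as a hedged usage
+// sketch, sort.Sort(list) orders hosts by ascending score, breaking ties by
+// host name.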
+type HostPriorityList []HostPriority
+
+func (h HostPriorityList) Len() int {
+	return len(h)
+}
+
+func (h HostPriorityList) Less(i, j int) bool {
+	if h[i].Score == h[j].Score {
+		return h[i].Host < h[j].Host
+	}
+	return h[i].Score < h[j].Score
+}
+
+func (h HostPriorityList) Swap(i, j int) {
+	h[i], h[j] = h[j], h[i]
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go b/vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go
new file mode 100644
index 000000000..afe64dd50
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/api/well_known_labels.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package api
+
+import (
+	api "k8s.io/kubernetes/pkg/apis/core"
+)
+
+const (
+	// TaintNodeNotReady will be added when node is not ready
+	// and feature-gate for TaintBasedEvictions flag is enabled,
+	// and removed when node becomes ready.
+	TaintNodeNotReady = "node.kubernetes.io/not-ready"
+
+	// DeprecatedTaintNodeNotReady is the deprecated version of TaintNodeNotReady.
+	// It is deprecated since 1.9
+	DeprecatedTaintNodeNotReady = "node.alpha.kubernetes.io/notReady"
+
+	// TaintNodeUnreachable will be added when node becomes unreachable
+	// (corresponding to NodeReady status ConditionUnknown)
+	// and feature-gate for TaintBasedEvictions flag is enabled,
+	// and removed when node becomes reachable (NodeReady status ConditionTrue).
+	TaintNodeUnreachable = "node.kubernetes.io/unreachable"
+
+	// DeprecatedTaintNodeUnreachable is the deprecated version of TaintNodeUnreachable.
+	// It is deprecated since 1.9
+	DeprecatedTaintNodeUnreachable = "node.alpha.kubernetes.io/unreachable"
+
+	// TaintNodeUnschedulable will be added when node becomes unschedulable
+	// and feature-gate for TaintNodesByCondition flag is enabled,
+	// and removed when node becomes schedulable.
+	TaintNodeUnschedulable = "node.kubernetes.io/unschedulable"
+
+	// TaintNodeOutOfDisk will be added when node becomes out of disk
+	// and feature-gate for TaintNodesByCondition flag is enabled,
+	// and removed when node has enough disk.
+	TaintNodeOutOfDisk = "node.kubernetes.io/out-of-disk"
+
+	// TaintNodeMemoryPressure will be added when node has memory pressure
+	// and feature-gate for TaintNodesByCondition flag is enabled,
+	// and removed when node has enough memory.
+	TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure"
+
+	// TaintNodeDiskPressure will be added when node has disk pressure
+	// and feature-gate for TaintNodesByCondition flag is enabled,
+	// and removed when node has enough disk.
+	TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure"
+
+	// TaintNodeNetworkUnavailable will be added when node's network is unavailable
+	// and feature-gate for TaintNodesByCondition flag is enabled,
+	// and removed when network becomes ready.
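+	// (Illustrative, editorial note: a pod can opt out of such a condition
+	// taint with a toleration, e.g.
+	//   tolerations:
+	//   - key: "node.kubernetes.io/network-unavailable"
+	//     operator: "Exists"
+	//     effect: "NoSchedule"
+	// )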
+	TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable"
+
+	// TaintNodePIDPressure will be added when node has pid pressure
+	// and feature-gate for TaintNodesByCondition flag is enabled,
+	// and removed when node has enough pid capacity.
+	TaintNodePIDPressure = "node.kubernetes.io/pid-pressure"
+
+	// TaintExternalCloudProvider sets this taint on a node to mark it as unusable,
+	// when kubelet is started with the "external" cloud provider, until a controller
+	// from the cloud-controller-manager initializes this node, and then removes
+	// the taint
+	TaintExternalCloudProvider = "node.cloudprovider.kubernetes.io/uninitialized"
+
+	// TaintNodeShutdown when node is shutdown in external cloud provider
+	TaintNodeShutdown = "node.cloudprovider.kubernetes.io/shutdown"
+
+	// NodeFieldSelectorKeyNodeName ('metadata.name') uses this as node field selector key
+	// when selecting node by node's name.
+	NodeFieldSelectorKeyNodeName = api.ObjectNameField
+)
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/api/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/scheduler/api/zz_generated.deepcopy.go
new file mode 100644
index 000000000..418c78cd1
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/api/zz_generated.deepcopy.go
@@ -0,0 +1,639 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package api
+
+import (
+	v1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	rest "k8s.io/client-go/rest"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtenderArgs) DeepCopyInto(out *ExtenderArgs) {
+	*out = *in
+	if in.Pod != nil {
+		in, out := &in.Pod, &out.Pod
+		*out = new(v1.Pod)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Nodes != nil {
+		in, out := &in.Nodes, &out.Nodes
+		*out = new(v1.NodeList)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.NodeNames != nil {
+		in, out := &in.NodeNames, &out.NodeNames
+		*out = new([]string)
+		if **in != nil {
+			in, out := *in, *out
+			*out = make([]string, len(*in))
+			copy(*out, *in)
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderArgs.
+func (in *ExtenderArgs) DeepCopy() *ExtenderArgs {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtenderArgs)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtenderBindingArgs) DeepCopyInto(out *ExtenderBindingArgs) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingArgs.
+func (in *ExtenderBindingArgs) DeepCopy() *ExtenderBindingArgs {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtenderBindingArgs)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out.
in must be non-nil. +func (in *ExtenderBindingResult) DeepCopyInto(out *ExtenderBindingResult) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingResult. +func (in *ExtenderBindingResult) DeepCopy() *ExtenderBindingResult { + if in == nil { + return nil + } + out := new(ExtenderBindingResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtenderConfig) DeepCopyInto(out *ExtenderConfig) { + *out = *in + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(rest.TLSClientConfig) + (*in).DeepCopyInto(*out) + } + if in.ManagedResources != nil { + in, out := &in.ManagedResources, &out.ManagedResources + *out = make([]ExtenderManagedResource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderConfig. +func (in *ExtenderConfig) DeepCopy() *ExtenderConfig { + if in == nil { + return nil + } + out := new(ExtenderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtenderFilterResult) DeepCopyInto(out *ExtenderFilterResult) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = new(v1.NodeList) + (*in).DeepCopyInto(*out) + } + if in.NodeNames != nil { + in, out := &in.NodeNames, &out.NodeNames + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.FailedNodes != nil { + in, out := &in.FailedNodes, &out.FailedNodes + *out = make(FailedNodesMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderFilterResult. +func (in *ExtenderFilterResult) DeepCopy() *ExtenderFilterResult { + if in == nil { + return nil + } + out := new(ExtenderFilterResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource. +func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource { + if in == nil { + return nil + } + out := new(ExtenderManagedResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtenderPreemptionArgs) DeepCopyInto(out *ExtenderPreemptionArgs) { + *out = *in + if in.Pod != nil { + in, out := &in.Pod, &out.Pod + *out = new(v1.Pod) + (*in).DeepCopyInto(*out) + } + if in.NodeNameToVictims != nil { + in, out := &in.NodeNameToVictims, &out.NodeNameToVictims + *out = make(map[string]*Victims, len(*in)) + for key, val := range *in { + var outVal *Victims + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(Victims) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + if in.NodeNameToMetaVictims != nil { + in, out := &in.NodeNameToMetaVictims, &out.NodeNameToMetaVictims + *out = make(map[string]*MetaVictims, len(*in)) + for key, val := range *in { + var outVal *MetaVictims + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(MetaVictims) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderPreemptionArgs. +func (in *ExtenderPreemptionArgs) DeepCopy() *ExtenderPreemptionArgs { + if in == nil { + return nil + } + out := new(ExtenderPreemptionArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtenderPreemptionResult) DeepCopyInto(out *ExtenderPreemptionResult) { + *out = *in + if in.NodeNameToMetaVictims != nil { + in, out := &in.NodeNameToMetaVictims, &out.NodeNameToMetaVictims + *out = make(map[string]*MetaVictims, len(*in)) + for key, val := range *in { + var outVal *MetaVictims + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(MetaVictims) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderPreemptionResult. +func (in *ExtenderPreemptionResult) DeepCopy() *ExtenderPreemptionResult { + if in == nil { + return nil + } + out := new(ExtenderPreemptionResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in FailedNodesMap) DeepCopyInto(out *FailedNodesMap) { + { + in := &in + *out = make(FailedNodesMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedNodesMap. +func (in FailedNodesMap) DeepCopy() FailedNodesMap { + if in == nil { + return nil + } + out := new(FailedNodesMap) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPriority) DeepCopyInto(out *HostPriority) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriority. +func (in *HostPriority) DeepCopy() *HostPriority { + if in == nil { + return nil + } + out := new(HostPriority) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in HostPriorityList) DeepCopyInto(out *HostPriorityList) { + { + in := &in + *out = make(HostPriorityList, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriorityList. 
+func (in HostPriorityList) DeepCopy() HostPriorityList { + if in == nil { + return nil + } + out := new(HostPriorityList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelPreference) DeepCopyInto(out *LabelPreference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelPreference. +func (in *LabelPreference) DeepCopy() *LabelPreference { + if in == nil { + return nil + } + out := new(LabelPreference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelsPresence) DeepCopyInto(out *LabelsPresence) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsPresence. +func (in *LabelsPresence) DeepCopy() *LabelsPresence { + if in == nil { + return nil + } + out := new(LabelsPresence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetaPod) DeepCopyInto(out *MetaPod) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetaPod. +func (in *MetaPod) DeepCopy() *MetaPod { + if in == nil { + return nil + } + out := new(MetaPod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetaVictims) DeepCopyInto(out *MetaVictims) { + *out = *in + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = make([]*MetaPod, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(MetaPod) + **out = **in + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetaVictims. +func (in *MetaVictims) DeepCopy() *MetaVictims { + if in == nil { + return nil + } + out := new(MetaVictims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Predicates != nil { + in, out := &in.Predicates, &out.Predicates + *out = make([]PredicatePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priorities != nil { + in, out := &in.Priorities, &out.Priorities + *out = make([]PriorityPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtenderConfigs != nil { + in, out := &in.ExtenderConfigs, &out.ExtenderConfigs + *out = make([]ExtenderConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
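+// (Editorial, hedged note: DeepCopyObject is what lets *Policy satisfy
+// apimachinery's runtime.Object interface, so a decoded policy can be passed
+// through generic runtime machinery.)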
+func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredicateArgument) DeepCopyInto(out *PredicateArgument) { + *out = *in + if in.ServiceAffinity != nil { + in, out := &in.ServiceAffinity, &out.ServiceAffinity + *out = new(ServiceAffinity) + (*in).DeepCopyInto(*out) + } + if in.LabelsPresence != nil { + in, out := &in.LabelsPresence, &out.LabelsPresence + *out = new(LabelsPresence) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicateArgument. +func (in *PredicateArgument) DeepCopy() *PredicateArgument { + if in == nil { + return nil + } + out := new(PredicateArgument) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredicatePolicy) DeepCopyInto(out *PredicatePolicy) { + *out = *in + if in.Argument != nil { + in, out := &in.Argument, &out.Argument + *out = new(PredicateArgument) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicatePolicy. +func (in *PredicatePolicy) DeepCopy() *PredicatePolicy { + if in == nil { + return nil + } + out := new(PredicatePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityArgument) DeepCopyInto(out *PriorityArgument) { + *out = *in + if in.ServiceAntiAffinity != nil { + in, out := &in.ServiceAntiAffinity, &out.ServiceAntiAffinity + *out = new(ServiceAntiAffinity) + **out = **in + } + if in.LabelPreference != nil { + in, out := &in.LabelPreference, &out.LabelPreference + *out = new(LabelPreference) + **out = **in + } + if in.RequestedToCapacityRatioArguments != nil { + in, out := &in.RequestedToCapacityRatioArguments, &out.RequestedToCapacityRatioArguments + *out = new(RequestedToCapacityRatioArguments) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityArgument. +func (in *PriorityArgument) DeepCopy() *PriorityArgument { + if in == nil { + return nil + } + out := new(PriorityArgument) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityPolicy) DeepCopyInto(out *PriorityPolicy) { + *out = *in + if in.Argument != nil { + in, out := &in.Argument, &out.Argument + *out = new(PriorityArgument) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityPolicy. +func (in *PriorityPolicy) DeepCopy() *PriorityPolicy { + if in == nil { + return nil + } + out := new(PriorityPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
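+// (Editorial note: the UtilizationShape slice below is copied with a plain
+// copy() because UtilizationShapePoint contains no pointer fields.)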
+func (in *RequestedToCapacityRatioArguments) DeepCopyInto(out *RequestedToCapacityRatioArguments) { + *out = *in + if in.UtilizationShape != nil { + in, out := &in.UtilizationShape, &out.UtilizationShape + *out = make([]UtilizationShapePoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioArguments. +func (in *RequestedToCapacityRatioArguments) DeepCopy() *RequestedToCapacityRatioArguments { + if in == nil { + return nil + } + out := new(RequestedToCapacityRatioArguments) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAffinity) DeepCopyInto(out *ServiceAffinity) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAffinity. +func (in *ServiceAffinity) DeepCopy() *ServiceAffinity { + if in == nil { + return nil + } + out := new(ServiceAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAntiAffinity) DeepCopyInto(out *ServiceAntiAffinity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAntiAffinity. +func (in *ServiceAntiAffinity) DeepCopy() *ServiceAntiAffinity { + if in == nil { + return nil + } + out := new(ServiceAntiAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint. +func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint { + if in == nil { + return nil + } + out := new(UtilizationShapePoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Victims) DeepCopyInto(out *Victims) { + *out = *in + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = make([]*v1.Pod, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(v1.Pod) + (*in).DeepCopyInto(*out) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Victims. +func (in *Victims) DeepCopy() *Victims { + if in == nil { + return nil + } + out := new(Victims) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go b/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go new file mode 100644 index 000000000..5352f1332 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/security/apparmor/helpers.go @@ -0,0 +1,80 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apparmor
+
+import (
+	"strings"
+
+	"k8s.io/api/core/v1"
+)
+
+// TODO: Move these values into the API package.
+const (
+	// The prefix to an annotation key specifying a container profile.
+	ContainerAnnotationKeyPrefix = "container.apparmor.security.beta.kubernetes.io/"
+	// The annotation key specifying the default AppArmor profile.
+	DefaultProfileAnnotationKey = "apparmor.security.beta.kubernetes.io/defaultProfileName"
+	// The annotation key specifying the allowed AppArmor profiles.
+	AllowedProfilesAnnotationKey = "apparmor.security.beta.kubernetes.io/allowedProfileNames"
+
+	// The profile specifying the runtime default.
+	ProfileRuntimeDefault = "runtime/default"
+	// The prefix for specifying profiles loaded on the node.
+	ProfileNamePrefix = "localhost/"
+
+	// Unconfined profile
+	ProfileNameUnconfined = "unconfined"
+)
+
+// isRequired checks whether AppArmor is required for the pod to be run.
+func isRequired(pod *v1.Pod) bool {
+	for key, value := range pod.Annotations {
+		if strings.HasPrefix(key, ContainerAnnotationKeyPrefix) {
+			return value != ProfileNameUnconfined
+		}
+	}
+	return false
+}
+
+// GetProfileName returns the name of the profile to use with the container.
+func GetProfileName(pod *v1.Pod, containerName string) string {
+	return GetProfileNameFromPodAnnotations(pod.Annotations, containerName)
+}
+
+// GetProfileNameFromPodAnnotations gets the name of the profile to use with container from
+// pod annotations
+func GetProfileNameFromPodAnnotations(annotations map[string]string, containerName string) string {
+	return annotations[ContainerAnnotationKeyPrefix+containerName]
+}
+
+// SetProfileName sets the name of the profile to use with the container.
+func SetProfileName(pod *v1.Pod, containerName, profileName string) error {
+	if pod.Annotations == nil {
+		pod.Annotations = map[string]string{}
+	}
+	pod.Annotations[ContainerAnnotationKeyPrefix+containerName] = profileName
+	return nil
+}
+
+// SetProfileNameFromPodAnnotations sets the profile name in the given annotations map.
+func SetProfileNameFromPodAnnotations(annotations map[string]string, containerName, profileName string) error {
+	if annotations == nil {
+		return nil
+	}
+	annotations[ContainerAnnotationKeyPrefix+containerName] = profileName
+	return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go
new file mode 100644
index 000000000..25ea591fc
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate.go
@@ -0,0 +1,229 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package apparmor + +import ( + "bufio" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + "k8s.io/api/core/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" + kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + utilfile "k8s.io/kubernetes/pkg/util/file" +) + +// Whether AppArmor should be disabled by default. +// Set to true if the wrong build tags are set (see validate_disabled.go). +var isDisabledBuild bool + +// Interface for validating that a pod with an AppArmor profile can be run by a Node. +type Validator interface { + Validate(pod *v1.Pod) error + ValidateHost() error +} + +func NewValidator(runtime string) Validator { + if err := validateHost(runtime); err != nil { + return &validator{validateHostErr: err} + } + appArmorFS, err := getAppArmorFS() + if err != nil { + return &validator{ + validateHostErr: fmt.Errorf("error finding AppArmor FS: %v", err), + } + } + return &validator{ + appArmorFS: appArmorFS, + } +} + +type validator struct { + validateHostErr error + appArmorFS string +} + +func (v *validator) Validate(pod *v1.Pod) error { + if !isRequired(pod) { + return nil + } + + if v.ValidateHost() != nil { + return v.validateHostErr + } + + loadedProfiles, err := v.getLoadedProfiles() + if err != nil { + return fmt.Errorf("could not read loaded profiles: %v", err) + } + + for _, container := range pod.Spec.InitContainers { + if err := validateProfile(GetProfileName(pod, container.Name), loadedProfiles); err != nil { + return err + } + } + for _, container := range pod.Spec.Containers { + if err := validateProfile(GetProfileName(pod, container.Name), loadedProfiles); err != nil { + return err + } + } + + return nil +} + +func (v *validator) ValidateHost() error { + return v.validateHostErr +} + +// Verify that the host and runtime is capable of enforcing AppArmor profiles. +func validateHost(runtime string) error { + // Check feature-gates + if !utilfeature.DefaultFeatureGate.Enabled(features.AppArmor) { + return errors.New("AppArmor disabled by feature-gate") + } + + // Check build support. + if isDisabledBuild { + return errors.New("Binary not compiled for linux") + } + + // Check kernel support. + if !IsAppArmorEnabled() { + return errors.New("AppArmor is not enabled on the host") + } + + // Check runtime support. Currently only Docker is supported. + if runtime != kubetypes.DockerContainerRuntime && runtime != kubetypes.RemoteContainerRuntime { + return fmt.Errorf("AppArmor is only enabled for 'docker' and 'remote' runtimes. Found: %q.", runtime) + } + + return nil +} + +// Verify that the profile is valid and loaded. 
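+// (Illustrative, from the rules below: "runtime/default" and "unconfined"
+// always pass, while "localhost/my-profile" passes only when "my-profile"
+// appears in loadedProfiles.)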
+func validateProfile(profile string, loadedProfiles map[string]bool) error { + if err := ValidateProfileFormat(profile); err != nil { + return err + } + + if strings.HasPrefix(profile, ProfileNamePrefix) { + profileName := strings.TrimPrefix(profile, ProfileNamePrefix) + if !loadedProfiles[profileName] { + return fmt.Errorf("profile %q is not loaded", profileName) + } + } + + return nil +} + +func ValidateProfileFormat(profile string) error { + if profile == "" || profile == ProfileRuntimeDefault || profile == ProfileNameUnconfined { + return nil + } + if !strings.HasPrefix(profile, ProfileNamePrefix) { + return fmt.Errorf("invalid AppArmor profile name: %q", profile) + } + return nil +} + +func (v *validator) getLoadedProfiles() (map[string]bool, error) { + profilesPath := path.Join(v.appArmorFS, "profiles") + profilesFile, err := os.Open(profilesPath) + if err != nil { + return nil, fmt.Errorf("failed to open %s: %v", profilesPath, err) + } + defer profilesFile.Close() + + profiles := map[string]bool{} + scanner := bufio.NewScanner(profilesFile) + for scanner.Scan() { + profileName := parseProfileName(scanner.Text()) + if profileName == "" { + // Unknown line format; skip it. + continue + } + profiles[profileName] = true + } + return profiles, nil +} + +// The profiles file is formatted with one profile per line, matching a form: +// namespace://profile-name (mode) +// profile-name (mode) +// Where mode is {enforce, complain, kill}. The "namespace://" is only included for namespaced +// profiles. For the purposes of Kubernetes, we consider the namespace part of the profile name. +func parseProfileName(profileLine string) string { + modeIndex := strings.IndexRune(profileLine, '(') + if modeIndex < 0 { + return "" + } + return strings.TrimSpace(profileLine[:modeIndex]) +} + +func getAppArmorFS() (string, error) { + mountsFile, err := os.Open("/proc/mounts") + if err != nil { + return "", fmt.Errorf("could not open /proc/mounts: %v", err) + } + defer mountsFile.Close() + + scanner := bufio.NewScanner(mountsFile) + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) < 3 { + // Unknown line format; skip it. + continue + } + if fields[2] == "securityfs" { + appArmorFS := path.Join(fields[1], "apparmor") + if ok, err := utilfile.FileExists(appArmorFS); !ok { + msg := fmt.Sprintf("path %s does not exist", appArmorFS) + if err != nil { + return "", fmt.Errorf("%s: %v", msg, err) + } else { + return "", errors.New(msg) + } + } else { + return appArmorFS, nil + } + } + } + if err := scanner.Err(); err != nil { + return "", fmt.Errorf("error scanning mounts: %v", err) + } + + return "", errors.New("securityfs not found") +} + +// IsAppArmorEnabled returns true if apparmor is enabled for the host. +// This function is forked from +// https://github.com/opencontainers/runc/blob/1a81e9ab1f138c091fe5c86d0883f87716088527/libcontainer/apparmor/apparmor.go +// to avoid the libapparmor dependency. 
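+// (Editorial summary, hedged: it requires /sys/kernel/security/apparmor to
+// exist, /sbin/apparmor_parser to be present, and the kernel module's
+// "enabled" parameter to read "Y"; it deliberately reports false inside a
+// container, i.e. when the "container" environment variable is set.)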
+func IsAppArmorEnabled() bool { + if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" { + if _, err = os.Stat("/sbin/apparmor_parser"); err == nil { + buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") + return err == nil && len(buf) > 1 && buf[0] == 'Y' + } + } + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go new file mode 100644 index 000000000..875054a94 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/security/apparmor/validate_disabled.go @@ -0,0 +1,24 @@ +// +build !linux + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apparmor + +func init() { + // If Kubernetes was not built for linux, apparmor is always disabled. + isDisabledBuild = true +} diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go b/vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go new file mode 100644 index 000000000..3d48b6f2d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/claims.go @@ -0,0 +1,180 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serviceaccount + +import ( + "errors" + "fmt" + "time" + + "gopkg.in/square/go-jose.v2/jwt" + "k8s.io/klog" + + apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/kubernetes/pkg/apis/core" +) + +// time.Now stubbed out to allow testing +var now = time.Now + +type privateClaims struct { + Kubernetes kubernetes `json:"kubernetes.io,omitempty"` +} + +type kubernetes struct { + Namespace string `json:"namespace,omitempty"` + Svcacct ref `json:"serviceaccount,omitempty"` + Pod *ref `json:"pod,omitempty"` + Secret *ref `json:"secret,omitempty"` +} + +type ref struct { + Name string `json:"name,omitempty"` + UID string `json:"uid,omitempty"` +} + +func Claims(sa core.ServiceAccount, pod *core.Pod, secret *core.Secret, expirationSeconds int64, audience []string) (*jwt.Claims, interface{}) { + now := now() + sc := &jwt.Claims{ + Subject: apiserverserviceaccount.MakeUsername(sa.Namespace, sa.Name), + Audience: jwt.Audience(audience), + IssuedAt: jwt.NewNumericDate(now), + NotBefore: jwt.NewNumericDate(now), + Expiry: jwt.NewNumericDate(now.Add(time.Duration(expirationSeconds) * time.Second)), + } + pc := &privateClaims{ + Kubernetes: kubernetes{ + Namespace: sa.Namespace, + Svcacct: ref{ + Name: sa.Name, + UID: string(sa.UID), + }, + }, + } + switch { + case pod != nil: + pc.Kubernetes.Pod = &ref{ + Name: pod.Name, + UID: string(pod.UID), + } + case secret != nil: + pc.Kubernetes.Secret = &ref{ + Name: secret.Name, + UID: string(secret.UID), + } + } + return sc, pc +} + +func NewValidator(getter ServiceAccountTokenGetter) Validator { + return &validator{ + getter: getter, + } +} + +type validator struct { + getter ServiceAccountTokenGetter +} + +var _ = Validator(&validator{}) + +func (v *validator) Validate(_ string, public *jwt.Claims, privateObj interface{}) (*ServiceAccountInfo, error) { + private, ok := privateObj.(*privateClaims) + if !ok { + klog.Errorf("jwt validator expected private claim of type *privateClaims but got: %T", privateObj) + return nil, errors.New("Token could not be validated.") + } + err := public.Validate(jwt.Expected{ + Time: now(), + }) + switch { + case err == nil: + case err == jwt.ErrExpired: + return nil, errors.New("Token has expired.") + default: + klog.Errorf("unexpected validation error: %T", err) + return nil, errors.New("Token could not be validated.") + } + + namespace := private.Kubernetes.Namespace + saref := private.Kubernetes.Svcacct + podref := private.Kubernetes.Pod + secref := private.Kubernetes.Secret + // Make sure service account still exists (name and UID) + serviceAccount, err := v.getter.GetServiceAccount(namespace, saref.Name) + if err != nil { + klog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, saref.Name, err) + return nil, err + } + if serviceAccount.DeletionTimestamp != nil { + klog.V(4).Infof("Service account has been deleted %s/%s", namespace, saref.Name) + return nil, fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, saref.Name) + } + if string(serviceAccount.UID) != saref.UID { + klog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, saref.Name, string(serviceAccount.UID), saref.UID) + return nil, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, saref.UID) + } + + if secref != nil { + // Make sure token hasn't been invalidated by deletion of the secret + secret, err := v.getter.GetSecret(namespace, secref.Name) + if err != nil { + klog.V(4).Infof("Could not retrieve bound secret %s/%s for 
service account %s/%s: %v", namespace, secref.Name, namespace, saref.Name, err) + return nil, errors.New("Token has been invalidated") + } + if secret.DeletionTimestamp != nil { + klog.V(4).Infof("Bound secret is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secref.Name, namespace, saref.Name) + return nil, errors.New("Token has been invalidated") + } + if secref.UID != string(secret.UID) { + klog.V(4).Infof("Secret UID no longer matches %s/%s: %q != %q", namespace, secref.Name, string(secret.UID), secref.UID) + return nil, fmt.Errorf("Secret UID (%s) does not match claim (%s)", secret.UID, secref.UID) + } + } + + var podName, podUID string + if podref != nil { + // Make sure token hasn't been invalidated by deletion of the pod + pod, err := v.getter.GetPod(namespace, podref.Name) + if err != nil { + klog.V(4).Infof("Could not retrieve bound pod %s/%s for service account %s/%s: %v", namespace, podref.Name, namespace, saref.Name, err) + return nil, errors.New("Token has been invalidated") + } + if pod.DeletionTimestamp != nil { + klog.V(4).Infof("Bound pod is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, podref.Name, namespace, saref.Name) + return nil, errors.New("Token has been invalidated") + } + if podref.UID != string(pod.UID) { + klog.V(4).Infof("Pod UID no longer matches %s/%s: %q != %q", namespace, podref.Name, string(pod.UID), podref.UID) + return nil, fmt.Errorf("Pod UID (%s) does not match claim (%s)", pod.UID, podref.UID) + } + podName = podref.Name + podUID = podref.UID + } + + return &ServiceAccountInfo{ + Namespace: private.Kubernetes.Namespace, + Name: private.Kubernetes.Svcacct.Name, + UID: private.Kubernetes.Svcacct.UID, + PodName: podName, + PodUID: podUID, + }, nil +} + +func (v *validator) NewPrivateClaims() interface{} { + return &privateClaims{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go b/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go new file mode 100644 index 000000000..233fdee2d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/jwt.go @@ -0,0 +1,232 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serviceaccount + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "encoding/base64" + "encoding/json" + "fmt" + "strings" + + jose "gopkg.in/square/go-jose.v2" + "gopkg.in/square/go-jose.v2/jwt" + + "k8s.io/api/core/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apiserver/pkg/authentication/authenticator" +) + +// ServiceAccountTokenGetter defines functions to retrieve a named service account and secret +type ServiceAccountTokenGetter interface { + GetServiceAccount(namespace, name string) (*v1.ServiceAccount, error) + GetPod(namespace, name string) (*v1.Pod, error) + GetSecret(namespace, name string) (*v1.Secret, error) +} + +type TokenGenerator interface { + // GenerateToken generates a token which will identify the given + // ServiceAccount. 
privateClaims is an interface that will be
+	// serialized into the JWT payload JSON encoding at the root level of
+	// the payload object. Public claims take precedence over private
+	// claims, i.e. if both claims and privateClaims have an "exp" field,
+	// the value in claims will be used.
+	GenerateToken(claims *jwt.Claims, privateClaims interface{}) (string, error)
+}
+
+// JWTTokenGenerator returns a TokenGenerator that generates signed JWT tokens, using the given privateKey.
+// privateKey may be an *rsa.PrivateKey, an *ecdsa.PrivateKey, or a jose.OpaqueSigner.
+func JWTTokenGenerator(iss string, privateKey interface{}) (TokenGenerator, error) {
+	var alg jose.SignatureAlgorithm
+	switch pk := privateKey.(type) {
+	case *rsa.PrivateKey:
+		alg = jose.RS256
+	case *ecdsa.PrivateKey:
+		switch pk.Curve {
+		case elliptic.P256():
+			alg = jose.ES256
+		case elliptic.P384():
+			alg = jose.ES384
+		case elliptic.P521():
+			alg = jose.ES512
+		default:
+			return nil, fmt.Errorf("unknown private key curve, must be 256, 384, or 521")
+		}
+	case jose.OpaqueSigner:
+		alg = jose.SignatureAlgorithm(pk.Public().Algorithm)
+	default:
+		return nil, fmt.Errorf("unknown private key type %T, must be *rsa.PrivateKey, *ecdsa.PrivateKey, or jose.OpaqueSigner", privateKey)
+	}
+
+	signer, err := jose.NewSigner(
+		jose.SigningKey{
+			Algorithm: alg,
+			Key:       privateKey,
+		},
+		nil,
+	)
+	if err != nil {
+		return nil, err
+	}
+	return &jwtTokenGenerator{
+		iss:    iss,
+		signer: signer,
+	}, nil
+}
+
+type jwtTokenGenerator struct {
+	iss    string
+	signer jose.Signer
+}
+
+func (j *jwtTokenGenerator) GenerateToken(claims *jwt.Claims, privateClaims interface{}) (string, error) {
+	// claims are applied in reverse precedence
+	return jwt.Signed(j.signer).
+		Claims(privateClaims).
+		Claims(claims).
+		Claims(&jwt.Claims{
+			Issuer: j.iss,
+		}).
+		CompactSerialize()
+}
+
+// JWTTokenAuthenticator authenticates tokens as JWT tokens produced by JWTTokenGenerator
+// Token signatures are verified using each of the given public keys until one works (allowing key rotation)
+// The given Validator applies any domain-specific checks, for example that the service account
+// and secret referenced by the token's claims still exist.
+func JWTTokenAuthenticator(iss string, keys []interface{}, implicitAuds authenticator.Audiences, validator Validator) authenticator.Token {
+	return &jwtTokenAuthenticator{
+		iss:          iss,
+		keys:         keys,
+		implicitAuds: implicitAuds,
+		validator:    validator,
+	}
+}
+
+type jwtTokenAuthenticator struct {
+	iss          string
+	keys         []interface{}
+	validator    Validator
+	implicitAuds authenticator.Audiences
+}
+
+// Validator is called by the JWT token authenticator to apply domain specific
+// validation to a token and extract user information.
+type Validator interface {
+	// Validate validates a token and returns user information or an error.
+	// Validator can assume that the issuer and signature of a token are already
+	// verified when this function is called.
+	Validate(tokenData string, public *jwt.Claims, private interface{}) (*ServiceAccountInfo, error)
+	// NewPrivateClaims returns a struct that the authenticator should
+	// deserialize the JWT payload into. The authenticator may then pass this
+	// struct back to the Validator as the 'private' argument to a Validate()
+	// call. This struct should contain fields for any private claims that the
+	// Validator requires to validate the JWT.
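+	// (Illustrative, editorial: the validators in this package return
+	// &privateClaims{} or &legacyPrivateClaims{}, matching the claim
+	// layouts defined in claims.go and legacy.go.)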
+ NewPrivateClaims() interface{} +} + +func (j *jwtTokenAuthenticator) AuthenticateToken(ctx context.Context, tokenData string) (*authenticator.Response, bool, error) { + if !j.hasCorrectIssuer(tokenData) { + return nil, false, nil + } + + tok, err := jwt.ParseSigned(tokenData) + if err != nil { + return nil, false, nil + } + + public := &jwt.Claims{} + private := j.validator.NewPrivateClaims() + + var ( + found bool + errlist []error + ) + for _, key := range j.keys { + if err := tok.Claims(key, public, private); err != nil { + errlist = append(errlist, err) + continue + } + found = true + break + } + + if !found { + return nil, false, utilerrors.NewAggregate(errlist) + } + + tokenAudiences := authenticator.Audiences(public.Audience) + if len(tokenAudiences) == 0 { + // only apiserver audiences are allowed for legacy tokens + tokenAudiences = j.implicitAuds + } + + requestedAudiences, ok := authenticator.AudiencesFrom(ctx) + if !ok { + // default to apiserver audiences + requestedAudiences = j.implicitAuds + } + + auds := authenticator.Audiences(tokenAudiences).Intersect(requestedAudiences) + if len(auds) == 0 && len(j.implicitAuds) != 0 { + return nil, false, fmt.Errorf("token audiences %q is invalid for the target audiences %q", tokenAudiences, requestedAudiences) + } + + // If we get here, we have a token with a recognized signature and + // issuer string. + sa, err := j.validator.Validate(tokenData, public, private) + if err != nil { + return nil, false, err + } + + return &authenticator.Response{ + User: sa.UserInfo(), + Audiences: auds, + }, true, nil +} + +// hasCorrectIssuer returns true if tokenData is a valid JWT in compact +// serialization format and the "iss" claim matches the iss field of this token +// authenticator, and otherwise returns false. +// +// Note: go-jose currently does not allow access to unverified JWS payloads. +// See https://github.com/square/go-jose/issues/169 +func (j *jwtTokenAuthenticator) hasCorrectIssuer(tokenData string) bool { + parts := strings.Split(tokenData, ".") + if len(parts) != 3 { + return false + } + payload, err := base64.RawURLEncoding.DecodeString(parts[1]) + if err != nil { + return false + } + claims := struct { + // WARNING: this JWT is not verified. Do not trust these claims. + Issuer string `json:"iss"` + }{} + if err := json.Unmarshal(payload, &claims); err != nil { + return false + } + if claims.Issuer != j.iss { + return false + } + return true + +} diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go b/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go new file mode 100644 index 000000000..57c482f0b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/legacy.go @@ -0,0 +1,139 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package serviceaccount + +import ( + "bytes" + "errors" + "fmt" + + "gopkg.in/square/go-jose.v2/jwt" + "k8s.io/klog" + + "k8s.io/api/core/v1" + apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" +) + +func LegacyClaims(serviceAccount v1.ServiceAccount, secret v1.Secret) (*jwt.Claims, interface{}) { + return &jwt.Claims{ + Subject: apiserverserviceaccount.MakeUsername(serviceAccount.Namespace, serviceAccount.Name), + }, &legacyPrivateClaims{ + Namespace: serviceAccount.Namespace, + ServiceAccountName: serviceAccount.Name, + ServiceAccountUID: string(serviceAccount.UID), + SecretName: secret.Name, + } +} + +const LegacyIssuer = "kubernetes/serviceaccount" + +type legacyPrivateClaims struct { + ServiceAccountName string `json:"kubernetes.io/serviceaccount/service-account.name"` + ServiceAccountUID string `json:"kubernetes.io/serviceaccount/service-account.uid"` + SecretName string `json:"kubernetes.io/serviceaccount/secret.name"` + Namespace string `json:"kubernetes.io/serviceaccount/namespace"` +} + +func NewLegacyValidator(lookup bool, getter ServiceAccountTokenGetter) Validator { + return &legacyValidator{ + lookup: lookup, + getter: getter, + } +} + +type legacyValidator struct { + lookup bool + getter ServiceAccountTokenGetter +} + +var _ = Validator(&legacyValidator{}) + +func (v *legacyValidator) Validate(tokenData string, public *jwt.Claims, privateObj interface{}) (*ServiceAccountInfo, error) { + private, ok := privateObj.(*legacyPrivateClaims) + if !ok { + klog.Errorf("jwt validator expected private claim of type *legacyPrivateClaims but got: %T", privateObj) + return nil, errors.New("Token could not be validated.") + } + + // Make sure the claims we need exist + if len(public.Subject) == 0 { + return nil, errors.New("sub claim is missing") + } + namespace := private.Namespace + if len(namespace) == 0 { + return nil, errors.New("namespace claim is missing") + } + secretName := private.SecretName + if len(secretName) == 0 { + return nil, errors.New("secretName claim is missing") + } + serviceAccountName := private.ServiceAccountName + if len(serviceAccountName) == 0 { + return nil, errors.New("serviceAccountName claim is missing") + } + serviceAccountUID := private.ServiceAccountUID + if len(serviceAccountUID) == 0 { + return nil, errors.New("serviceAccountUID claim is missing") + } + + subjectNamespace, subjectName, err := apiserverserviceaccount.SplitUsername(public.Subject) + if err != nil || subjectNamespace != namespace || subjectName != serviceAccountName { + return nil, errors.New("sub claim is invalid") + } + + if v.lookup { + // Make sure token hasn't been invalidated by deletion of the secret + secret, err := v.getter.GetSecret(namespace, secretName) + if err != nil { + klog.V(4).Infof("Could not retrieve token %s/%s for service account %s/%s: %v", namespace, secretName, namespace, serviceAccountName, err) + return nil, errors.New("Token has been invalidated") + } + if secret.DeletionTimestamp != nil { + klog.V(4).Infof("Token is deleted and awaiting removal: %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) + return nil, errors.New("Token has been invalidated") + } + if bytes.Compare(secret.Data[v1.ServiceAccountTokenKey], []byte(tokenData)) != 0 { + klog.V(4).Infof("Token contents no longer matches %s/%s for service account %s/%s", namespace, secretName, namespace, serviceAccountName) + return nil, errors.New("Token does not match server's copy") + } + + // Make sure service account still exists (name 
and UID) + serviceAccount, err := v.getter.GetServiceAccount(namespace, serviceAccountName) + if err != nil { + klog.V(4).Infof("Could not retrieve service account %s/%s: %v", namespace, serviceAccountName, err) + return nil, err + } + if serviceAccount.DeletionTimestamp != nil { + klog.V(4).Infof("Service account has been deleted %s/%s", namespace, serviceAccountName) + return nil, fmt.Errorf("ServiceAccount %s/%s has been deleted", namespace, serviceAccountName) + } + if string(serviceAccount.UID) != serviceAccountUID { + klog.V(4).Infof("Service account UID no longer matches %s/%s: %q != %q", namespace, serviceAccountName, string(serviceAccount.UID), serviceAccountUID) + return nil, fmt.Errorf("ServiceAccount UID (%s) does not match claim (%s)", serviceAccount.UID, serviceAccountUID) + } + } + + return &ServiceAccountInfo{ + Namespace: private.Namespace, + Name: private.ServiceAccountName, + UID: private.ServiceAccountUID, + }, nil +} + +func (v *legacyValidator) NewPrivateClaims() interface{} { + return &legacyPrivateClaims{} +} diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go b/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go new file mode 100644 index 000000000..9f0a7a468 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go @@ -0,0 +1,81 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serviceaccount + +import ( + "k8s.io/api/core/v1" + apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" + "k8s.io/apiserver/pkg/authentication/user" +) + +const ( + // PodNameKey is the key used in a user's "extra" to specify the pod name of + // the authenticating request. + PodNameKey = "authentication.kubernetes.io/pod-name" + // PodUIDKey is the key used in a user's "extra" to specify the pod UID of + // the authenticating request. 
+ PodUIDKey = "authentication.kubernetes.io/pod-uid" +) + +// UserInfo returns a user.Info interface for the given namespace, service account name and UID +func UserInfo(namespace, name, uid string) user.Info { + return (&ServiceAccountInfo{ + Name: name, + Namespace: namespace, + UID: uid, + }).UserInfo() +} + +type ServiceAccountInfo struct { + Name, Namespace, UID string + PodName, PodUID string +} + +func (sa *ServiceAccountInfo) UserInfo() user.Info { + info := &user.DefaultInfo{ + Name: apiserverserviceaccount.MakeUsername(sa.Namespace, sa.Name), + UID: sa.UID, + Groups: apiserverserviceaccount.MakeGroupNames(sa.Namespace), + } + if sa.PodName != "" && sa.PodUID != "" { + info.Extra = map[string][]string{ + PodNameKey: {sa.PodName}, + PodUIDKey: {sa.PodUID}, + } + } + return info +} + +// IsServiceAccountToken returns true if the secret is a valid api token for the service account +func IsServiceAccountToken(secret *v1.Secret, sa *v1.ServiceAccount) bool { + if secret.Type != v1.SecretTypeServiceAccountToken { + return false + } + + name := secret.Annotations[v1.ServiceAccountNameKey] + uid := secret.Annotations[v1.ServiceAccountUIDKey] + if name != sa.Name { + // Name must match + return false + } + if len(uid) > 0 && uid != string(sa.UID) { + // If UID is specified, it must match + return false + } + + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/file/file.go b/vendor/k8s.io/kubernetes/pkg/util/file/file.go new file mode 100644 index 000000000..70d26c4ef --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/file/file.go @@ -0,0 +1,57 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package file + +import ( + "os" +) + +// FileExists checks if the specified file exists. +func FileExists(filename string) (bool, error) { + if _, err := os.Stat(filename); os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +// FileOrSymlinkExists checks if the specified file or symlink exists. +func FileOrSymlinkExists(filename string) (bool, error) { + if _, err := os.Lstat(filename); os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} + +// ReadDirNoStat returns a []string of files/directories contained +// in dirname without calling lstat on them. +func ReadDirNoStat(dirname string) ([]string, error) { + if dirname == "" { + dirname = "." + } + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + return f.Readdirnames(-1) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go b/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go new file mode 100644 index 000000000..803f066a4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/hash/hash.go @@ -0,0 +1,37 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hash + +import ( + "hash" + + "github.com/davecgh/go-spew/spew" +) + +// DeepHashObject writes specified object to hash using the spew library +// which follows pointers and prints actual values of the nested objects +// ensuring the hash does not change when a pointer changes. +func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { + hasher.Reset() + printer := spew.ConfigState{ + Indent: " ", + SortKeys: true, + DisableMethods: true, + SpewKeys: true, + } + printer.Fprintf(hasher, "%#v", objectToWrite) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/metrics/util.go b/vendor/k8s.io/kubernetes/pkg/util/metrics/util.go new file mode 100644 index 000000000..f1bb7773e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/metrics/util.go @@ -0,0 +1,76 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "sync" + + "k8s.io/client-go/util/flowcontrol" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + metricsLock sync.Mutex + rateLimiterMetrics = make(map[string]*rateLimiterMetric) +) + +type rateLimiterMetric struct { + metric prometheus.Gauge + stopCh chan struct{} +} + +func registerRateLimiterMetric(ownerName string) error { + metricsLock.Lock() + defer metricsLock.Unlock() + + if _, ok := rateLimiterMetrics[ownerName]; ok { + // only register once in Prometheus. We happen to see an ownerName reused in parallel integration tests. 
+ return nil + } + metric := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "rate_limiter_use", + Subsystem: ownerName, + Help: fmt.Sprintf("A metric measuring the saturation of the rate limiter for %v", ownerName), + }) + if err := prometheus.Register(metric); err != nil { + return fmt.Errorf("error registering rate limiter usage metric: %v", err) + } + stopCh := make(chan struct{}) + rateLimiterMetrics[ownerName] = &rateLimiterMetric{ + metric: metric, + stopCh: stopCh, + } + return nil +} + +// RegisterMetricAndTrackRateLimiterUsage registers a metric ownerName_rate_limiter_use in prometheus to track +// how heavily used the rateLimiter is, and starts a goroutine that updates this metric every updatePeriod +func RegisterMetricAndTrackRateLimiterUsage(ownerName string, rateLimiter flowcontrol.RateLimiter) error { + if err := registerRateLimiterMetric(ownerName); err != nil { + return err + } + // TODO: determine how to track rate limiter saturation + // See discussion at https://go-review.googlesource.com/c/time/+/29958#message-4caffc11669cadd90e2da4c05122cfec50ea6a22 + // go wait.Until(func() { + // metricsLock.Lock() + // defer metricsLock.Unlock() + // rateLimiterMetrics[ownerName].metric.Set() + // }, updatePeriod, rateLimiterMetrics[ownerName].stopCh) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/sets/doc.go b/vendor/k8s.io/kubernetes/pkg/util/net/sets/doc.go new file mode 100644 index 000000000..8414f74ac --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/net/sets/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package contains hand-coded set implementations that should be similar +// to the autogenerated ones in pkg/util/sets. +// We can't simply use net.IPNet as a map-key in Go (because it contains a +// []byte). +// We could use the same workaround we use here (a string representation as the +// key) to autogenerate sets. If we do that, or decide on an alternate +// approach, we should replace the implementations in this package with the +// autogenerated versions. +// It is expected that callers will alias this import as "netsets" i.e. import +// netsets "k8s.io/kubernetes/pkg/util/net/sets" + +package sets diff --git a/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go b/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go new file mode 100644 index 000000000..90ad58c63 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/net/sets/ipnet.go @@ -0,0 +1,121 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sets + +import ( + "net" + "strings" +) + +// IPNet maps string to net.IPNet. +type IPNet map[string]*net.IPNet + +// ParseIPNets parses string slice to IPNet. +func ParseIPNets(specs ...string) (IPNet, error) { + ipnetset := make(IPNet) + for _, spec := range specs { + spec = strings.TrimSpace(spec) + _, ipnet, err := net.ParseCIDR(spec) + if err != nil { + return nil, err + } + k := ipnet.String() // In case of normalization + ipnetset[k] = ipnet + } + return ipnetset, nil +} + +// Insert adds items to the set. +func (s IPNet) Insert(items ...*net.IPNet) { + for _, item := range items { + s[item.String()] = item + } +} + +// Delete removes all items from the set. +func (s IPNet) Delete(items ...*net.IPNet) { + for _, item := range items { + delete(s, item.String()) + } +} + +// Has returns true if and only if item is contained in the set. +func (s IPNet) Has(item *net.IPNet) bool { + _, contained := s[item.String()] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s IPNet) HasAll(items ...*net.IPNet) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s IPNet) Difference(s2 IPNet) IPNet { + result := make(IPNet) + for k, i := range s { + _, found := s2[k] + if found { + continue + } + result[k] = i + } + return result +} + +// StringSlice returns a []string with the String representation of each element in the set. +// Order is undefined. +func (s IPNet) StringSlice() []string { + a := make([]string, 0, len(s)) + for k := range s { + a = append(a, k) + } + return a +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s IPNet) IsSuperset(s2 IPNet) bool { + for k := range s2 { + _, found := s[k] + if !found { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s IPNet) Equal(s2 IPNet) bool { + return len(s) == len(s2) && s.IsSuperset(s2) +} + +// Len returns the size of the set. +func (s IPNet) Len() int { + return len(s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go b/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go new file mode 100644 index 000000000..be35da7cc --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/parsers/parsers.go @@ -0,0 +1,58 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package parsers + +import ( + "fmt" + // Import the crypto sha256 algorithm for the docker image parser to work + _ "crypto/sha256" + // Import the crypto/sha512 algorithm for the docker image parser to work with 384 and 512 sha hashes + _ "crypto/sha512" + + dockerref "github.com/docker/distribution/reference" +) + +const ( + DefaultImageTag = "latest" +) + +// ParseImageName parses a docker image string into three parts: repo, tag and digest. +// If both tag and digest are empty, a default image tag will be returned. +func ParseImageName(image string) (string, string, string, error) { + named, err := dockerref.ParseNormalizedNamed(image) + if err != nil { + return "", "", "", fmt.Errorf("couldn't parse image name: %v", err) + } + + repoToPull := named.Name() + var tag, digest string + + tagged, ok := named.(dockerref.Tagged) + if ok { + tag = tagged.Tag() + } + + digested, ok := named.(dockerref.Digested) + if ok { + digest = digested.Digest().String() + } + // If no tag was specified, use the default "latest". + if len(tag) == 0 && len(digest) == 0 { + tag = DefaultImageTag + } + return repoToPull, tag, digest, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go b/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go new file mode 100644 index 000000000..0777f6d69 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go @@ -0,0 +1,342 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package taints implements utilities for working with taints +package taints + +import ( + "fmt" + "strings" + + "k8s.io/api/core/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" +) + +const ( + MODIFIED = "modified" + TAINTED = "tainted" + UNTAINTED = "untainted" +) + +// parseTaint parses a taint from a string. Taint must be of the format '<key>=<value>:<effect>'.
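+// For example (illustrative, not part of the upstream comment): the spec +// "dedicated=gpu:NoSchedule" parses to a Taint with Key "dedicated", +// Value "gpu" and Effect NoSchedule.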
+func parseTaint(st string) (v1.Taint, error) { + var taint v1.Taint + parts := strings.Split(st, "=") + if len(parts) != 2 || len(parts[1]) == 0 || len(validation.IsQualifiedName(parts[0])) > 0 { + return taint, fmt.Errorf("invalid taint spec: %v", st) + } + + parts2 := strings.Split(parts[1], ":") + + errs := validation.IsValidLabelValue(parts2[0]) + if len(parts2) != 2 || len(errs) != 0 { + return taint, fmt.Errorf("invalid taint spec: %v, %s", st, strings.Join(errs, "; ")) + } + + effect := v1.TaintEffect(parts2[1]) + if err := validateTaintEffect(effect); err != nil { + return taint, err + } + + taint.Key = parts[0] + taint.Value = parts2[0] + taint.Effect = effect + + return taint, nil +} + +func validateTaintEffect(effect v1.TaintEffect) error { + if effect != v1.TaintEffectNoSchedule && effect != v1.TaintEffectPreferNoSchedule && effect != v1.TaintEffectNoExecute { + return fmt.Errorf("invalid taint effect: %v, unsupported taint effect", effect) + } + + return nil +} + +// NewTaintsVar wraps []api.Taint in a struct that implements flag.Value to allow taints to be +// bound to command line flags. +func NewTaintsVar(ptr *[]api.Taint) taintsVar { + return taintsVar{ + ptr: ptr, + } +} + +type taintsVar struct { + ptr *[]api.Taint +} + +func (t taintsVar) Set(s string) error { + if len(s) == 0 { + *t.ptr = nil + return nil + } + sts := strings.Split(s, ",") + var taints []api.Taint + for _, st := range sts { + taint, err := parseTaint(st) + if err != nil { + return err + } + taints = append(taints, api.Taint{Key: taint.Key, Value: taint.Value, Effect: api.TaintEffect(taint.Effect)}) + } + *t.ptr = taints + return nil +} + +func (t taintsVar) String() string { + if len(*t.ptr) == 0 { + return "" + } + var taints []string + for _, taint := range *t.ptr { + taints = append(taints, fmt.Sprintf("%s=%s:%s", taint.Key, taint.Value, taint.Effect)) + } + return strings.Join(taints, ",") +} + +func (t taintsVar) Type() string { + return "[]api.Taint" +} + +// ParseTaints takes a spec which is an array and creates slices for new taints to be added, taints to be deleted. +func ParseTaints(spec []string) ([]v1.Taint, []v1.Taint, error) { + var taints, taintsToRemove []v1.Taint + uniqueTaints := map[v1.TaintEffect]sets.String{} + + for _, taintSpec := range spec { + if strings.Index(taintSpec, "=") != -1 && strings.Index(taintSpec, ":") != -1 { + newTaint, err := parseTaint(taintSpec) + if err != nil { + return nil, nil, err + } + // validate if taint is unique by <key, effect> + if len(uniqueTaints[newTaint.Effect]) > 0 && uniqueTaints[newTaint.Effect].Has(newTaint.Key) { + return nil, nil, fmt.Errorf("duplicated taints with the same key and effect: %v", newTaint) + } + // add taint to existingTaints for uniqueness check + if len(uniqueTaints[newTaint.Effect]) == 0 { + uniqueTaints[newTaint.Effect] = sets.String{} + } + uniqueTaints[newTaint.Effect].Insert(newTaint.Key) + + taints = append(taints, newTaint) + } else if strings.HasSuffix(taintSpec, "-") { + taintKey := taintSpec[:len(taintSpec)-1] + var effect v1.TaintEffect + if strings.Index(taintKey, ":") != -1 { + parts := strings.Split(taintKey, ":") + taintKey = parts[0] + effect = v1.TaintEffect(parts[1]) + } + + // If effect is specified, need to validate it.
+ if len(effect) > 0 { + err := validateTaintEffect(effect) + if err != nil { + return nil, nil, err + } + } + taintsToRemove = append(taintsToRemove, v1.Taint{Key: taintKey, Effect: effect}) + } else { + return nil, nil, fmt.Errorf("unknown taint spec: %v", taintSpec) + } + } + return taints, taintsToRemove, nil +} + +// ReorganizeTaints returns the updated set of taints, taking into account old taints that were not updated, +// old taints that were updated, old taints that were deleted, and new taints. +func ReorganizeTaints(node *v1.Node, overwrite bool, taintsToAdd []v1.Taint, taintsToRemove []v1.Taint) (string, []v1.Taint, error) { + newTaints := append([]v1.Taint{}, taintsToAdd...) + oldTaints := node.Spec.Taints + // add taints that already existing but not updated to newTaints + added := addTaints(oldTaints, &newTaints) + allErrs, deleted := deleteTaints(taintsToRemove, &newTaints) + if (added && deleted) || overwrite { + return MODIFIED, newTaints, utilerrors.NewAggregate(allErrs) + } else if added { + return TAINTED, newTaints, utilerrors.NewAggregate(allErrs) + } + return UNTAINTED, newTaints, utilerrors.NewAggregate(allErrs) +} + +// deleteTaints deletes the given taints from the node's taintlist. +func deleteTaints(taintsToRemove []v1.Taint, newTaints *[]v1.Taint) ([]error, bool) { + allErrs := []error{} + var removed bool + for _, taintToRemove := range taintsToRemove { + removed = false + if len(taintToRemove.Effect) > 0 { + *newTaints, removed = DeleteTaint(*newTaints, &taintToRemove) + } else { + *newTaints, removed = DeleteTaintsByKey(*newTaints, taintToRemove.Key) + } + if !removed { + allErrs = append(allErrs, fmt.Errorf("taint %q not found", taintToRemove.ToString())) + } + } + return allErrs, removed +} + +// addTaints adds the newTaints list to existing ones and updates the newTaints List. +// TODO: This needs a rewrite to take only the new values instead of appended newTaints list to be consistent. +func addTaints(oldTaints []v1.Taint, newTaints *[]v1.Taint) bool { + for _, oldTaint := range oldTaints { + existsInNew := false + for _, taint := range *newTaints { + if taint.MatchTaint(&oldTaint) { + existsInNew = true + break + } + } + if !existsInNew { + *newTaints = append(*newTaints, oldTaint) + } + } + return len(oldTaints) != len(*newTaints) +} + +// CheckIfTaintsAlreadyExists checks if the node already has taints that we want to add and returns a string with taint keys. +func CheckIfTaintsAlreadyExists(oldTaints []v1.Taint, taints []v1.Taint) string { + var existingTaintList = make([]string, 0) + for _, taint := range taints { + for _, oldTaint := range oldTaints { + if taint.Key == oldTaint.Key && taint.Effect == oldTaint.Effect { + existingTaintList = append(existingTaintList, taint.Key) + } + } + } + return strings.Join(existingTaintList, ",") +} + +// DeleteTaintsByKey removes all the taints that have the same key to given taintKey +func DeleteTaintsByKey(taints []v1.Taint, taintKey string) ([]v1.Taint, bool) { + newTaints := []v1.Taint{} + deleted := false + for i := range taints { + if taintKey == taints[i].Key { + deleted = true + continue + } + newTaints = append(newTaints, taints[i]) + } + return newTaints, deleted +} + +// DeleteTaint removes all the taints that have the same key and effect to given taintToDelete. 
+func DeleteTaint(taints []v1.Taint, taintToDelete *v1.Taint) ([]v1.Taint, bool) { + newTaints := []v1.Taint{} + deleted := false + for i := range taints { + if taintToDelete.MatchTaint(&taints[i]) { + deleted = true + continue + } + newTaints = append(newTaints, taints[i]) + } + return newTaints, deleted +} + +// RemoveTaint tries to remove a taint from annotations list. Returns a new copy of updated Node and true if something was updated +// false otherwise. +func RemoveTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) { + newNode := node.DeepCopy() + nodeTaints := newNode.Spec.Taints + if len(nodeTaints) == 0 { + return newNode, false, nil + } + + if !TaintExists(nodeTaints, taint) { + return newNode, false, nil + } + + newTaints, _ := DeleteTaint(nodeTaints, taint) + newNode.Spec.Taints = newTaints + return newNode, true, nil +} + +// AddOrUpdateTaint tries to add a taint to annotations list. Returns a new copy of updated Node and true if something was updated +// false otherwise. +func AddOrUpdateTaint(node *v1.Node, taint *v1.Taint) (*v1.Node, bool, error) { + newNode := node.DeepCopy() + nodeTaints := newNode.Spec.Taints + + var newTaints []v1.Taint + updated := false + for i := range nodeTaints { + if taint.MatchTaint(&nodeTaints[i]) { + if helper.Semantic.DeepEqual(*taint, nodeTaints[i]) { + return newNode, false, nil + } + newTaints = append(newTaints, *taint) + updated = true + continue + } + + newTaints = append(newTaints, nodeTaints[i]) + } + + if !updated { + newTaints = append(newTaints, *taint) + } + + newNode.Spec.Taints = newTaints + return newNode, true, nil +} + +// TaintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise. +func TaintExists(taints []v1.Taint, taintToFind *v1.Taint) bool { + for _, taint := range taints { + if taint.MatchTaint(taintToFind) { + return true + } + } + return false +} + +func TaintSetDiff(t1, t2 []v1.Taint) (taintsToAdd []*v1.Taint, taintsToRemove []*v1.Taint) { + for _, taint := range t1 { + if !TaintExists(t2, &taint) { + t := taint + taintsToAdd = append(taintsToAdd, &t) + } + } + + for _, taint := range t2 { + if !TaintExists(t1, &taint) { + t := taint + taintsToRemove = append(taintsToRemove, &t) + } + } + + return +} + +func TaintSetFilter(taints []v1.Taint, fn func(*v1.Taint) bool) []v1.Taint { + res := []v1.Taint{} + + for _, taint := range taints { + if fn(&taint) { + res = append(res, taint) + } + } + + return res +} diff --git a/vendor/k8s.io/kubernetes/third_party/forked/godep/license.go b/vendor/k8s.io/kubernetes/third_party/forked/godep/license.go new file mode 100644 index 000000000..5ce82496f --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/forked/godep/license.go @@ -0,0 +1,59 @@ +package main + +import ( + "strings" +) + +// LicenseFilePrefix is a list of filename prefixes that indicate it +// might contain a software license +var LicenseFilePrefix = []string{ + "licence", // UK spelling + "license", // US spelling + "copying", + "unlicense", + "copyright", + "copyleft", + "authors", + "contributors", +} + +// LegalFileSubstring are substrings that indicate the file is likely +// to contain some type of legal declaration. 
"legal" is often used +// that it might moved to LicenseFilePrefix +var LegalFileSubstring = []string{ + "legal", + "notice", + "disclaimer", + "patent", + "third-party", + "thirdparty", +} + +// IsLicenseFile returns true if the filename might be contain a +// software license +func IsLicenseFile(filename string) bool { + lowerfile := strings.ToLower(filename) + for _, prefix := range LicenseFilePrefix { + if strings.HasPrefix(lowerfile, prefix) { + return true + } + } + return false +} + +// IsLegalFile returns true if the file is likely to contain some type +// of of legal declaration or licensing information +func IsLegalFile(filename string) bool { + lowerfile := strings.ToLower(filename) + for _, prefix := range LicenseFilePrefix { + if strings.HasPrefix(lowerfile, prefix) { + return true + } + } + for _, substring := range LegalFileSubstring { + if strings.Contains(lowerfile, substring) { + return true + } + } + return false +} diff --git a/vendor/k8s.io/utils/LICENSE b/vendor/k8s.io/utils/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/utils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go new file mode 100644 index 000000000..a11a540f4 --- /dev/null +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -0,0 +1,86 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pointer + +import ( + "fmt" + "reflect" +) + +// AllPtrFieldsNil tests whether all pointer fields in a struct are nil. 
This is useful when, +// for example, an API struct is handled by plugins which need to distinguish +// "no plugin accepted this spec" from "this spec is empty". +// +// This function is only valid for structs and pointers to structs. Any other +// type will cause a panic. Passing a typed nil pointer will return true. +func AllPtrFieldsNil(obj interface{}) bool { + v := reflect.ValueOf(obj) + if !v.IsValid() { + panic(fmt.Sprintf("reflect.ValueOf() produced a non-valid Value for %#v", obj)) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return true + } + v = v.Elem() + } + for i := 0; i < v.NumField(); i++ { + if v.Field(i).Kind() == reflect.Ptr && !v.Field(i).IsNil() { + return false + } + } + return true +} + +// Int32Ptr returns a pointer to an int32 +func Int32Ptr(i int32) *int32 { + return &i +} + +// Int64Ptr returns a pointer to an int64 +func Int64Ptr(i int64) *int64 { + return &i +} + +// Int32PtrDerefOr dereferences the int32 ptr and returns it if not nil, +// else returns def. +func Int32PtrDerefOr(ptr *int32, def int32) int32 { + if ptr != nil { + return *ptr + } + return def +} + +// BoolPtr returns a pointer to a bool +func BoolPtr(b bool) *bool { + return &b +} + +// StringPtr returns a pointer to the passed string. +func StringPtr(s string) *string { + return &s +} + +// Float32Ptr returns a pointer to the passed float32. +func Float32Ptr(i float32) *float32 { + return &i +} + +// Float64Ptr returns a pointer to the passed float64. +func Float64Ptr(i float64) *float64 { + return &i +} diff --git a/vendor/k8s.io/utils/third_party/forked/golang/LICENSE b/vendor/k8s.io/utils/third_party/forked/golang/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/vendor/k8s.io/utils/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
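As context for reviewers: the pointer helpers in vendor/k8s.io/utils/pointer above exist because Kubernetes API types use pointer fields to distinguish "unset" from an explicit zero value. A minimal usage sketch follows; the spec struct and the default of 1 are hypothetical and not part of this patch:

    package main

    import (
        "fmt"

        "k8s.io/utils/pointer"
    )

    // spec is a hypothetical API-style struct; a nil Replicas means "unset".
    type spec struct {
        Replicas *int32
    }

    func main() {
        explicit := spec{Replicas: pointer.Int32Ptr(3)} // explicitly set to 3
        unset := spec{}                                 // field left nil

        // Int32PtrDerefOr dereferences the pointer if non-nil, else returns the default.
        fmt.Println(pointer.Int32PtrDerefOr(explicit.Replicas, 1)) // 3
        fmt.Println(pointer.Int32PtrDerefOr(unset.Replicas, 1))    // 1
    }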
diff --git a/vendor/k8s.io/utils/third_party/forked/golang/PATENTS b/vendor/k8s.io/utils/third_party/forked/golang/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/k8s.io/utils/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/sigs.k8s.io/application/pkg/controller/application/application_controller.go b/vendor/sigs.k8s.io/application/pkg/controller/application/application_controller.go new file mode 100644 index 000000000..3cec2d4ae --- /dev/null +++ b/vendor/sigs.k8s.io/application/pkg/controller/application/application_controller.go @@ -0,0 +1,51 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package application + +import ( + appv1beta1 "github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1" + reconciler "github.com/kubernetes-sigs/application/pkg/genericreconciler" + kbc "github.com/kubernetes-sigs/application/pkg/kbcontroller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// Constants +const ( + NameLabelKey = "app.kubernetes.io/name" + VersionLabelKey = "app.kubernetes.io/version" + InstanceLabelKey = "app.kubernetes.io/instance" + PartOfLabelKey = "app.kubernetes.io/part-of" + ComponentLabelKey = "app.kubernetes.io/component" + ManagedByLabelKey = "app.kubernetes.io/managed-by" +) + +// Add creates a new Application Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller +// and Start it when the Manager is Started. 
+func Add(mgr manager.Manager) error { + return kbc.CreateController("application", mgr, &appv1beta1.Application{}, newReconciler(mgr)) +} + +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager) reconcile.Reconciler { + r := &reconciler.Reconciler{ + Manager: mgr, // why do we need manager ? + Handle: &appv1beta1.Application{}, + } + r.Init() + return r +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go deleted file mode 100644 index d918eeaa4..000000000 --- a/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllerutil - -import ( - "context" - "fmt" - "reflect" - - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" -) - -// AlreadyOwnedError is an error returned if the object you are trying to assign -// a controller reference is already owned by another controller Object is the -// subject and Owner is the reference for the current owner -type AlreadyOwnedError struct { - Object v1.Object - Owner v1.OwnerReference -} - -func (e *AlreadyOwnedError) Error() string { - return fmt.Sprintf("Object %s/%s is already owned by another %s controller %s", e.Object.GetNamespace(), e.Object.GetName(), e.Owner.Kind, e.Owner.Name) -} - -func newAlreadyOwnedError(Object v1.Object, Owner v1.OwnerReference) *AlreadyOwnedError { - return &AlreadyOwnedError{ - Object: Object, - Owner: Owner, - } -} - -// SetControllerReference sets owner as a Controller OwnerReference on owned. -// This is used for garbage collection of the owned object and for -// reconciling the owner object on changes to owned (with a Watch + EnqueueRequestForOwner). -// Since only one OwnerReference can be a controller, it returns an error if -// there is another OwnerReference with Controller flag set. 
-func SetControllerReference(owner, object v1.Object, scheme *runtime.Scheme) error { - ro, ok := owner.(runtime.Object) - if !ok { - return fmt.Errorf("is not a %T a runtime.Object, cannot call SetControllerReference", owner) - } - - gvk, err := apiutil.GVKForObject(ro, scheme) - if err != nil { - return err - } - - // Create a new ref - ref := *v1.NewControllerRef(owner, schema.GroupVersionKind{Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind}) - - existingRefs := object.GetOwnerReferences() - fi := -1 - for i, r := range existingRefs { - if referSameObject(ref, r) { - fi = i - } else if r.Controller != nil && *r.Controller { - return newAlreadyOwnedError(object, r) - } - } - if fi == -1 { - existingRefs = append(existingRefs, ref) - } else { - existingRefs[fi] = ref - } - - // Update owner references - object.SetOwnerReferences(existingRefs) - return nil -} - -// Returns true if a and b point to the same object -func referSameObject(a, b v1.OwnerReference) bool { - aGV, err := schema.ParseGroupVersion(a.APIVersion) - if err != nil { - return false - } - - bGV, err := schema.ParseGroupVersion(b.APIVersion) - if err != nil { - return false - } - - return aGV == bGV && a.Kind == b.Kind && a.Name == b.Name -} - -// OperationResult is the action result of a CreateOrUpdate call -type OperationResult string - -const ( // They should complete the sentence "Deployment default/foo has been ..." - // OperationResultNone means that the resource has not been changed - OperationResultNone OperationResult = "unchanged" - // OperationResultCreated means that a new resource is created - OperationResultCreated OperationResult = "created" - // OperationResultUpdated means that an existing resource is updated - OperationResultUpdated OperationResult = "updated" -) - -// CreateOrUpdate creates or updates the given object obj in the Kubernetes -// cluster. The object's desired state should be reconciled with the existing -// state using the passed in ReconcileFn. obj must be a struct pointer so that -// obj can be updated with the content returned by the Server. -// -// It returns the executed operation and an error. 
-func CreateOrUpdate(ctx context.Context, c client.Client, obj runtime.Object, f MutateFn) (OperationResult, error) { - // op is the operation we are going to attempt - op := OperationResultNone - - // get the existing object meta - metaObj, ok := obj.(v1.Object) - if !ok { - return OperationResultNone, fmt.Errorf("%T does not implement metav1.Object interface", obj) - } - - // retrieve the existing object - key := client.ObjectKey{ - Name: metaObj.GetName(), - Namespace: metaObj.GetNamespace(), - } - err := c.Get(ctx, key, obj) - - // reconcile the existing object - existing := obj.DeepCopyObject() - existingObjMeta := existing.(v1.Object) - existingObjMeta.SetName(metaObj.GetName()) - existingObjMeta.SetNamespace(metaObj.GetNamespace()) - - if e := f(obj); e != nil { - return OperationResultNone, e - } - - if metaObj.GetName() != existingObjMeta.GetName() { - return OperationResultNone, fmt.Errorf("ReconcileFn cannot mutate objects name") - } - - if metaObj.GetNamespace() != existingObjMeta.GetNamespace() { - return OperationResultNone, fmt.Errorf("ReconcileFn cannot mutate objects namespace") - } - - if errors.IsNotFound(err) { - err = c.Create(ctx, obj) - op = OperationResultCreated - } else if err == nil { - if reflect.DeepEqual(existing, obj) { - return OperationResultNone, nil - } - err = c.Update(ctx, obj) - op = OperationResultUpdated - } else { - return OperationResultNone, err - } - - if err != nil { - op = OperationResultNone - } - return op, err -} - -// MutateFn is a function which mutates the existing object into it's desired state. -type MutateFn func(existing runtime.Object) error
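Finally, a minimal sketch of how the taint utilities vendored above in vendor/k8s.io/kubernetes/pkg/util/taints are typically driven. The spec strings are invented for illustration and are not taken from this patch:

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/util/taints"
    )

    func main() {
        // "key=value:Effect" adds a taint; a trailing "-" marks a taint for removal.
        specs := []string{
            "dedicated=gpu:NoSchedule", // taint to add
            "dedicated:NoSchedule-",    // taint to remove
        }

        toAdd, toRemove, err := taints.ParseTaints(specs)
        if err != nil {
            panic(err)
        }
        fmt.Printf("add: %v, remove: %v\n", toAdd, toRemove)
    }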