From cfaeb10dc651b5a02d0b925e2354c3ef59b8a085 Mon Sep 17 00:00:00 2001 From: zryfish Date: Tue, 7 Apr 2020 19:33:45 +0800 Subject: [PATCH 01/12] add cluster api group (#1992) --- Makefile | 1 + api/api-rules/violation_exceptions.list | 38 +- api/openapi-spec/swagger.json | 2205 +++ .../bases/cluster.kubesphere.io_agents.yaml | 109 + .../bases/cluster.kubesphere.io_clusters.yaml | 81 + .../devops.kubesphere.io_s2ibinaries.yaml | 86 + .../devops.kubesphere.io_s2ibuilders.yaml | 578 + ...ops.kubesphere.io_s2ibuildertemplates.yaml | 141 + .../bases/devops.kubesphere.io_s2iruns.yaml | 181 + config/crd/bases/iam.kubesphere.io_users.yaml | 117 + ...ubesphere.io_namespacenetworkpolicies.yaml | 762 + ...ubesphere.io_workspacenetworkpolicies.yaml | 535 + ...icemesh.kubesphere.io_servicepolicies.yaml | 1605 ++ .../servicemesh.kubesphere.io_strategies.yaml | 1166 ++ .../tenant.kubesphere.io_workspaces.yaml | 54 + hack/generate_client.sh | 2 +- ...ha1.go => addtoscheme_cluster_v1alpha1.go} | 2 +- pkg/apis/cluster/group.go | 1 + .../v1alpha1/agent_types.go | 11 +- pkg/apis/cluster/v1alpha1/cluster_types.go | 75 + pkg/apis/cluster/v1alpha1/doc.go | 8 + .../cluster/v1alpha1/openapi_generated.go | 13697 ++++++++++++++++ .../{tower => cluster}/v1alpha1/register.go | 4 +- .../v1alpha1/zz_generated.deepcopy.go | 102 +- pkg/apis/iam/v1alpha2/user_types.go | 9 +- .../iam/v1alpha2/zz_generated.deepcopy.go | 7 +- .../v1alpha2/servicepolicy_types.go | 20 +- .../servicemesh/v1alpha2/strategy_types.go | 18 +- pkg/apis/tower/group.go | 1 - pkg/apis/tower/v1alpha1/doc.go | 8 - pkg/apiserver/apiserver.go | 2 +- pkg/apiserver/dispatch/dispatch.go | 10 +- pkg/client/clientset/versioned/clientset.go | 28 +- .../versioned/fake/clientset_generated.go | 14 +- .../clientset/versioned/fake/register.go | 4 +- .../clientset/versioned/scheme/register.go | 4 +- .../{tower => cluster}/v1alpha1/agent.go | 17 +- .../typed/cluster/v1alpha1/cluster.go | 180 + .../v1alpha1/cluster_client.go} | 35 +- 
.../typed/{tower => cluster}/v1alpha1/doc.go | 0 .../{tower => cluster}/v1alpha1/fake/doc.go | 0 .../v1alpha1/fake/fake_agent.go | 35 +- .../cluster/v1alpha1/fake/fake_cluster.go | 131 + .../v1alpha1/fake/fake_cluster_client.go} | 14 +- .../v1alpha1/generated_expansion.go | 2 + .../{tower => cluster}/interface.go | 4 +- .../{tower => cluster}/v1alpha1/agent.go | 21 +- .../cluster/v1alpha1/cluster.go | 88 + .../{tower => cluster}/v1alpha1/interface.go | 9 +- .../informers/externalversions/factory.go | 12 +- .../informers/externalversions/generic.go | 24 +- .../{tower => cluster}/v1alpha1/agent.go | 41 +- .../listers/cluster/v1alpha1/cluster.go | 65 + .../v1alpha1/expansion_generated.go | 6 +- pkg/controller/cluster/cluster_controller.go | 230 + .../cluster/cluster_controller_test.go | 1 + tools/cmd/crd-doc-gen/main.go | 16 + 57 files changed, 22388 insertions(+), 229 deletions(-) create mode 100644 config/crd/bases/cluster.kubesphere.io_agents.yaml create mode 100644 config/crd/bases/cluster.kubesphere.io_clusters.yaml create mode 100644 config/crd/bases/devops.kubesphere.io_s2ibinaries.yaml create mode 100644 config/crd/bases/devops.kubesphere.io_s2ibuilders.yaml create mode 100644 config/crd/bases/devops.kubesphere.io_s2ibuildertemplates.yaml create mode 100644 config/crd/bases/devops.kubesphere.io_s2iruns.yaml create mode 100644 config/crd/bases/iam.kubesphere.io_users.yaml create mode 100644 config/crd/bases/network.kubesphere.io_namespacenetworkpolicies.yaml create mode 100644 config/crd/bases/network.kubesphere.io_workspacenetworkpolicies.yaml create mode 100644 config/crd/bases/servicemesh.kubesphere.io_servicepolicies.yaml create mode 100644 config/crd/bases/servicemesh.kubesphere.io_strategies.yaml create mode 100644 config/crd/bases/tenant.kubesphere.io_workspaces.yaml rename pkg/apis/{addtoscheme_tower_v1alpha1.go => addtoscheme_cluster_v1alpha1.go} (68%) create mode 100644 pkg/apis/cluster/group.go rename pkg/apis/{tower => 
cluster}/v1alpha1/agent_types.go (94%) create mode 100644 pkg/apis/cluster/v1alpha1/cluster_types.go create mode 100644 pkg/apis/cluster/v1alpha1/doc.go create mode 100644 pkg/apis/cluster/v1alpha1/openapi_generated.go rename pkg/apis/{tower => cluster}/v1alpha1/register.go (90%) rename pkg/apis/{tower => cluster}/v1alpha1/zz_generated.deepcopy.go (59%) delete mode 100644 pkg/apis/tower/group.go delete mode 100644 pkg/apis/tower/v1alpha1/doc.go rename pkg/client/clientset/versioned/typed/{tower => cluster}/v1alpha1/agent.go (93%) create mode 100644 pkg/client/clientset/versioned/typed/cluster/v1alpha1/cluster.go rename pkg/client/clientset/versioned/typed/{tower/v1alpha1/tower_client.go => cluster/v1alpha1/cluster_client.go} (62%) rename pkg/client/clientset/versioned/typed/{tower => cluster}/v1alpha1/doc.go (100%) rename pkg/client/clientset/versioned/typed/{tower => cluster}/v1alpha1/fake/doc.go (100%) rename pkg/client/clientset/versioned/typed/{tower => cluster}/v1alpha1/fake/fake_agent.go (76%) create mode 100644 pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_cluster.go rename pkg/client/clientset/versioned/typed/{tower/v1alpha1/fake/fake_tower_client.go => cluster/v1alpha1/fake/fake_cluster_client.go} (75%) rename pkg/client/clientset/versioned/typed/{tower => cluster}/v1alpha1/generated_expansion.go (95%) rename pkg/client/informers/externalversions/{tower => cluster}/interface.go (97%) rename pkg/client/informers/externalversions/{tower => cluster}/v1alpha1/agent.go (71%) create mode 100644 pkg/client/informers/externalversions/cluster/v1alpha1/cluster.go rename pkg/client/informers/externalversions/{tower => cluster}/v1alpha1/interface.go (81%) rename pkg/client/listers/{tower => cluster}/v1alpha1/agent.go (54%) create mode 100644 pkg/client/listers/cluster/v1alpha1/cluster.go rename pkg/client/listers/{tower => cluster}/v1alpha1/expansion_generated.go (84%) create mode 100644 pkg/controller/cluster/cluster_controller.go create mode 100644 
pkg/controller/cluster/cluster_controller_test.go diff --git a/Makefile b/Makefile index 16cfa112d..56684a95b 100644 --- a/Makefile +++ b/Makefile @@ -83,6 +83,7 @@ openapi: go run ./vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go -O openapi_generated -i ./vendor/k8s.io/apimachinery/pkg/apis/meta/v1,./pkg/apis/servicemesh/v1alpha2 -p kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2 -h ./hack/boilerplate.go.txt --report-filename ./api/api-rules/violation_exceptions.list go run ./vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go -O openapi_generated -i ./vendor/k8s.io/api/networking/v1,./vendor/k8s.io/apimachinery/pkg/apis/meta/v1,./vendor/k8s.io/apimachinery/pkg/util/intstr,./pkg/apis/network/v1alpha1 -p kubesphere.io/kubesphere/pkg/apis/network/v1alpha1 -h ./hack/boilerplate.go.txt --report-filename ./api/api-rules/violation_exceptions.list go run ./vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go -O openapi_generated -i ./vendor/k8s.io/apimachinery/pkg/apis/meta/v1,./pkg/apis/devops/v1alpha1,./vendor/k8s.io/apimachinery/pkg/runtime,./vendor/k8s.io/api/core/v1 -p kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1 -h ./hack/boilerplate.go.txt --report-filename ./api/api-rules/violation_exceptions.list + go run ./vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go -O openapi_generated -i ./vendor/k8s.io/apimachinery/pkg/apis/meta/v1,./pkg/apis/cluster/v1alpha1,./vendor/k8s.io/apimachinery/pkg/runtime,./vendor/k8s.io/api/core/v1 -p kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1 -h ./hack/boilerplate.go.txt --report-filename ./api/api-rules/violation_exceptions.list go run ./tools/cmd/crd-doc-gen/main.go # Build the docker image docker-build: all diff --git a/api/api-rules/violation_exceptions.list b/api/api-rules/violation_exceptions.list index 57509feda..d315be3a9 100644 --- a/api/api-rules/violation_exceptions.list +++ b/api/api-rules/violation_exceptions.list @@ -141,27 +141,10 @@ API rule violation: 
list_type_missing,k8s.io/apimachinery/pkg/apis/meta/v1,Table API rule violation: list_type_missing,k8s.io/apimachinery/pkg/apis/meta/v1,UpdateOptions,DryRun API rule violation: list_type_missing,k8s.io/apimachinery/pkg/runtime,RawExtension,Raw API rule violation: list_type_missing,k8s.io/apimachinery/pkg/runtime,Unknown,Raw -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,ContainerConfig,Env -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,ContainerInfo,BuildVolumes -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,ContainerInfo,RuntimeArtifacts -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,Parameter,OptValues -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iAutoScale,Containers -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iBinaryList,Items -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iBuildResult,ImageRepoTags -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iBuilderList,Items -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iBuilderTemplateList,Items -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iBuilderTemplateSpec,ContainerInfo -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iBuilderTemplateSpec,Parameters -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,AddHost -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,BuildVolumes -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,DropCapabilities -API rule violation: 
list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,Environment -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,Injections -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,NodeAffinityValues -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,RuntimeArtifacts -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,SecurityOpt -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iRunList,Items -API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,UserDefineTemplate,Parameters +API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1,AgentList,Items +API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1,AgentStatus,Conditions +API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1,AgentStatus,KubeConfig +API rule violation: list_type_missing,kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1,ClusterList,Items API rule violation: names_match,k8s.io/api/core/v1,AzureDiskVolumeSource,DataDiskURI API rule violation: names_match,k8s.io/api/core/v1,ContainerStatus,LastTerminationState API rule violation: names_match,k8s.io/api/core/v1,DaemonEndpoint,Port @@ -198,13 +181,6 @@ API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,Time,Time API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,Unknown,ContentEncoding API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,Unknown,ContentType API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,Unknown,Raw -API rule violation: names_match,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,ContainerConfig,Env -API rule violation: 
names_match,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,ContainerConfig,Labels -API rule violation: names_match,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,DockerConfig,Endpoint -API rule violation: names_match,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iBinarySpec,MD5 -API rule violation: names_match,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iBuilderTemplateSpec,Parameters -API rule violation: names_match,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,CGroupLimits -API rule violation: names_match,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,CallbackURL -API rule violation: names_match,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,ImageScriptsURL -API rule violation: names_match,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,ScriptsURL -API rule violation: names_match,kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1,S2iConfig,SourceURL +API rule violation: names_match,kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1,AgentSpec,KubeSphereAPIServerPort +API rule violation: names_match,kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1,AgentSpec,Paused +API rule violation: names_match,kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1,AgentStatus,KubeConfig diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index ce706e6cf..ee43966b5 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -44,6 +44,1944 @@ } } }, + "/apis/cluster.kubesphere.io/": { + "get": { + "description": "get information of a group", + "consumes": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo" + ], + "operationId": "getClusterKubesphereIoAPIGroup", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": 
"#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup" + } + } + } + } + }, + "/apis/cluster.kubesphere.io/v1alpha1/": { + "get": { + "description": "get available resources", + "consumes": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "getClusterKubesphereIoV1alpha1APIResources", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList" + } + } + } + } + }, + "/apis/cluster.kubesphere.io/v1alpha1/agents": { + "get": { + "description": "list or watch objects of kind Agent", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf", + "application/json;stream=watch", + "application/vnd.kubernetes.protobuf;stream=watch" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "listClusterKubesphereIoV1alpha1Agent", + "parameters": [ + { + "uniqueItems": true, + "type": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", + "name": "allowWatchBookmarks", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "name": "continue", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "name": "fieldSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "name": "labelSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "name": "limit", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "name": "resourceVersion", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.", + "name": "timeoutSeconds", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "name": "watch", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.AgentList" + } + } + }, + "x-kubernetes-action": "list", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Agent" + } + }, + "post": { + "description": "create an Agent", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "createClusterKubesphereIoV1alpha1Agent", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "name": "fieldManager", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + }, + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + }, + "202": { + "description": "Accepted", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + } + }, + "x-kubernetes-action": "post", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Agent" + } + }, + "delete": { + "description": "delete collection of Agent", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "deleteClusterKubesphereIoV1alpha1CollectionAgent", + "parameters": [ + { + "uniqueItems": true, + "type": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", + "name": "allowWatchBookmarks", + "in": "query" + }, + { + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "name": "continue", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "name": "fieldSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "name": "gracePeriodSeconds", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "name": "labelSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "name": "limit", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "name": "orphanDependents", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "name": "propagationPolicy", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "name": "resourceVersion", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "name": "timeoutSeconds", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "name": "watch", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + } + }, + "x-kubernetes-action": "deletecollection", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Agent" + } + }, + "parameters": [ + { + "uniqueItems": true, + "type": "string", + "description": "If 'true', then the output is pretty printed.", + "name": "pretty", + "in": "query" + } + ] + }, + "/apis/cluster.kubesphere.io/v1alpha1/agents/{name}": { + "get": { + "description": "read the specified Agent", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "readClusterKubesphereIoV1alpha1Agent", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + } + }, + "x-kubernetes-action": "get", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Agent" + } 
+ }, + "put": { + "description": "replace the specified Agent", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "replaceClusterKubesphereIoV1alpha1Agent", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "name": "fieldManager", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + }, + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + } + }, + "x-kubernetes-action": "put", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Agent" + } + }, + "delete": { + "description": "delete an Agent", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "deleteClusterKubesphereIoV1alpha1Agent", + "parameters": [ + { + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. 
zero means delete immediately.", + "name": "gracePeriodSeconds", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "name": "orphanDependents", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "name": "propagationPolicy", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "202": { + "description": "Accepted", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + } + }, + "x-kubernetes-action": "delete", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Agent" + } + }, + "patch": { + "description": "partially update the specified Agent", + "consumes": [ + "application/json-patch+json", + "application/merge-patch+json", + "application/strategic-merge-patch+json", + "application/apply-patch+yaml" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": 
"patchClusterKubesphereIoV1alpha1Agent", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + "name": "fieldManager", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests.", + "name": "force", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + } + }, + "x-kubernetes-action": "patch", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Agent" + } + }, + "parameters": [ + { + "uniqueItems": true, + "type": "string", + "description": "name of the Agent", + "name": "name", + "in": "path", + "required": true + }, + { + "uniqueItems": true, + "type": "string", + "description": "If 'true', then the output is pretty printed.", + "name": "pretty", + "in": "query" + } + ] + }, + "/apis/cluster.kubesphere.io/v1alpha1/agents/{name}/status": { + "get": { + "description": "read status of the specified Agent", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "readClusterKubesphereIoV1alpha1AgentStatus", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + } + }, + "x-kubernetes-action": "get", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Agent" + } + }, + "put": { + "description": "replace status of the specified Agent", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "replaceClusterKubesphereIoV1alpha1AgentStatus", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } 
+ }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "name": "fieldManager", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + }, + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + } + }, + "x-kubernetes-action": "put", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Agent" + } + }, + "patch": { + "description": "partially update status of the specified Agent", + "consumes": [ + "application/json-patch+json", + "application/merge-patch+json", + "application/strategic-merge-patch+json", + "application/apply-patch+yaml" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "patchClusterKubesphereIoV1alpha1AgentStatus", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + "name": "fieldManager", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", + "name": "force", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + } + }, + "x-kubernetes-action": "patch", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Agent" + } + }, + "parameters": [ + { + "uniqueItems": true, + "type": "string", + "description": "name of the Agent", + "name": "name", + "in": "path", + "required": true + }, + { + "uniqueItems": true, + "type": "string", + "description": "If 'true', then the output is pretty printed.", + "name": "pretty", + "in": "query" + } + ] + }, + "/apis/cluster.kubesphere.io/v1alpha1/clusters": { + "get": { + "description": "list or watch objects of kind Cluster", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf", + "application/json;stream=watch", + "application/vnd.kubernetes.protobuf;stream=watch" + ], 
+ "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "listClusterKubesphereIoV1alpha1Cluster", + "parameters": [ + { + "uniqueItems": true, + "type": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", + "name": "allowWatchBookmarks", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "name": "continue", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "name": "fieldSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "name": "labelSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "name": "limit", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "name": "resourceVersion", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "name": "timeoutSeconds", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", + "name": "watch", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterList" + } + } + }, + "x-kubernetes-action": "list", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Cluster" + } + }, + "post": { + "description": "create a Cluster", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "createClusterKubesphereIoV1alpha1Cluster", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "name": "fieldManager", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + }, + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + }, + "202": { + "description": "Accepted", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + } + }, + "x-kubernetes-action": "post", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Cluster" + } + }, + "delete": { + "description": "delete collection of Cluster", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "deleteClusterKubesphereIoV1alpha1CollectionCluster", + "parameters": [ + { + "uniqueItems": true, + "type": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", + "name": "allowWatchBookmarks", + "in": "query" + }, + { + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "name": "continue", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "name": "fieldSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "name": "gracePeriodSeconds", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "name": "labelSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. 
This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "name": "limit", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "name": "orphanDependents", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "name": "propagationPolicy", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. 
When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "name": "resourceVersion", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "name": "timeoutSeconds", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "name": "watch", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + } + }, + "x-kubernetes-action": "deletecollection", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Cluster" + } + }, + "parameters": [ + { + "uniqueItems": true, + "type": "string", + "description": "If 'true', then the output is pretty printed.", + "name": "pretty", + "in": "query" + } + ] + }, + "/apis/cluster.kubesphere.io/v1alpha1/clusters/{name}": { + "get": { + "description": "read the specified Cluster", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "readClusterKubesphereIoV1alpha1Cluster", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + } + }, + "x-kubernetes-action": "get", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": 
"Cluster" + } + }, + "put": { + "description": "replace the specified Cluster", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "replaceClusterKubesphereIoV1alpha1Cluster", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. 
The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "name": "fieldManager", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + }, + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + } + }, + "x-kubernetes-action": "put", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Cluster" + } + }, + "delete": { + "description": "delete a Cluster", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "deleteClusterKubesphereIoV1alpha1Cluster", + "parameters": [ + { + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. 
zero means delete immediately.", + "name": "gracePeriodSeconds", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "name": "orphanDependents", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "name": "propagationPolicy", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "202": { + "description": "Accepted", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + } + }, + "x-kubernetes-action": "delete", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Cluster" + } + }, + "patch": { + "description": "partially update the specified Cluster", + "consumes": [ + "application/json-patch+json", + "application/merge-patch+json", + "application/strategic-merge-patch+json", + "application/apply-patch+yaml" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": 
"patchClusterKubesphereIoV1alpha1Cluster", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + "name": "fieldManager", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests.", + "name": "force", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + } + }, + "x-kubernetes-action": "patch", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Cluster" + } + }, + "parameters": [ + { + "uniqueItems": true, + "type": "string", + "description": "name of the Cluster", + "name": "name", + "in": "path", + "required": true + }, + { + "uniqueItems": true, + "type": "string", + "description": "If 'true', then the output is pretty printed.", + "name": "pretty", + "in": "query" + } + ] + }, + "/apis/cluster.kubesphere.io/v1alpha1/clusters/{name}/status": { + "get": { + "description": "read status of the specified Cluster", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "readClusterKubesphereIoV1alpha1ClusterStatus", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + } + }, + "x-kubernetes-action": "get", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Cluster" + } + }, + "put": { + "description": "replace status of the specified Cluster", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "replaceClusterKubesphereIoV1alpha1ClusterStatus", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": 
"#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "name": "fieldManager", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + }, + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + } + }, + "x-kubernetes-action": "put", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Cluster" + } + }, + "patch": { + "description": "partially update status of the specified Cluster", + "consumes": [ + "application/json-patch+json", + "application/merge-patch+json", + "application/strategic-merge-patch+json", + "application/apply-patch+yaml" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "patchClusterKubesphereIoV1alpha1ClusterStatus", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + { + "uniqueItems": true, + "type": "string", + "description": 
"When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "name": "dryRun", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + "name": "fieldManager", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", + "name": "force", + "in": "query" + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + } + }, + "x-kubernetes-action": "patch", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Cluster" + } + }, + "parameters": [ + { + "uniqueItems": true, + "type": "string", + "description": "name of the Cluster", + "name": "name", + "in": "path", + "required": true + }, + { + "uniqueItems": true, + "type": "string", + "description": "If 'true', then the output is pretty printed.", + "name": "pretty", + "in": "query" + } + ] + }, + "/apis/cluster.kubesphere.io/v1alpha1/watch/agents": { + "get": { + "description": "watch individual changes to a list of Agent. 
deprecated: use the 'watch' parameter with a list operation instead.", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf", + "application/json;stream=watch", + "application/vnd.kubernetes.protobuf;stream=watch" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "watchClusterKubesphereIoV1alpha1AgentList", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + } + }, + "x-kubernetes-action": "watchlist", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Agent" + } + }, + "parameters": [ + { + "uniqueItems": true, + "type": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", + "name": "allowWatchBookmarks", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "name": "continue", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "name": "fieldSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "name": "labelSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "name": "limit", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "If 'true', then the output is pretty printed.", + "name": "pretty", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "name": "resourceVersion", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.", + "name": "timeoutSeconds", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "name": "watch", + "in": "query" + } + ] + }, + "/apis/cluster.kubesphere.io/v1alpha1/watch/agents/{name}": { + "get": { + "description": "watch changes to an object of kind Agent. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf", + "application/json;stream=watch", + "application/vnd.kubernetes.protobuf;stream=watch" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "watchClusterKubesphereIoV1alpha1Agent", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + } + }, + "x-kubernetes-action": "watch", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Agent" + } + }, + "parameters": [ + { + "uniqueItems": true, + "type": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", + "name": "allowWatchBookmarks", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "name": "continue", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "name": "fieldSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their labels. 
Defaults to everything.", + "name": "labelSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "name": "limit", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "name of the Agent", + "name": "name", + "in": "path", + "required": true + }, + { + "uniqueItems": true, + "type": "string", + "description": "If 'true', then the output is pretty printed.", + "name": "pretty", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "name": "resourceVersion", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "name": "timeoutSeconds", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "name": "watch", + "in": "query" + } + ] + }, + "/apis/cluster.kubesphere.io/v1alpha1/watch/clusters": { + "get": { + "description": "watch individual changes to a list of Cluster. 
deprecated: use the 'watch' parameter with a list operation instead.", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf", + "application/json;stream=watch", + "application/vnd.kubernetes.protobuf;stream=watch" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "watchClusterKubesphereIoV1alpha1ClusterList", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + } + }, + "x-kubernetes-action": "watchlist", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Cluster" + } + }, + "parameters": [ + { + "uniqueItems": true, + "type": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", + "name": "allowWatchBookmarks", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "name": "continue", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "name": "fieldSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "name": "labelSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "name": "limit", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "If 'true', then the output is pretty printed.", + "name": "pretty", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "name": "resourceVersion", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.", + "name": "timeoutSeconds", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "name": "watch", + "in": "query" + } + ] + }, + "/apis/cluster.kubesphere.io/v1alpha1/watch/clusters/{name}": { + "get": { + "description": "watch changes to an object of kind Cluster. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", + "consumes": [ + "*/*" + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf", + "application/json;stream=watch", + "application/vnd.kubernetes.protobuf;stream=watch" + ], + "schemes": [ + "https" + ], + "tags": [ + "clusterKubesphereIo_v1alpha1" + ], + "operationId": "watchClusterKubesphereIoV1alpha1Cluster", + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + } + }, + "x-kubernetes-action": "watch", + "x-kubernetes-group-version-kind": { + "group": "cluster.kubesphere.io", + "version": "v1alpha1", + "kind": "Cluster" + } + }, + "parameters": [ + { + "uniqueItems": true, + "type": "boolean", + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. 
If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", + "name": "allowWatchBookmarks", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "name": "continue", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "name": "fieldSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "A selector to restrict the list of returned objects by their labels. 
Defaults to everything.", + "name": "labelSelector", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "name": "limit", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "name of the Cluster", + "name": "name", + "in": "path", + "required": true + }, + { + "uniqueItems": true, + "type": "string", + "description": "If 'true', then the output is pretty printed.", + "name": "pretty", + "in": "query" + }, + { + "uniqueItems": true, + "type": "string", + "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "name": "resourceVersion", + "in": "query" + }, + { + "uniqueItems": true, + "type": "integer", + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "name": "timeoutSeconds", + "in": "query" + }, + { + "uniqueItems": true, + "type": "boolean", + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", + "name": "watch", + "in": "query" + } + ] + }, "/apis/devops.kubesphere.io/": { "get": { "description": "get information of a group", @@ -6123,6 +8061,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "cluster.kubesphere.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "devops.kubesphere.io", "kind": "DeleteOptions", @@ -6554,6 +8497,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "cluster.kubesphere.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "devops.kubesphere.io", "kind": "WatchEvent", @@ -6585,6 +8533,263 @@ "type": "string", "format": "int-or-string" }, + "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent": { + "description": "Agent is the Schema for the agents API", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.AgentSpec" + }, + "status": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.AgentStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "cluster.kubesphere.io", + "kind": "Agent", + "version": "v1alpha1" + } + ] + }, + "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.AgentCondition": { + "type": "object", + "required": [ + "status" + ], + "properties": { + "lastTransitionTime": { + "description": "Last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "lastUpdateTime": { + "description": "The last time this condition was updated.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of AgentCondition", + "type": "string" + } + } + }, + "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.AgentList": { + "description": "AgentList contains a list of Agent", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Agent" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "cluster.kubesphere.io", + "kind": "AgentList", + "version": "v1alpha1" + } + ] + }, + "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.AgentSpec": { + "description": "AgentSpec defines the desired state of Agent", + "type": "object", + "properties": { + "Paused": { + "description": "Indicates that the agent is paused.", + "type": "boolean" + }, + "kubernetesAPIServerPort": { + "description": "KubeAPIServerPort is the port which listens for forwarding kube-apiserver traffic", + "type": "integer", + "format": "int32" + }, + "kubesphereAPIServerPort": { + "description": "KubeSphereAPIServerPort is the port which listens for forwarding kubesphere apigateway traffic", + "type": "integer", + "format": "int32" + }, + "proxy": { + "description": "Proxy address", + "type": "string" + }, + "token": { + "description": "Token used by agents to connect to proxy.", + "type": "string" + } + } + }, + "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.AgentStatus": { + "description": "AgentStatus defines the observed state of Agent", + "type": "object", + "required": [ + "KubeConfig" + ], + "properties": { + "KubeConfig": { + "description": "Issued new kubeconfig by proxy server", + "type": "string", + "format": "byte" + }, + 
"conditions": { + "description": "Represents the latest available observations of a agent's current state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.AgentCondition" + } + }, + "ping": { + "description": "Represents the connection quality, in ms", + "type": "integer", + "format": "int64" + } + } + }, + "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster": { + "description": "Cluster is the schema for the clusters API", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterSpec" + }, + "status": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "cluster.kubesphere.io", + "kind": "Cluster", + "version": "v1alpha1" + } + ] + }, + "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterList": { + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.Cluster" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "cluster.kubesphere.io", + "kind": "ClusterList", + "version": "v1alpha1" + } + ] + }, + "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterSpec": { + "type": "object", + "properties": { + "federated": { + "description": "Join cluster as kubefed cluster", + "type": "boolean" + }, + "state": { + "description": "Desired state of the cluster", + "type": "string" + } + } + }, + "io.kubesphere.kubesphere.pkg.apis.cluster.v1alpha1.ClusterStatus": { + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastTransitionTime": { + "description": "Last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "lastUpdateTime": { + "description": "The last time this condition was updated.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + 
"description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of the condition", + "type": "string" + } + } + }, "io.kubesphere.kubesphere.pkg.apis.devops.v1alpha1.AuthConfig": { "description": "AuthConfig is our abstraction of the Registry authorization information for whatever docker client we happen to be based on", "type": "object", diff --git a/config/crd/bases/cluster.kubesphere.io_agents.yaml b/config/crd/bases/cluster.kubesphere.io_agents.yaml new file mode 100644 index 000000000..e82bb6272 --- /dev/null +++ b/config/crd/bases/cluster.kubesphere.io_agents.yaml @@ -0,0 +1,109 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: agents.cluster.kubesphere.io +spec: + group: cluster.kubesphere.io + names: + kind: Agent + listKind: AgentList + plural: agents + singular: agent + scope: Namespaced + validation: + openAPIV3Schema: + description: Agent is the Schema for the agents API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AgentSpec defines the desired state of Agent + properties: + kubernetesAPIServerPort: + description: KubeAPIServerPort is the port which listens for forwarding + kube-apiserver traffic + type: integer + kubesphereAPIServerPort: + description: KubeSphereAPIServerPort is the port which listens for forwarding + kubesphere apigateway traffic + type: integer + paused: + description: Indicates that the agent is paused. + type: boolean + proxy: + description: Proxy address + type: string + token: + description: Token used by agents to connect to proxy. + type: string + type: object + status: + description: AgentStatus defines the observed state of Agent + properties: + conditions: + description: Represents the latest available observations of a agent's + current state. + items: + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + lastUpdateTime: + description: The last time this condition was updated. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. 
+ type: string + type: + description: Type of AgentCondition + type: string + required: + - status + type: object + type: array + kubeconfig: + description: Issued new kubeconfig by proxy server + format: byte + type: string + ping: + description: Represents the connection quality, in ms + format: int64 + type: integer + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/cluster.kubesphere.io_clusters.yaml b/config/crd/bases/cluster.kubesphere.io_clusters.yaml new file mode 100644 index 000000000..2992c8885 --- /dev/null +++ b/config/crd/bases/cluster.kubesphere.io_clusters.yaml @@ -0,0 +1,81 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: clusters.cluster.kubesphere.io +spec: + group: cluster.kubesphere.io + names: + kind: Cluster + listKind: ClusterList + plural: clusters + singular: cluster + scope: Namespaced + validation: + openAPIV3Schema: + description: Cluster is the schema for the clusters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + active: + description: Desired state of the cluster + type: boolean + federated: + description: Join cluster as kubefed cluster + type: boolean + type: object + status: + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to + another. + format: date-time + type: string + lastUpdateTime: + description: The last time this condition was updated. + format: date-time + type: string + message: + description: A human readable message indicating details about the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of the condition + type: string + required: + - status + - type + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/devops.kubesphere.io_s2ibinaries.yaml b/config/crd/bases/devops.kubesphere.io_s2ibinaries.yaml new file mode 100644 index 000000000..01658d5f6 --- /dev/null +++ b/config/crd/bases/devops.kubesphere.io_s2ibinaries.yaml @@ -0,0 +1,86 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: s2ibinaries.devops.kubesphere.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.fileName + name: FileName + type: string + - JSONPath: .spec.md5 + name: MD5 + type: string + - JSONPath: .spec.size + name: Size + type: string + - JSONPath: .status.phase + name: Phase + type: string + group: devops.kubesphere.io + names: + kind: S2iBinary + 
listKind: S2iBinaryList + plural: s2ibinaries + singular: s2ibinary + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + description: S2iBinary is the Schema for the s2ibinaries API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: S2iBinarySpec defines the desired state of S2iBinary + properties: + downloadURL: + description: DownloadURL in KubeSphere + type: string + fileName: + description: FileName is filename of binary + type: string + md5: + description: MD5 is Binary's MD5 Hash + type: string + size: + description: Size is the file size of file + type: string + uploadTimeStamp: + description: UploadTime is last upload time + format: date-time + type: string + type: object + status: + description: S2iBinaryStatus defines the observed state of S2iBinary + properties: + phase: + description: Phase is status of S2iBinary . 
Possible value is "Ready","UnableToDownload" + type: string + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/devops.kubesphere.io_s2ibuilders.yaml b/config/crd/bases/devops.kubesphere.io_s2ibuilders.yaml new file mode 100644 index 000000000..54df7a1cb --- /dev/null +++ b/config/crd/bases/devops.kubesphere.io_s2ibuilders.yaml @@ -0,0 +1,578 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: s2ibuilders.devops.kubesphere.io +spec: + additionalPrinterColumns: + - JSONPath: .status.runCount + name: RunCount + type: integer + - JSONPath: .status.lastRunState + name: LastRunState + type: string + - JSONPath: .status.lastRunName + name: LastRunName + type: string + - JSONPath: .status.lastRunStartTime + name: LastRunStartTime + type: date + group: devops.kubesphere.io + names: + kind: S2iBuilder + listKind: S2iBuilderList + plural: s2ibuilders + shortNames: + - s2ib + singular: s2ibuilder + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: S2iBuilder is the Schema for the s2ibuilders API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: S2iBuilderSpec defines the desired state of S2iBuilder + properties: + config: + description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + Important: Run "make" to regenerate code after modifying this file' + properties: + addHost: + description: AddHost Add a line to /etc/hosts for test purpose or + private use in LAN. Its format is host:IP,muliple hosts can be + added by using multiple --add-host + items: + type: string + type: array + asDockerfile: + description: AsDockerfile indicates the path where the Dockerfile + should be written instead of building a new image. + type: string + assembleUser: + description: AssembleUser specifies the user to run the assemble + script in container + type: string + blockOnBuild: + description: BlockOnBuild prevents s2i from performing a docker + build operation if one is necessary to execute ONBUILD commands, + or to layer source code into the container for images that don't + have a tar binary available, if the image contains ONBUILD commands + that would be executed. + type: boolean + branchExpression: + description: Regular expressions, ignoring names that do not match + the provided regular expression + type: string + buildVolumes: + description: BuildVolumes specifies a list of volumes to mount to + container running the build. + items: + type: string + type: array + builderBaseImageVersion: + description: BuilderBaseImageVersion provides optional version information + about the builder base image. + type: string + builderImage: + description: BuilderImage describes which image is used for building + the result images. + type: string + builderImageVersion: + description: BuilderImageVersion provides optional version information + about the builder image. 
+ type: string + builderPullPolicy: + description: BuilderPullPolicy specifies when to pull the builder + image + type: string + callbackUrl: + description: CallbackURL is a URL which is called upon successful + build to inform about that fact. + type: string + cgroupLimits: + description: CGroupLimits describes the cgroups limits that will + be applied to any containers run by s2i. + properties: + cpuPeriod: + format: int64 + type: integer + cpuQuota: + format: int64 + type: integer + cpuShares: + format: int64 + type: integer + memoryLimitBytes: + format: int64 + type: integer + memorySwap: + format: int64 + type: integer + parent: + type: string + required: + - cpuPeriod + - cpuQuota + - cpuShares + - memoryLimitBytes + - memorySwap + - parent + type: object + contextDir: + description: Specify a relative directory inside the application + repository that should be used as a root directory for the application. + type: string + description: + description: Description is a result image description label. The + default is no description. + type: string + destination: + description: Destination specifies a location where the untar operation + will place its artifacts. + type: string + displayName: + description: DisplayName is a result image display-name label. This + defaults to the output image name. + type: string + dockerConfig: + description: DockerConfig describes how to access host docker daemon. 
+ properties: + caFile: + description: CAFile is the certificate authority file path for + a TLS connection + type: string + certFile: + description: CertFile is the certificate file path for a TLS + connection + type: string + endPoint: + description: Endpoint is the docker network endpoint or socket + type: string + keyFile: + description: KeyFile is the key file path for a TLS connection + type: string + tlsVerify: + description: TLSVerify indicates if TLS peer must be verified + type: boolean + useTLS: + description: UseTLS indicates if TLS must be used + type: boolean + required: + - caFile + - certFile + - endPoint + - keyFile + - tlsVerify + - useTLS + type: object + dockerNetworkMode: + description: DockerNetworkMode is used to set the docker network + setting to --net=container: when the builder is invoked from + a container. + type: string + dropCapabilities: + description: DropCapabilities contains a list of capabilities to + drop when executing containers + items: + type: string + type: array + environment: + description: Environment is a map of environment variables to be + passed to the image. + items: + description: EnvironmentSpec specifies a single environment variable. + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + excludeRegExp: + description: ExcludeRegExp contains a string representation of the + regular expression desired for deciding which files to exclude + from the tar stream + type: string + export: + description: Export Push the result image to specify image registry + in tag + type: boolean + gitSecretRef: + description: GitSecretRef is the BasicAuth Secret of Git Clone + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + hasOnBuild: + description: HasOnBuild will be set to true if the builder image + contains ONBUILD instructions + type: boolean + imageName: + description: ImageName Contains the registry address and reponame, + tag should set by field tag alone + type: string + imageScriptsUrl: + description: ImageScriptsURL is the default location to find the + assemble/run scripts for a builder image. This url can be a reference + within the builder image if the scheme is specified as image:// + type: string + imageWorkDir: + description: ImageWorkDir is the default working directory for the + builder image. + type: string + incremental: + description: Incremental describes whether to try to perform incremental + build. + type: boolean + incrementalAuthentication: + description: IncrementalAuthentication holds the authentication + information for pulling the previous image from private repositories + properties: + email: + type: string + password: + type: string + secretRef: + description: LocalObjectReference contains enough information + to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + serverAddress: + type: string + username: + type: string + type: object + incrementalFromTag: + description: IncrementalFromTag sets an alternative image tag to + look for existing artifacts. Tag is used by default if this is + not set. + type: string + injections: + description: Injections specifies a list source/destination folders + that are injected to the container that runs assemble. All files + we inject will be truncated after the assemble script finishes. + items: + description: VolumeSpec represents a single volume mount point. 
+ properties: + destination: + description: Destination is the path to mount the volume to + - absolute or relative. + type: string + keep: + description: Keep indicates if the mounted data should be + kept in the final image. + type: boolean + source: + description: Source is a reference to the volume source. + type: string + type: object + type: array + isBinaryURL: + description: IsBinaryURL explain the type of SourceURL. If it is + IsBinaryURL, it will download the file directly without using + git. + type: boolean + keepSymlinks: + description: KeepSymlinks indicates to copy symlinks as symlinks. + Default behavior is to follow symlinks and copy files by content. + type: boolean + labelNamespace: + description: LabelNamespace provides the namespace under which the + labels will be generated. + type: string + labels: + additionalProperties: + type: string + description: Labels specify labels and their values to be applied + to the resulting image. Label keys must have non-zero length. + The labels defined here override generated labels in case they + have the same name. + type: object + layeredBuild: + description: LayeredBuild describes if this is build which layered + scripts and sources on top of BuilderImage. + type: boolean + nodeAffinityKey: + description: The key of Node Affinity. + type: string + nodeAffinityValues: + description: The values of Node Affinity. + items: + type: string + type: array + outputBuildResult: + description: Whether output build result to status. + type: boolean + outputImageName: + description: OutputImageName is a result image name without tag, + default is latest. tag will append to ImageName in the end + type: string + preserveWorkingDir: + description: PreserveWorkingDir describes if working directory should + be left after processing. 
+ type: boolean + previousImagePullPolicy: + description: PreviousImagePullPolicy specifies when to pull the + previously build image when doing incremental build + type: string + pullAuthentication: + description: PullAuthentication holds the authentication information + for pulling the Docker images from private repositories + properties: + email: + type: string + password: + type: string + secretRef: + description: LocalObjectReference contains enough information + to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + serverAddress: + type: string + username: + type: string + type: object + pushAuthentication: + description: PullAuthentication holds the authentication information + for pulling the Docker images from private repositories + properties: + email: + type: string + password: + type: string + secretRef: + description: LocalObjectReference contains enough information + to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + serverAddress: + type: string + username: + type: string + type: object + removePreviousImage: + description: RemovePreviousImage describes if previous image should + be removed after successful build. This applies only to incremental + builds. + type: boolean + revisionId: + description: The RevisionId is a branch name or a SHA-1 hash of + every important thing about the commit + type: string + runImage: + description: RunImage will trigger a "docker run ..." 
invocation + of the produced image so the user can see if it operates as he + would expect + type: boolean + runtimeArtifacts: + description: RuntimeArtifacts specifies a list of source/destination + pairs that will be copied from builder to a runtime image. Source + can be a file or directory. Destination must be a directory. Regardless + whether it is an absolute or relative path, it will be placed + into image's WORKDIR. Destination also can be empty or equals + to ".", in this case it just refers to a root of WORKDIR. In case + it's empty, S2I will try to get this list from io.openshift.s2i.assemble-input-files + label on a RuntimeImage. + items: + description: VolumeSpec represents a single volume mount point. + properties: + destination: + description: Destination is the path to mount the volume to + - absolute or relative. + type: string + keep: + description: Keep indicates if the mounted data should be + kept in the final image. + type: boolean + source: + description: Source is a reference to the volume source. + type: string + type: object + type: array + runtimeAuthentication: + description: RuntimeAuthentication holds the authentication information + for pulling the runtime Docker images from private repositories. + properties: + email: + type: string + password: + type: string + secretRef: + description: LocalObjectReference contains enough information + to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + serverAddress: + type: string + username: + type: string + type: object + runtimeImage: + description: RuntimeImage specifies the image that will be a base + for resulting image and will be used for running an application. 
+ By default, BuilderImage is used for building and running, but + the latter may be overridden. + type: string + runtimeImagePullPolicy: + description: RuntimeImagePullPolicy specifies when to pull a runtime + image. + type: string + scriptDownloadProxyConfig: + description: ScriptDownloadProxyConfig optionally specifies the + http and https proxy to use when downloading scripts + properties: + httpProxy: + type: string + httpsProxy: + type: string + type: object + scriptsUrl: + description: ScriptsURL is a URL describing where to fetch the S2I + scripts from during build process. This url can be a reference + within the builder image if the scheme is specified as image:// + type: string + secretCode: + description: SecretCode + type: string + securityOpt: + description: SecurityOpt are passed as options to the docker containers + launched by s2i. + items: + type: string + type: array + sourceUrl: + description: SourceURL is url of the codes such as https://github.com/a/b.git + type: string + tag: + description: Tag is a result image tag name. + type: string + taintKey: + description: The name of taint. + type: string + usage: + description: Usage allows for properly shortcircuiting s2i logic + when `s2i usage` is invoked + type: boolean + workingDir: + description: WorkingDir describes temporary directory used for downloading + sources, scripts and tar operations. 
+ type: string + workingSourceDir: + description: WorkingSourceDir describes the subdirectory off of + WorkingDir set up during the repo download that is later used + as the root for ignore processing + type: string + required: + - imageName + - sourceUrl + type: object + fromTemplate: + description: FromTemplate define some inputs from user + properties: + builderImage: + description: BaseImage specify which version of this template to + use + type: string + name: + description: Name specify a template to use, so many fields in Config + can left empty + type: string + parameters: + description: Parameters must use with `template`, fill some parameters + which template will use + items: + properties: + defaultValue: + type: string + description: + type: string + key: + type: string + optValues: + items: + type: string + type: array + required: + type: boolean + type: + type: string + value: + type: string + type: object + type: array + type: object + type: object + status: + description: S2iBuilderStatus defines the observed state of S2iBuilder + properties: + lastRunName: + description: LastRunState return the name of the newest run of this + builder + type: string + lastRunStartTime: + description: LastRunStartTime return the startTime of the newest run + of this builder + format: date-time + type: string + lastRunState: + description: LastRunState return the state of the newest run of this + builder + type: string + runCount: + description: RunCount represent the sum of s2irun of this builder + type: integer + required: + - runCount + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/devops.kubesphere.io_s2ibuildertemplates.yaml b/config/crd/bases/devops.kubesphere.io_s2ibuildertemplates.yaml new file mode 100644 index 000000000..50f0a4fed --- /dev/null +++ 
b/config/crd/bases/devops.kubesphere.io_s2ibuildertemplates.yaml @@ -0,0 +1,141 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: s2ibuildertemplates.devops.kubesphere.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.codeFramework + name: Framework + type: string + - JSONPath: .spec.defaultBaseImage + name: DefaultBaseImage + type: string + - JSONPath: .spec.version + name: Version + type: string + group: devops.kubesphere.io + names: + categories: + - devops + kind: S2iBuilderTemplate + listKind: S2iBuilderTemplateList + plural: s2ibuildertemplates + shortNames: + - s2ibt + singular: s2ibuildertemplate + scope: Cluster + subresources: {} + validation: + openAPIV3Schema: + description: S2iBuilderTemplate is the Schema for the s2ibuildertemplates API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: S2iBuilderTemplateSpec defines the desired state of S2iBuilderTemplate + properties: + codeFramework: + description: CodeFramework means which language this template is designed + for and which framework is using if has framework. Like Java, NodeJS + etc + type: string + containerInfo: + description: Images are the images this template will use. 
+ items: + properties: + buildVolumes: + description: BuildVolumes specifies a list of volumes to mount + to container running the build. + items: + type: string + type: array + builderImage: + description: BaseImage are the images this template will use. + type: string + runtimeArtifacts: + items: + description: VolumeSpec represents a single volume mount point. + properties: + destination: + description: Destination is the path to mount the volume + to - absolute or relative. + type: string + keep: + description: Keep indicates if the mounted data should be + kept in the final image. + type: boolean + source: + description: Source is a reference to the volume source. + type: string + type: object + type: array + runtimeImage: + type: string + type: object + type: array + defaultBaseImage: + description: DefaultBaseImage is the image that will be used by default + type: string + description: + description: Description illustrate the purpose of this template + type: string + environment: + description: Parameters is a set of environment variables to be passed + to the image. 
+ items: + properties: + defaultValue: + type: string + description: + type: string + key: + type: string + optValues: + items: + type: string + type: array + required: + type: boolean + type: + type: string + value: + type: string + type: object + type: array + iconPath: + description: IconPath is used for frontend display + type: string + version: + description: Version of template + type: string + type: object + status: + description: S2iBuilderTemplateStatus defines the observed state of S2iBuilderTemplate + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/devops.kubesphere.io_s2iruns.yaml b/config/crd/bases/devops.kubesphere.io_s2iruns.yaml new file mode 100644 index 000000000..492908cd6 --- /dev/null +++ b/config/crd/bases/devops.kubesphere.io_s2iruns.yaml @@ -0,0 +1,181 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: s2iruns.devops.kubesphere.io +spec: + additionalPrinterColumns: + - JSONPath: .status.runState + name: State + type: string + - JSONPath: .status.kubernetesJobName + name: K8sJobName + type: string + - JSONPath: .status.startTime + name: StartTime + type: date + - JSONPath: .status.completionTime + name: CompletionTime + type: date + - JSONPath: .status.s2iBuildResult.imageName + name: ImageName + type: string + group: devops.kubesphere.io + names: + kind: S2iRun + listKind: S2iRunList + plural: s2iruns + shortNames: + - s2ir + singular: s2irun + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: S2iRun is the Schema for the s2iruns API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: S2iRunSpec defines the desired state of S2iRun + properties: + backoffLimit: + description: BackoffLimit limits the restart count of each s2irun. Default + is 0 + format: int32 + type: integer + builderName: + description: BuilderName specifies the name of s2ibuilder, required + type: string + newRevisionId: + description: NewRevisionId overrides the default NewRevisionId in its + s2ibuilder. + type: string + newSourceURL: + description: NewSourceURL is used to download new binary artifacts + type: string + newTag: + description: NewTag overrides the default tag in its s2ibuilder, image + name cannot be changed. + type: string + secondsAfterFinished: + description: SecondsAfterFinished, if it is set and greater than zero, and + the job created by s2irun becomes successful or failed, the job will + be auto deleted after SecondsAfterFinished + format: int32 + type: integer + required: + - builderName + type: object + status: + description: S2iRunStatus defines the observed state of S2iRun + properties: + completionTime: + description: Represents time when the job was completed. It is not guaranteed + to be set in happens-before order across separate operations. It is + represented in RFC3339 form and is in UTC. 
+ format: date-time + type: string + kubernetesJobName: + description: KubernetesJobName is the job name in k8s + type: string + logURL: + description: LogURL is used for external log handler to let user know + where is log located in + type: string + runState: + description: RunState indicates whether this job is done or failed + type: string + s2iBuildResult: + description: S2i build result info. + properties: + commandPull: + description: Command for pull image. + type: string + imageCreated: + description: Image created time. + type: string + imageID: + description: Image ID. + type: string + imageName: + description: ImageName is the name of artifact + type: string + imageRepoTags: + description: image tags. + items: + type: string + type: array + imageSize: + description: The size in bytes of the image + format: int64 + type: integer + type: object + s2iBuildSource: + description: S2i build source info. + properties: + binaryName: + description: Binary file Name + type: string + binarySize: + description: Binary file Size + format: int64 + type: integer + builderImage: + description: BuilderImage describes which image is used for building + the result images. + type: string + commitID: + description: CommitID represents an arbitrary extended object reference + in Git as SHA-1 + type: string + committerEmail: + description: CommitterEmail contains the e-mail of the committer + type: string + committerName: + description: CommitterName contains the name of the committer + type: string + description: + description: Description is a result image description label. The + default is no description. 
+ type: string + revisionId: + description: The RevisionId is a branch name or a SHA-1 hash of + every important thing about the commit + type: string + sourceUrl: + description: SourceURL is url of the codes such as https://github.com/a/b.git + type: string + type: object + startTime: + description: StartTime represent when this run began + format: date-time + type: string + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/iam.kubesphere.io_users.yaml b/config/crd/bases/iam.kubesphere.io_users.yaml new file mode 100644 index 000000000..eec35e012 --- /dev/null +++ b/config/crd/bases/iam.kubesphere.io_users.yaml @@ -0,0 +1,117 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: users.iam.kubesphere.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.email + name: Email + type: string + - JSONPath: .status.state + name: Status + type: string + group: iam.kubesphere.io + names: + categories: + - iam + kind: User + listKind: UserList + plural: users + singular: user + scope: Cluster + subresources: {} + validation: + openAPIV3Schema: + description: User is the Schema for the users API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: UserSpec defines the desired state of User + properties: + description: + description: Description of the user. + type: string + displayName: + type: string + email: + description: Unique email address. + type: string + finalizers: + description: Finalizers is an opaque list of values that must be empty + to permanently remove object from storage. + items: + type: string + type: array + groups: + items: + type: string + type: array + lang: + description: The preferred written or spoken language for the user. + type: string + password: + type: string + required: + - email + - password + type: object + status: + description: UserStatus defines the observed state of User + properties: + conditions: + description: Represents the latest available observations of a namespace's + current state. + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of namespace controller condition. 
+ type: string + required: + - status + - type + type: object + type: array + state: + description: The user status + type: string + type: object + required: + - spec + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/network.kubesphere.io_namespacenetworkpolicies.yaml b/config/crd/bases/network.kubesphere.io_namespacenetworkpolicies.yaml new file mode 100644 index 000000000..0be1db2ab --- /dev/null +++ b/config/crd/bases/network.kubesphere.io_namespacenetworkpolicies.yaml @@ -0,0 +1,762 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: namespacenetworkpolicies.network.kubesphere.io +spec: + group: network.kubesphere.io + names: + categories: + - networking + kind: NamespaceNetworkPolicy + listKind: NamespaceNetworkPolicyList + plural: namespacenetworkpolicies + shortNames: + - nsnp + singular: namespacenetworkpolicy + scope: Namespaced + validation: + openAPIV3Schema: + description: NamespaceNetworkPolicy is the Schema for the namespacenetworkpolicies + API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NamespaceNetworkPolicySpec defines the desired state of NamespaceNetworkPolicy + properties: + egress: + description: The ordered set of egress rules. Each rule contains a + set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an action. + \ Both selector-based security Policy and security Profiles reference + rules - separated out as a list of rules for both ingress and egress + packet matching. \n Each positive match criteria has a negated version, + prefixed with ”Not”. All the match criteria within a rule must be + satisfied for a packet to match. A single rule can contain the positive + and negative version of a match and both must be satisfied for the + rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected namespaces + will be matched. When both NamespaceSelector and Selector + are defined on the same rule, then only workload endpoints + that are matched by both selectors will be selected by the + rule. \n For NetworkPolicy, an empty NamespaceSelector implies + that the Selector is limited to selecting only workload + endpoints in the same namespace as the NetworkPolicy. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all namespaces." 
+ type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or terminates + at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + description: "Port represents either a range of numeric + ports or a named port. \n - For a named port, set + the PortName, leaving MinPort and MaxPort as 0. - + For a port range, set MinPort and MaxPort to the (inclusive) + port numbers. Set PortName to \"\". - For a + single port, set MinPort = MaxPort and PortName = \"\"." + properties: + maxPort: + type: integer + minPort: + type: integer + portName: + type: string + type: object + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated selectors. + type: string + ports: + description: "Ports is an optional field that restricts the + rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges of + ports. \n Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to \"TCP\" or \"UDP\"." + items: + description: "Port represents either a range of numeric + ports or a named port. \n - For a named port, set + the PortName, leaving MinPort and MaxPort as 0. - + For a port range, set MinPort and MaxPort to the (inclusive) + port numbers. Set PortName to \"\". - For a + single port, set MinPort = MaxPort and PortName = \"\"." 
+ properties: + maxPort: + type: integer + minPort: + type: integer + portName: + type: string + type: object + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). Only + traffic that originates from (terminates at) endpoints matching + the selector will be matched. \n Note that: in addition + to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports negation. + \ The two types of negation are subtly different. One negates + the set of matched endpoints, the other negates the whole + match: \n \tSelector = \"!has(my_label)\" matches packets + that are from other Calico-controlled \tendpoints that do + not have the label “my_labelâ€. \n \tNotSelector = \"has(my_label)\" + matches packets that are not from Calico-controlled \tendpoints + that do have the label “my_labelâ€. \n The effect is that + the latter will accept packets from non-Calico sources whereas + the former is limited to packets from Calico-controlled + endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account + whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account + that matches the given label selector. If both Names + and Selector are specified then they are AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP requests. 
+ properties: + methods: + description: Methods is an optional field that restricts the + rule to apply only to HTTP requests that use one of the + listed HTTP Methods (e.g. GET, PUT, etc.) Multiple methods + are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts the + rule to apply to HTTP requests that use one of the listed + HTTP Paths. Multiple paths are OR''d together. e.g: - exact: + /foo - prefix: /bar NOTE: Each entry may ONLY specify either + a `exact` or a `prefix` match. The validator will check + for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel’s iptables firewall, which + Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example a + value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel’s iptables firewall, which + Calico uses to enforce the rule. 
+ type: integer + type: + description: Match on a specific ICMP type. For example a + value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + description: NotProtocol is the negated version of the Protocol + field. + type: string + protocol: + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", \"UDPLite\" + or an integer in the range 1-255." + type: string + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected namespaces + will be matched. When both NamespaceSelector and Selector + are defined on the same rule, then only workload endpoints + that are matched by both selectors will be selected by the + rule. \n For NetworkPolicy, an empty NamespaceSelector implies + that the Selector is limited to selecting only workload + endpoints in the same namespace as the NetworkPolicy. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or terminates + at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. 
Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + description: "Port represents either a range of numeric + ports or a named port. \n - For a named port, set + the PortName, leaving MinPort and MaxPort as 0. - + For a port range, set MinPort and MaxPort to the (inclusive) + port numbers. Set PortName to \"\". - For a + single port, set MinPort = MaxPort and PortName = \"\"." + properties: + maxPort: + type: integer + minPort: + type: integer + portName: + type: string + type: object + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated selectors. + type: string + ports: + description: "Ports is an optional field that restricts the + rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges of + ports. \n Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to \"TCP\" or \"UDP\"." + items: + description: "Port represents either a range of numeric + ports or a named port. \n - For a named port, set + the PortName, leaving MinPort and MaxPort as 0. - + For a port range, set MinPort and MaxPort to the (inclusive) + port numbers. Set PortName to \"\". - For a + single port, set MinPort = MaxPort and PortName = \"\"." + properties: + maxPort: + type: integer + minPort: + type: integer + portName: + type: string + type: object + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). Only + traffic that originates from (terminates at) endpoints matching + the selector will be matched. 
\n Note that: in addition + to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports negation. + \ The two types of negation are subtly different. One negates + the set of matched endpoints, the other negates the whole + match: \n \tSelector = \"!has(my_label)\" matches packets + that are from other Calico-controlled \tendpoints that do + not have the label “my_labelâ€. \n \tNotSelector = \"has(my_label)\" + matches packets that are not from Calico-controlled \tendpoints + that do have the label “my_labelâ€. \n The effect is that + the latter will accept packets from non-Calico sources whereas + the former is limited to packets from Calico-controlled + endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account + whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account + that matches the given label selector. If both Names + and Selector are specified then they are AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: The ordered set of ingress rules. Each rule contains a + set of packet match criteria and a corresponding action to apply. + items: + description: "A Rule encapsulates a set of match criteria and an action. + \ Both selector-based security Policy and security Profiles reference + rules - separated out as a list of rules for both ingress and egress + packet matching. 
\n Each positive match criteria has a negated version, + prefixed with â€Notâ€. All the match criteria within a rule must be + satisfied for a packet to match. A single rule can contain the positive + and negative version of a match and both must be satisfied for the + rule to match." + properties: + action: + type: string + destination: + description: Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected namespaces + will be matched. When both NamespaceSelector and Selector + are defined on the same rule, then only workload endpoints + that are matched by both selectors will be selected by the + rule. \n For NetworkPolicy, an empty NamespaceSelector implies + that the Selector is limited to selecting only workload + endpoints in the same namespace as the NetworkPolicy. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or terminates + at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + description: "Port represents either a range of numeric + ports or a named port. \n - For a named port, set + the PortName, leaving MinPort and MaxPort as 0. - + For a port range, set MinPort and MaxPort to the (inclusive) + port numbers. Set PortName to \"\". 
- For a + single port, set MinPort = MaxPort and PortName = \"\"." + properties: + maxPort: + type: integer + minPort: + type: integer + portName: + type: string + type: object + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated selectors. + type: string + ports: + description: "Ports is an optional field that restricts the + rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges of + ports. \n Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to \"TCP\" or \"UDP\"." + items: + description: "Port represents either a range of numeric + ports or a named port. \n - For a named port, set + the PortName, leaving MinPort and MaxPort as 0. - + For a port range, set MinPort and MaxPort to the (inclusive) + port numbers. Set PortName to \"\". - For a + single port, set MinPort = MaxPort and PortName = \"\"." + properties: + maxPort: + type: integer + minPort: + type: integer + portName: + type: string + type: object + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). Only + traffic that originates from (terminates at) endpoints matching + the selector will be matched. \n Note that: in addition + to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports negation. + \ The two types of negation are subtly different. One negates + the set of matched endpoints, the other negates the whole + match: \n \tSelector = \"!has(my_label)\" matches packets + that are from other Calico-controlled \tendpoints that do + not have the label “my_labelâ€. 
\n \tNotSelector = \"has(my_label)\" + matches packets that are not from Calico-controlled \tendpoints + that do have the label “my_labelâ€. \n The effect is that + the latter will accept packets from non-Calico sources whereas + the former is limited to packets from Calico-controlled + endpoints." + type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account + whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account + that matches the given label selector. If both Names + and Selector are specified then they are AND'ed. + type: string + type: object + type: object + http: + description: HTTP contains match criteria that apply to HTTP requests. + properties: + methods: + description: Methods is an optional field that restricts the + rule to apply only to HTTP requests that use one of the + listed HTTP Methods (e.g. GET, PUT, etc.) Multiple methods + are OR'd together. + items: + type: string + type: array + paths: + description: 'Paths is an optional field that restricts the + rule to apply to HTTP requests that use one of the listed + HTTP Paths. Multiple paths are OR''d together. e.g: - exact: + /foo - prefix: /bar NOTE: Each entry may ONLY specify either + a `exact` or a `prefix` match. The validator will check + for it.' + items: + description: 'HTTPPath specifies an HTTP path to match. 
+ It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix' + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel’s iptables firewall, which + Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example a + value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel’s iptables firewall, which + Calico uses to enforce the rule. + type: integer + type: + description: Match on a specific ICMP type. For example a + value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + description: NotProtocol is the negated version of the Protocol + field. + type: string + protocol: + description: "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", \"UDPLite\" + or an integer in the range 1-255." 
+ type: string + source: + description: Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected namespaces + will be matched. When both NamespaceSelector and Selector + are defined on the same rule, then only workload endpoints + that are matched by both selectors will be selected by the + rule. \n For NetworkPolicy, an empty NamespaceSelector implies + that the Selector is limited to selecting only workload + endpoints in the same namespace as the NetworkPolicy. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all namespaces." + type: string + nets: + description: Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or terminates + at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: NotNets is the negated version of the Nets field. + items: + type: string + type: array + notPorts: + description: NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + description: "Port represents either a range of numeric + ports or a named port. \n - For a named port, set + the PortName, leaving MinPort and MaxPort as 0. - + For a port range, set MinPort and MaxPort to the (inclusive) + port numbers. Set PortName to \"\". - For a + single port, set MinPort = MaxPort and PortName = \"\"." + properties: + maxPort: + type: integer + minPort: + type: integer + portName: + type: string + type: object + type: array + notSelector: + description: NotSelector is the negated version of the Selector + field. 
See Selector field for subtleties with negated selectors. + type: string + ports: + description: "Ports is an optional field that restricts the + rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges of + ports. \n Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to \"TCP\" or \"UDP\"." + items: + description: "Port represents either a range of numeric + ports or a named port. \n - For a named port, set + the PortName, leaving MinPort and MaxPort as 0. - + For a port range, set MinPort and MaxPort to the (inclusive) + port numbers. Set PortName to \"\". - For a + single port, set MinPort = MaxPort and PortName = \"\"." + properties: + maxPort: + type: integer + minPort: + type: integer + portName: + type: string + type: object + type: array + selector: + description: "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). Only + traffic that originates from (terminates at) endpoints matching + the selector will be matched. \n Note that: in addition + to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports negation. + \ The two types of negation are subtly different. One negates + the set of matched endpoints, the other negates the whole + match: \n \tSelector = \"!has(my_label)\" matches packets + that are from other Calico-controlled \tendpoints that do + not have the label “my_label”. \n \tNotSelector = \"has(my_label)\" + matches packets that are not from Calico-controlled \tendpoints + that do have the label “my_label”. \n The effect is that + the latter will accept packets from non-Calico sources whereas + the former is limited to packets from Calico-controlled + endpoints."
+ type: string + serviceAccounts: + description: ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from (or + terminates at) a pod running as a matching service account. + properties: + names: + description: Names is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account + whose name is in the list. + items: + type: string + type: array + selector: + description: Selector is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a service account + that matches the given label selector. If both Names + and Selector are specified then they are AND'ed. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: integer + selector: + description: "The selector is an expression used to pick pick out the + endpoints that the policy should be applied to. \n Selector expressions + follow this syntax: \n \tlabel == \"string_literal\" -> comparison, + e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" -> not + equal; also matches if label is not present \tlabel in { \"a\", \"b\", + \"c\", ... } -> true if the value of label X is one of \"a\", \"b\", + \"c\" \tlabel not in { \"a\", \"b\", \"c\", ... } -> true if the + value of label X is not one of \"a\", \"b\", \"c\" \thas(label_name) + \ -> True if that label is present \t! 
expr -> negation of expr \texpr + && expr -> Short-circuit and \texpr || expr -> Short-circuit or + \t( expr ) -> parens for grouping \tall() or the empty selector -> + matches all endpoints. \n Label names are allowed to contain alphanumerics, + -, _ and /. String literals are more permissive but they do not support + escape characters. \n Examples (with made-up labels): \n \ttype == + \"webserver\" && deployment == \"prod\" \ttype in {\"frontend\", \"backend\"} + \tdeployment != \"dev\" \t! has(label_name)" + type: string + types: + description: "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so the + value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress are present in the policy. The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. \n + When the policy is read back again, Types will always be one of these + values, never empty or nil." 
+ items: + type: string + type: array + required: + - selector + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/network.kubesphere.io_workspacenetworkpolicies.yaml b/config/crd/bases/network.kubesphere.io_workspacenetworkpolicies.yaml new file mode 100644 index 000000000..77302069b --- /dev/null +++ b/config/crd/bases/network.kubesphere.io_workspacenetworkpolicies.yaml @@ -0,0 +1,535 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: workspacenetworkpolicies.network.kubesphere.io +spec: + group: network.kubesphere.io + names: + categories: + - networking + kind: WorkspaceNetworkPolicy + listKind: WorkspaceNetworkPolicyList + plural: workspacenetworkpolicies + shortNames: + - wsnp + singular: workspacenetworkpolicy + scope: Cluster + validation: + openAPIV3Schema: + description: WorkspaceNetworkPolicy is a set of network policies applied to + the scope to workspace + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: WorkspaceNetworkPolicySpec defines the desired state of WorkspaceNetworkPolicy + properties: + egress: + description: List of egress rules to be applied to the selected pods. + Outgoing traffic is allowed if there are no NetworkPolicies selecting + the pod (and cluster policy otherwise allows the traffic), OR if the + traffic matches at least one egress rule across all of the NetworkPolicy + objects whose podSelector matches the pod. If this field is empty + then this NetworkPolicy limits all outgoing traffic (and serves solely + to ensure that the pods it selects are isolated by default). This + field is beta-level in 1.8 + items: + description: WorkspaceNetworkPolicyEgressRule describes a particular + set of traffic that is allowed out of pods matched by a WorkspaceNetworkPolicySpec's + podSelector. The traffic must match both ports and to. + properties: + from: + description: List of sources which should be able to access the + pods selected for this rule. Items in this list are combined + using a logical OR operation. If this field is empty or missing, + this rule matches all sources (traffic not restricted by source). + If this field is present and contains at least on item, this + rule allows traffic only if the traffic matches at least one + item in the from list. + items: + description: WorkspaceNetworkPolicyPeer describes a peer to + allow traffic from. Only certain combinations of fields are + allowed. It is same as 'NetworkPolicyPeer' in k8s but with + an additional field 'WorkspaceSelector' + properties: + ipBlock: + description: IPBlock defines policy on a particular IPBlock. + If this field is set then neither of the other fields + can be. 
+ properties: + cidr: + description: CIDR is a string representing the IP Block + Valid examples are "192.168.1.1/24" + type: string + except: + description: Except is a slice of CIDRs that should + not be included within an IP Block Valid examples + are "192.168.1.1/24" Except values will be rejected + if they are outside the CIDR range + items: + type: string + type: array + required: + - cidr + type: object + namespaceSelector: + description: "Selects Namespaces using cluster-scoped labels. + This field follows standard label selector semantics; + if present but empty, it selects all namespaces. \n If + PodSelector is also set, then the NetworkPolicyPeer as + a whole selects the Pods matching PodSelector in the Namespaces + selected by NamespaceSelector. Otherwise it selects all + Pods in the Namespaces selected by NamespaceSelector." + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
+ A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + podSelector: + description: "This is a label selector which selects Pods. + This field follows standard label selector semantics; + if present but empty, it selects all pods. \n If NamespaceSelector + is also set, then the NetworkPolicyPeer as a whole selects + the Pods matching PodSelector in the Namespaces selected + by NamespaceSelector. Otherwise it selects the Pods matching + PodSelector in the policy's own Namespace." + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
+ type: object + type: object + workspaceSelector: + description: A label selector is a label query over a set + of resources. The result of matchLabels and matchExpressions + are ANDed. An empty label selector matches all objects. + A null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + type: object + type: array + ports: + description: List of ports which should be made accessible on + the pods selected for this rule. Each item in this list is combined + using a logical OR. If this field is empty or missing, this + rule matches all ports (traffic not restricted by port). 
If + this field is present and contains at least one item, then this + rule allows traffic only if the traffic matches at least one + port in the list. + items: + description: NetworkPolicyPort describes a port to allow traffic + on + properties: + port: + anyOf: + - type: integer + - type: string + description: The port on the given protocol. This can either + be a numerical or named port on a pod. If this field is + not provided, this matches all port names and numbers. + x-kubernetes-int-or-string: true + protocol: + description: The protocol (TCP, UDP, or SCTP) which traffic + must match. If not specified, this field defaults to TCP. + type: string + type: object + type: array + type: object + type: array + ingress: + description: List of ingress rules to be applied to the selected pods. + Traffic is allowed to a pod if there are no NetworkPolicies selecting + the pod (and cluster policy otherwise allows the traffic), OR if the + traffic source is the pod's local node, OR if the traffic matches + at least one ingress rule across all of the NetworkPolicy objects + whose podSelector matches the pod. If this field is empty then this + NetworkPolicy does not allow any traffic (and serves solely to ensure + that the pods it selects are isolated by default) + items: + description: WorkspaceNetworkPolicyIngressRule describes a particular + set of traffic that is allowed to the pods matched by a WorkspaceNetworkPolicySpec's + podSelector. The traffic must match both ports and from. + properties: + from: + description: List of sources which should be able to access the + pods selected for this rule. Items in this list are combined + using a logical OR operation. If this field is empty or missing, + this rule matches all sources (traffic not restricted by source). + If this field is present and contains at least on item, this + rule allows traffic only if the traffic matches at least one + item in the from list. 
+ items: + description: WorkspaceNetworkPolicyPeer describes a peer to + allow traffic from. Only certain combinations of fields are + allowed. It is same as 'NetworkPolicyPeer' in k8s but with + an additional field 'WorkspaceSelector' + properties: + ipBlock: + description: IPBlock defines policy on a particular IPBlock. + If this field is set then neither of the other fields + can be. + properties: + cidr: + description: CIDR is a string representing the IP Block + Valid examples are "192.168.1.1/24" + type: string + except: + description: Except is a slice of CIDRs that should + not be included within an IP Block Valid examples + are "192.168.1.1/24" Except values will be rejected + if they are outside the CIDR range + items: + type: string + type: array + required: + - cidr + type: object + namespaceSelector: + description: "Selects Namespaces using cluster-scoped labels. + This field follows standard label selector semantics; + if present but empty, it selects all namespaces. \n If + PodSelector is also set, then the NetworkPolicyPeer as + a whole selects the Pods matching PodSelector in the Namespaces + selected by NamespaceSelector. Otherwise it selects all + Pods in the Namespaces selected by NamespaceSelector." + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. 
If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + podSelector: + description: "This is a label selector which selects Pods. + This field follows standard label selector semantics; + if present but empty, it selects all pods. \n If NamespaceSelector + is also set, then the NetworkPolicyPeer as a whole selects + the Pods matching PodSelector in the Namespaces selected + by NamespaceSelector. Otherwise it selects the Pods matching + PodSelector in the policy's own Namespace." + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. + type: object + type: object + workspaceSelector: + description: A label selector is a label query over a set + of resources. The result of matchLabels and matchExpressions + are ANDed. An empty label selector matches all objects. + A null label selector matches no objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, + NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists + or DoesNotExist, the values array must be empty. + This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field + is "key", the operator is "In", and the values array + contains only "value". The requirements are ANDed. 
+ type: object + type: object + type: object + type: array + ports: + description: List of ports which should be made accessible on + the pods selected for this rule. Each item in this list is combined + using a logical OR. If this field is empty or missing, this + rule matches all ports (traffic not restricted by port). If + this field is present and contains at least one item, then this + rule allows traffic only if the traffic matches at least one + port in the list. + items: + description: NetworkPolicyPort describes a port to allow traffic + on + properties: + port: + anyOf: + - type: integer + - type: string + description: The port on the given protocol. This can either + be a numerical or named port on a pod. If this field is + not provided, this matches all port names and numbers. + x-kubernetes-int-or-string: true + protocol: + description: The protocol (TCP, UDP, or SCTP) which traffic + must match. If not specified, this field defaults to TCP. + type: string + type: object + type: array + type: object + type: array + policyTypes: + description: List of rule types that the WorkspaceNetworkPolicy relates + to. Valid options are Ingress, Egress, or Ingress,Egress. If this + field is not specified, it will default based on the existence of + Ingress or Egress rules; policies that contain an Egress section are + assumed to affect Egress, and all policies (whether or not they contain + an Ingress section) are assumed to affect Ingress. If you want to + write an egress-only policy, you must explicitly specify policyTypes + [ "Egress" ]. Likewise, if you want to write a policy that specifies + that no egress is allowed, you must specify a policyTypes value that + include "Egress" (since such a policy would not include an Egress + section and would otherwise default to just [ "Ingress" ]). 
+ items: + description: Policy Type string describes the NetworkPolicy type This + type is beta-level in 1.8 + type: string + type: array + workspace: + description: Workspace specify the name of ws to apply this workspace + network policy + type: string + type: object + status: + description: WorkspaceNetworkPolicyStatus defines the observed state of + WorkspaceNetworkPolicy + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/servicemesh.kubesphere.io_servicepolicies.yaml b/config/crd/bases/servicemesh.kubesphere.io_servicepolicies.yaml new file mode 100644 index 000000000..252260dc6 --- /dev/null +++ b/config/crd/bases/servicemesh.kubesphere.io_servicepolicies.yaml @@ -0,0 +1,1605 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: servicepolicies.servicemesh.kubesphere.io +spec: + group: servicemesh.kubesphere.io + names: + kind: ServicePolicy + listKind: ServicePolicyList + plural: servicepolicies + singular: servicepolicy + scope: Namespaced + validation: + openAPIV3Schema: + description: ServicePolicy is the Schema for the servicepolicies API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ServicePolicySpec defines the desired state of ServicePolicy + properties: + selector: + description: Label selector for destination rules. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + template: + description: Template used to create a destination rule + properties: + metadata: + description: Metadata of the virtual services created from this + template + type: object + spec: + description: Spec indicates the behavior of a destination rule. + properties: + export_to: + description: "A list of namespaces to which this destination + rule is exported. 
The resolution of a destination rule to + apply to a service occurs in the context of a hierarchy of + namespaces. Exporting a destination rule allows it to be included + in the resolution hierarchy for services in other namespaces. + This feature provides a mechanism for service owners and mesh + administrators to control the visibility of destination rules + across namespace boundaries. \n If no namespaces are specified + then the destination rule is exported to all namespaces by + default. \n The value \".\" is reserved and defines an export + to the same namespace that the destination rule is declared + in. Similarly, the value \"*\" is reserved and defines an + export to all namespaces. \n NOTE: in the current release, + the `exportTo` value is restricted to \".\" or \"*\" (i.e., + the current namespace or all namespaces)." + items: + type: string + type: array + host: + description: "The name of a service from the service registry. + Service names are looked up from the platform's service registry + (e.g., Kubernetes services, Consul services, etc.) and from + the hosts declared by [ServiceEntries](https://istio.io/docs/reference/config/networking/service-entry/#ServiceEntry). + Rules defined for services that do not exist in the service + registry will be ignored. \n *Note for Kubernetes users*: + When short names are used (e.g. \"reviews\" instead of \"reviews.default.svc.cluster.local\"), + Istio will interpret the short name based on the namespace + of the rule, not the service. A rule in the \"default\" namespace + containing a host \"reviews\" will be interpreted as \"reviews.default.svc.cluster.local\", + irrespective of the actual namespace associated with the reviews + service. _To avoid potential misconfigurations, it is recommended + to always use fully qualified domain names over short names._ + \n Note that the host field applies to both HTTP and TCP services." 
+ type: string + subsets: + description: One or more named sets that represent individual + versions of a service. Traffic policies can be overridden + at subset level. + items: + description: "A subset of endpoints of a service. Subsets + can be used for scenarios like A/B testing, or routing to + a specific version of a service. Refer to [VirtualService](https://istio.io/docs/reference/config/networking/virtual-service/#VirtualService) + documentation for examples of using subsets in these scenarios. + In addition, traffic policies defined at the service-level + can be overridden at a subset-level. The following rule + uses a round robin load balancing policy for all traffic + going to a subset named testversion that is composed of + endpoints (e.g., pods) with labels (version:v3). \n ```yaml + apiVersion: networking.istio.io/v1alpha3 kind: DestinationRule + metadata: name: bookinfo-ratings spec: host: ratings.prod.svc.cluster.local + \ trafficPolicy: loadBalancer: simple: LEAST_CONN + \ subsets: - name: testversion labels: version: + v3 trafficPolicy: loadBalancer: simple: + ROUND_ROBIN ``` \n **Note:** Policies specified for subsets + will not take effect until a route rule explicitly sends + traffic to this subset. \n One or more labels are typically + required to identify the subset destination, however, when + the corresponding DestinationRule represents a host that + supports multiple SNI hosts (e.g., an egress gateway), a + subset without labels may be meaningful. In this case a + traffic policy with [TLSSettings](#TLSSettings) can be used + to identify a specific SNI host corresponding to the named + subset." + properties: + labels: + additionalProperties: + type: string + description: Labels apply a filter over the endpoints + of a service in the service registry. See route rules + for examples of usage. + type: object + name: + description: Name of the subset. The service name and + the subset name can be used for traffic splitting in + a route rule. 
+ type: string + traffic_policy: + description: Traffic policies that apply to this subset. + Subsets inherit the traffic policies specified at the + DestinationRule level. Settings specified at the subset + level will override the corresponding settings specified + at the DestinationRule level. + properties: + connection_pool: + description: Settings controlling the volume of connections + to an upstream service + properties: + http: + description: HTTP connection pool settings. + properties: + h2_upgrade_policy: + description: Specify if http1.1 connection + should be upgraded to http2 for the associated + destination. + format: int32 + type: integer + http1_max_pending_requests: + description: Maximum number of pending HTTP + requests to a destination. Default 2^32-1. + format: int32 + type: integer + http2_max_requests: + description: Maximum number of requests to + a backend. Default 2^32-1. + format: int32 + type: integer + idle_timeout: + description: The idle timeout for upstream + connection pool connections. The idle timeout + is defined as the period in which there + are no active requests. If not set, there + is no idle timeout. When the idle timeout + is reached the connection will be closed. + Note that request based timeouts mean that + HTTP/2 PINGs will not keep the connection + alive. Applies to both HTTP1.1 and HTTP2 + connections. + properties: + nanos: + description: Signed fractions of a second + at nanosecond resolution of the span + of time. Durations less than one second + are represented with a 0 `seconds` field + and a positive or negative `nanos` field. + For durations of one second or more, + a non-zero value for the `nanos` field + must be of the same sign as the `seconds` + field. Must be from -999,999,999 to + +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span + of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. 
Note: + these bounds are computed from: 60 sec/min + * 60 min/hr * 24 hr/day * 365.25 days/year + * 10000 years' + format: int64 + type: integer + type: object + max_requests_per_connection: + description: Maximum number of requests per + connection to a backend. Setting this parameter + to 1 disables keep alive. Default 0, meaning + "unlimited", up to 2^29. + format: int32 + type: integer + max_retries: + description: Maximum number of retries that + can be outstanding to all hosts in a cluster + at a given time. Defaults to 2^32-1. + format: int32 + type: integer + type: object + tcp: + description: Settings common to both HTTP and + TCP upstream connections. + properties: + connect_timeout: + description: TCP connection timeout. + properties: + nanos: + description: Signed fractions of a second + at nanosecond resolution of the span + of time. Durations less than one second + are represented with a 0 `seconds` field + and a positive or negative `nanos` field. + For durations of one second or more, + a non-zero value for the `nanos` field + must be of the same sign as the `seconds` + field. Must be from -999,999,999 to + +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span + of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. Note: + these bounds are computed from: 60 sec/min + * 60 min/hr * 24 hr/day * 365.25 days/year + * 10000 years' + format: int64 + type: integer + type: object + max_connections: + description: Maximum number of HTTP1 /TCP + connections to a destination host. Default + 2^32-1. + format: int32 + type: integer + tcp_keepalive: + description: If set then set SO_KEEPALIVE + on the socket to enable TCP Keepalives. + properties: + interval: + description: The time duration between + keep-alive probes. Default is to use + the OS level configuration (unless overridden, + Linux defaults to 75s.) 
+ properties: + nanos: + description: Signed fractions of a + second at nanosecond resolution + of the span of time. Durations less + than one second are represented + with a 0 `seconds` field and a positive + or negative `nanos` field. For durations + of one second or more, a non-zero + value for the `nanos` field must + be of the same sign as the `seconds` + field. Must be from -999,999,999 + to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the + span of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. Note: + these bounds are computed from: + 60 sec/min * 60 min/hr * 24 hr/day + * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + probes: + description: Maximum number of keepalive + probes to send without response before + deciding the connection is dead. Default + is to use the OS level configuration + (unless overridden, Linux defaults to + 9.) + format: int32 + type: integer + time: + description: The time duration a connection + needs to be idle before keep-alive probes + start being sent. Default is to use + the OS level configuration (unless overridden, + Linux defaults to 7200s (ie 2 hours.) + properties: + nanos: + description: Signed fractions of a + second at nanosecond resolution + of the span of time. Durations less + than one second are represented + with a 0 `seconds` field and a positive + or negative `nanos` field. For durations + of one second or more, a non-zero + value for the `nanos` field must + be of the same sign as the `seconds` + field. Must be from -999,999,999 + to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the + span of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. 
Note: + these bounds are computed from: + 60 sec/min * 60 min/hr * 24 hr/day + * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + type: object + type: object + type: object + load_balancer: + description: Settings controlling the load balancer + algorithms. + type: object + outlier_detection: + description: Settings controlling eviction of unhealthy + hosts from the load balancing pool + properties: + base_ejection_time: + description: 'Minimum ejection duration. A host + will remain ejected for a period equal to the + product of minimum ejection duration and the + number of times the host has been ejected. This + technique allows the system to automatically + increase the ejection period for unhealthy upstream + servers. format: 1h/1m/1s/1ms. MUST BE >=1ms. + Default is 30s.' + properties: + nanos: + description: Signed fractions of a second + at nanosecond resolution of the span of + time. Durations less than one second are + represented with a 0 `seconds` field and + a positive or negative `nanos` field. For + durations of one second or more, a non-zero + value for the `nanos` field must be of the + same sign as the `seconds` field. Must be + from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of + time. Must be from -315,576,000,000 to +315,576,000,000 + inclusive. Note: these bounds are computed + from: 60 sec/min * 60 min/hr * 24 hr/day + * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + consecutive_errors: + description: Number of errors before a host is + ejected from the connection pool. Defaults to + 5. When the upstream host is accessed over HTTP, + a 502, 503, or 504 return code qualifies as + an error. When the upstream host is accessed + over an opaque TCP connection, connect timeouts + and connection error/failure events qualify + as an error. 
+ format: int32 + type: integer + interval: + description: 'Time interval between ejection sweep + analysis. format: 1h/1m/1s/1ms. MUST BE >=1ms. + Default is 10s.' + properties: + nanos: + description: Signed fractions of a second + at nanosecond resolution of the span of + time. Durations less than one second are + represented with a 0 `seconds` field and + a positive or negative `nanos` field. For + durations of one second or more, a non-zero + value for the `nanos` field must be of the + same sign as the `seconds` field. Must be + from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of + time. Must be from -315,576,000,000 to +315,576,000,000 + inclusive. Note: these bounds are computed + from: 60 sec/min * 60 min/hr * 24 hr/day + * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + max_ejection_percent: + description: Maximum % of hosts in the load balancing + pool for the upstream service that can be ejected. + Defaults to 10%. + format: int32 + type: integer + min_health_percent: + description: Outlier detection will be enabled + as long as the associated load balancing pool + has at least min_health_percent hosts in healthy + mode. When the percentage of healthy hosts in + the load balancing pool drops below this threshold, + outlier detection will be disabled and the proxy + will load balance across all hosts in the pool + (healthy and unhealthy). The threshold can be + disabled by setting it to 0%. The default is + 0% as it's not typically applicable in k8s environments + with few pods per service. + format: int32 + type: integer + type: object + port_level_settings: + description: Traffic policies specific to individual + ports. Note that port level settings will override + the destination-level settings. Traffic settings + specified at the destination-level will not be inherited + when overridden by port-level settings, i.e. 
default + values will be applied to fields omitted in port-level + traffic policies. + items: + description: Traffic policies that apply to specific + ports of the service + properties: + connection_pool: + description: Settings controlling the volume + of connections to an upstream service + properties: + http: + description: HTTP connection pool settings. + properties: + h2_upgrade_policy: + description: Specify if http1.1 connection + should be upgraded to http2 for the + associated destination. + format: int32 + type: integer + http1_max_pending_requests: + description: Maximum number of pending + HTTP requests to a destination. Default + 2^32-1. + format: int32 + type: integer + http2_max_requests: + description: Maximum number of requests + to a backend. Default 2^32-1. + format: int32 + type: integer + idle_timeout: + description: The idle timeout for upstream + connection pool connections. The idle + timeout is defined as the period in + which there are no active requests. + If not set, there is no idle timeout. + When the idle timeout is reached the + connection will be closed. Note that + request based timeouts mean that HTTP/2 + PINGs will not keep the connection + alive. Applies to both HTTP1.1 and + HTTP2 connections. + properties: + nanos: + description: Signed fractions of + a second at nanosecond resolution + of the span of time. Durations + less than one second are represented + with a 0 `seconds` field and a + positive or negative `nanos` field. + For durations of one second or + more, a non-zero value for the + `nanos` field must be of the same + sign as the `seconds` field. Must + be from -999,999,999 to +999,999,999 + inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of + the span of time. Must be from + -315,576,000,000 to +315,576,000,000 + inclusive. 
Note: these bounds + are computed from: 60 sec/min + * 60 min/hr * 24 hr/day * 365.25 + days/year * 10000 years' + format: int64 + type: integer + type: object + max_requests_per_connection: + description: Maximum number of requests + per connection to a backend. Setting + this parameter to 1 disables keep + alive. Default 0, meaning "unlimited", + up to 2^29. + format: int32 + type: integer + max_retries: + description: Maximum number of retries + that can be outstanding to all hosts + in a cluster at a given time. Defaults + to 2^32-1. + format: int32 + type: integer + type: object + tcp: + description: Settings common to both HTTP + and TCP upstream connections. + properties: + connect_timeout: + description: TCP connection timeout. + properties: + nanos: + description: Signed fractions of + a second at nanosecond resolution + of the span of time. Durations + less than one second are represented + with a 0 `seconds` field and a + positive or negative `nanos` field. + For durations of one second or + more, a non-zero value for the + `nanos` field must be of the same + sign as the `seconds` field. Must + be from -999,999,999 to +999,999,999 + inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of + the span of time. Must be from + -315,576,000,000 to +315,576,000,000 + inclusive. Note: these bounds + are computed from: 60 sec/min + * 60 min/hr * 24 hr/day * 365.25 + days/year * 10000 years' + format: int64 + type: integer + type: object + max_connections: + description: Maximum number of HTTP1 + /TCP connections to a destination + host. Default 2^32-1. + format: int32 + type: integer + tcp_keepalive: + description: If set then set SO_KEEPALIVE + on the socket to enable TCP Keepalives. + properties: + interval: + description: The time duration between + keep-alive probes. Default is + to use the OS level configuration + (unless overridden, Linux defaults + to 75s.) 
+ properties: + nanos: + description: Signed fractions + of a second at nanosecond + resolution of the span of + time. Durations less than + one second are represented + with a 0 `seconds` field and + a positive or negative `nanos` + field. For durations of one + second or more, a non-zero + value for the `nanos` field + must be of the same sign as + the `seconds` field. Must + be from -999,999,999 to +999,999,999 + inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds + of the span of time. Must + be from -315,576,000,000 to + +315,576,000,000 inclusive. + Note: these bounds are computed + from: 60 sec/min * 60 min/hr + * 24 hr/day * 365.25 days/year + * 10000 years' + format: int64 + type: integer + type: object + probes: + description: Maximum number of keepalive + probes to send without response + before deciding the connection + is dead. Default is to use the + OS level configuration (unless + overridden, Linux defaults to + 9.) + format: int32 + type: integer + time: + description: The time duration a + connection needs to be idle before + keep-alive probes start being + sent. Default is to use the OS + level configuration (unless overridden, + Linux defaults to 7200s (ie 2 + hours.) + properties: + nanos: + description: Signed fractions + of a second at nanosecond + resolution of the span of + time. Durations less than + one second are represented + with a 0 `seconds` field and + a positive or negative `nanos` + field. For durations of one + second or more, a non-zero + value for the `nanos` field + must be of the same sign as + the `seconds` field. Must + be from -999,999,999 to +999,999,999 + inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds + of the span of time. Must + be from -315,576,000,000 to + +315,576,000,000 inclusive. 
+ Note: these bounds are computed + from: 60 sec/min * 60 min/hr + * 24 hr/day * 365.25 days/year + * 10000 years' + format: int64 + type: integer + type: object + type: object + type: object + type: object + load_balancer: + description: Settings controlling the load balancer + algorithms. + type: object + outlier_detection: + description: Settings controlling eviction of + unhealthy hosts from the load balancing pool + properties: + base_ejection_time: + description: 'Minimum ejection duration. + A host will remain ejected for a period + equal to the product of minimum ejection + duration and the number of times the host + has been ejected. This technique allows + the system to automatically increase the + ejection period for unhealthy upstream + servers. format: 1h/1m/1s/1ms. MUST BE + >=1ms. Default is 30s.' + properties: + nanos: + description: Signed fractions of a second + at nanosecond resolution of the span + of time. Durations less than one second + are represented with a 0 `seconds` + field and a positive or negative `nanos` + field. For durations of one second + or more, a non-zero value for the + `nanos` field must be of the same + sign as the `seconds` field. Must + be from -999,999,999 to +999,999,999 + inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the + span of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. Note: + these bounds are computed from: 60 + sec/min * 60 min/hr * 24 hr/day * + 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + consecutive_errors: + description: Number of errors before a host + is ejected from the connection pool. Defaults + to 5. When the upstream host is accessed + over HTTP, a 502, 503, or 504 return code + qualifies as an error. When the upstream + host is accessed over an opaque TCP connection, + connect timeouts and connection error/failure + events qualify as an error. 
+ format: int32 + type: integer + interval: + description: 'Time interval between ejection + sweep analysis. format: 1h/1m/1s/1ms. + MUST BE >=1ms. Default is 10s.' + properties: + nanos: + description: Signed fractions of a second + at nanosecond resolution of the span + of time. Durations less than one second + are represented with a 0 `seconds` + field and a positive or negative `nanos` + field. For durations of one second + or more, a non-zero value for the + `nanos` field must be of the same + sign as the `seconds` field. Must + be from -999,999,999 to +999,999,999 + inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the + span of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. Note: + these bounds are computed from: 60 + sec/min * 60 min/hr * 24 hr/day * + 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + max_ejection_percent: + description: Maximum % of hosts in the load + balancing pool for the upstream service + that can be ejected. Defaults to 10%. + format: int32 + type: integer + min_health_percent: + description: Outlier detection will be enabled + as long as the associated load balancing + pool has at least min_health_percent hosts + in healthy mode. When the percentage of + healthy hosts in the load balancing pool + drops below this threshold, outlier detection + will be disabled and the proxy will load + balance across all hosts in the pool (healthy + and unhealthy). The threshold can be disabled + by setting it to 0%. The default is 0% + as it's not typically applicable in k8s + environments with few pods per service. + format: int32 + type: integer + type: object + port: + description: Specifies the number of a port + on the destination service on which this policy + is being applied. 
+ properties: + number: + description: Valid port number + format: int32 + type: integer + type: object + tls: + description: TLS related settings for connections + to the upstream service. + properties: + ca_certificates: + description: 'OPTIONAL: The path to the + file containing certificate authority + certificates to use in verifying a presented + server certificate. If omitted, the proxy + will not verify the server''s certificate. + Should be empty if mode is `ISTIO_MUTUAL`.' + type: string + client_certificate: + description: REQUIRED if mode is `MUTUAL`. + The path to the file holding the client-side + TLS certificate to use. Should be empty + if mode is `ISTIO_MUTUAL`. + type: string + mode: + description: Indicates whether connections + to this port should be secured using TLS. + The value of this field determines how + TLS is enforced. + format: int32 + type: integer + private_key: + description: REQUIRED if mode is `MUTUAL`. + The path to the file holding the client's + private key. Should be empty if mode is + `ISTIO_MUTUAL`. + type: string + sni: + description: SNI string to present to the + server during TLS handshake. + type: string + subject_alt_names: + description: A list of alternate names to + verify the subject identity in the certificate. + If specified, the proxy will verify that + the server certificate's subject alt name + matches one of the specified values. If + specified, this list overrides the value + of subject_alt_names from the ServiceEntry. + items: + type: string + type: array + type: object + type: object + type: array + tls: + description: TLS related settings for connections + to the upstream service. + properties: + ca_certificates: + description: 'OPTIONAL: The path to the file containing + certificate authority certificates to use in + verifying a presented server certificate. If + omitted, the proxy will not verify the server''s + certificate. Should be empty if mode is `ISTIO_MUTUAL`.' 
+ type: string + client_certificate: + description: REQUIRED if mode is `MUTUAL`. The + path to the file holding the client-side TLS + certificate to use. Should be empty if mode + is `ISTIO_MUTUAL`. + type: string + mode: + description: Indicates whether connections to + this port should be secured using TLS. The value + of this field determines how TLS is enforced. + format: int32 + type: integer + private_key: + description: REQUIRED if mode is `MUTUAL`. The + path to the file holding the client's private + key. Should be empty if mode is `ISTIO_MUTUAL`. + type: string + sni: + description: SNI string to present to the server + during TLS handshake. + type: string + subject_alt_names: + description: A list of alternate names to verify + the subject identity in the certificate. If + specified, the proxy will verify that the server + certificate's subject alt name matches one of + the specified values. If specified, this list + overrides the value of subject_alt_names from + the ServiceEntry. + items: + type: string + type: array + type: object + type: object + type: object + type: array + traffic_policy: + description: Traffic policies to apply (load balancing policy, + connection pool sizes, outlier detection). + properties: + connection_pool: + description: Settings controlling the volume of connections + to an upstream service + properties: + http: + description: HTTP connection pool settings. + properties: + h2_upgrade_policy: + description: Specify if http1.1 connection should + be upgraded to http2 for the associated destination. + format: int32 + type: integer + http1_max_pending_requests: + description: Maximum number of pending HTTP requests + to a destination. Default 2^32-1. + format: int32 + type: integer + http2_max_requests: + description: Maximum number of requests to a backend. + Default 2^32-1. + format: int32 + type: integer + idle_timeout: + description: The idle timeout for upstream connection + pool connections. 
The idle timeout is defined + as the period in which there are no active requests. + If not set, there is no idle timeout. When the + idle timeout is reached the connection will be + closed. Note that request based timeouts mean + that HTTP/2 PINGs will not keep the connection + alive. Applies to both HTTP1.1 and HTTP2 connections. + properties: + nanos: + description: Signed fractions of a second at + nanosecond resolution of the span of time. + Durations less than one second are represented + with a 0 `seconds` field and a positive or + negative `nanos` field. For durations of one + second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` + field. Must be from -999,999,999 to +999,999,999 + inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of + time. Must be from -315,576,000,000 to +315,576,000,000 + inclusive. Note: these bounds are computed + from: 60 sec/min * 60 min/hr * 24 hr/day * + 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + max_requests_per_connection: + description: Maximum number of requests per connection + to a backend. Setting this parameter to 1 disables + keep alive. Default 0, meaning "unlimited", up + to 2^29. + format: int32 + type: integer + max_retries: + description: Maximum number of retries that can + be outstanding to all hosts in a cluster at a + given time. Defaults to 2^32-1. + format: int32 + type: integer + type: object + tcp: + description: Settings common to both HTTP and TCP upstream + connections. + properties: + connect_timeout: + description: TCP connection timeout. + properties: + nanos: + description: Signed fractions of a second at + nanosecond resolution of the span of time. + Durations less than one second are represented + with a 0 `seconds` field and a positive or + negative `nanos` field. 
For durations of one + second or more, a non-zero value for the `nanos` + field must be of the same sign as the `seconds` + field. Must be from -999,999,999 to +999,999,999 + inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of + time. Must be from -315,576,000,000 to +315,576,000,000 + inclusive. Note: these bounds are computed + from: 60 sec/min * 60 min/hr * 24 hr/day * + 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + max_connections: + description: Maximum number of HTTP1 /TCP connections + to a destination host. Default 2^32-1. + format: int32 + type: integer + tcp_keepalive: + description: If set then set SO_KEEPALIVE on the + socket to enable TCP Keepalives. + properties: + interval: + description: The time duration between keep-alive + probes. Default is to use the OS level configuration + (unless overridden, Linux defaults to 75s.) + properties: + nanos: + description: Signed fractions of a second + at nanosecond resolution of the span of + time. Durations less than one second are + represented with a 0 `seconds` field and + a positive or negative `nanos` field. + For durations of one second or more, a + non-zero value for the `nanos` field must + be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 + inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span + of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. Note: these + bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year + * 10000 years' + format: int64 + type: integer + type: object + probes: + description: Maximum number of keepalive probes + to send without response before deciding the + connection is dead. Default is to use the + OS level configuration (unless overridden, + Linux defaults to 9.) 
+ format: int32 + type: integer + time: + description: The time duration a connection + needs to be idle before keep-alive probes + start being sent. Default is to use the OS + level configuration (unless overridden, Linux + defaults to 7200s (ie 2 hours.) + properties: + nanos: + description: Signed fractions of a second + at nanosecond resolution of the span of + time. Durations less than one second are + represented with a 0 `seconds` field and + a positive or negative `nanos` field. + For durations of one second or more, a + non-zero value for the `nanos` field must + be of the same sign as the `seconds` field. + Must be from -999,999,999 to +999,999,999 + inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span + of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. Note: these + bounds are computed from: 60 sec/min * + 60 min/hr * 24 hr/day * 365.25 days/year + * 10000 years' + format: int64 + type: integer + type: object + type: object + type: object + type: object + load_balancer: + description: Settings controlling the load balancer algorithms. + type: object + outlier_detection: + description: Settings controlling eviction of unhealthy + hosts from the load balancing pool + properties: + base_ejection_time: + description: 'Minimum ejection duration. A host will + remain ejected for a period equal to the product of + minimum ejection duration and the number of times + the host has been ejected. This technique allows the + system to automatically increase the ejection period + for unhealthy upstream servers. format: 1h/1m/1s/1ms. + MUST BE >=1ms. Default is 30s.' + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less + than one second are represented with a 0 `seconds` + field and a positive or negative `nanos` field. 
+ For durations of one second or more, a non-zero + value for the `nanos` field must be of the same + sign as the `seconds` field. Must be from -999,999,999 + to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. + Must be from -315,576,000,000 to +315,576,000,000 + inclusive. Note: these bounds are computed from: + 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year + * 10000 years' + format: int64 + type: integer + type: object + consecutive_errors: + description: Number of errors before a host is ejected + from the connection pool. Defaults to 5. When the + upstream host is accessed over HTTP, a 502, 503, or + 504 return code qualifies as an error. When the upstream + host is accessed over an opaque TCP connection, connect + timeouts and connection error/failure events qualify + as an error. + format: int32 + type: integer + interval: + description: 'Time interval between ejection sweep analysis. + format: 1h/1m/1s/1ms. MUST BE >=1ms. Default is 10s.' + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less + than one second are represented with a 0 `seconds` + field and a positive or negative `nanos` field. + For durations of one second or more, a non-zero + value for the `nanos` field must be of the same + sign as the `seconds` field. Must be from -999,999,999 + to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. + Must be from -315,576,000,000 to +315,576,000,000 + inclusive. Note: these bounds are computed from: + 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year + * 10000 years' + format: int64 + type: integer + type: object + max_ejection_percent: + description: Maximum % of hosts in the load balancing + pool for the upstream service that can be ejected. + Defaults to 10%. 
+ format: int32 + type: integer + min_health_percent: + description: Outlier detection will be enabled as long + as the associated load balancing pool has at least + min_health_percent hosts in healthy mode. When the + percentage of healthy hosts in the load balancing + pool drops below this threshold, outlier detection + will be disabled and the proxy will load balance across + all hosts in the pool (healthy and unhealthy). The + threshold can be disabled by setting it to 0%. The + default is 0% as it's not typically applicable in + k8s environments with few pods per service. + format: int32 + type: integer + type: object + port_level_settings: + description: Traffic policies specific to individual ports. + Note that port level settings will override the destination-level + settings. Traffic settings specified at the destination-level + will not be inherited when overridden by port-level settings, + i.e. default values will be applied to fields omitted + in port-level traffic policies. + items: + description: Traffic policies that apply to specific ports + of the service + properties: + connection_pool: + description: Settings controlling the volume of connections + to an upstream service + properties: + http: + description: HTTP connection pool settings. + properties: + h2_upgrade_policy: + description: Specify if http1.1 connection + should be upgraded to http2 for the associated + destination. + format: int32 + type: integer + http1_max_pending_requests: + description: Maximum number of pending HTTP + requests to a destination. Default 2^32-1. + format: int32 + type: integer + http2_max_requests: + description: Maximum number of requests to + a backend. Default 2^32-1. + format: int32 + type: integer + idle_timeout: + description: The idle timeout for upstream + connection pool connections. The idle timeout + is defined as the period in which there + are no active requests. If not set, there + is no idle timeout. 
When the idle timeout + is reached the connection will be closed. + Note that request based timeouts mean that + HTTP/2 PINGs will not keep the connection + alive. Applies to both HTTP1.1 and HTTP2 + connections. + properties: + nanos: + description: Signed fractions of a second + at nanosecond resolution of the span + of time. Durations less than one second + are represented with a 0 `seconds` field + and a positive or negative `nanos` field. + For durations of one second or more, + a non-zero value for the `nanos` field + must be of the same sign as the `seconds` + field. Must be from -999,999,999 to + +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span + of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. Note: + these bounds are computed from: 60 sec/min + * 60 min/hr * 24 hr/day * 365.25 days/year + * 10000 years' + format: int64 + type: integer + type: object + max_requests_per_connection: + description: Maximum number of requests per + connection to a backend. Setting this parameter + to 1 disables keep alive. Default 0, meaning + "unlimited", up to 2^29. + format: int32 + type: integer + max_retries: + description: Maximum number of retries that + can be outstanding to all hosts in a cluster + at a given time. Defaults to 2^32-1. + format: int32 + type: integer + type: object + tcp: + description: Settings common to both HTTP and + TCP upstream connections. + properties: + connect_timeout: + description: TCP connection timeout. + properties: + nanos: + description: Signed fractions of a second + at nanosecond resolution of the span + of time. Durations less than one second + are represented with a 0 `seconds` field + and a positive or negative `nanos` field. + For durations of one second or more, + a non-zero value for the `nanos` field + must be of the same sign as the `seconds` + field. Must be from -999,999,999 to + +999,999,999 inclusive. 
+ format: int32 + type: integer + seconds: + description: 'Signed seconds of the span + of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. Note: + these bounds are computed from: 60 sec/min + * 60 min/hr * 24 hr/day * 365.25 days/year + * 10000 years' + format: int64 + type: integer + type: object + max_connections: + description: Maximum number of HTTP1 /TCP + connections to a destination host. Default + 2^32-1. + format: int32 + type: integer + tcp_keepalive: + description: If set then set SO_KEEPALIVE + on the socket to enable TCP Keepalives. + properties: + interval: + description: The time duration between + keep-alive probes. Default is to use + the OS level configuration (unless overridden, + Linux defaults to 75s.) + properties: + nanos: + description: Signed fractions of a + second at nanosecond resolution + of the span of time. Durations less + than one second are represented + with a 0 `seconds` field and a positive + or negative `nanos` field. For durations + of one second or more, a non-zero + value for the `nanos` field must + be of the same sign as the `seconds` + field. Must be from -999,999,999 + to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the + span of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. Note: + these bounds are computed from: + 60 sec/min * 60 min/hr * 24 hr/day + * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + probes: + description: Maximum number of keepalive + probes to send without response before + deciding the connection is dead. Default + is to use the OS level configuration + (unless overridden, Linux defaults to + 9.) + format: int32 + type: integer + time: + description: The time duration a connection + needs to be idle before keep-alive probes + start being sent. Default is to use + the OS level configuration (unless overridden, + Linux defaults to 7200s (ie 2 hours.) 
+ properties: + nanos: + description: Signed fractions of a + second at nanosecond resolution + of the span of time. Durations less + than one second are represented + with a 0 `seconds` field and a positive + or negative `nanos` field. For durations + of one second or more, a non-zero + value for the `nanos` field must + be of the same sign as the `seconds` + field. Must be from -999,999,999 + to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the + span of time. Must be from -315,576,000,000 + to +315,576,000,000 inclusive. Note: + these bounds are computed from: + 60 sec/min * 60 min/hr * 24 hr/day + * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + type: object + type: object + type: object + load_balancer: + description: Settings controlling the load balancer + algorithms. + type: object + outlier_detection: + description: Settings controlling eviction of unhealthy + hosts from the load balancing pool + properties: + base_ejection_time: + description: 'Minimum ejection duration. A host + will remain ejected for a period equal to the + product of minimum ejection duration and the + number of times the host has been ejected. This + technique allows the system to automatically + increase the ejection period for unhealthy upstream + servers. format: 1h/1m/1s/1ms. MUST BE >=1ms. + Default is 30s.' + properties: + nanos: + description: Signed fractions of a second + at nanosecond resolution of the span of + time. Durations less than one second are + represented with a 0 `seconds` field and + a positive or negative `nanos` field. For + durations of one second or more, a non-zero + value for the `nanos` field must be of the + same sign as the `seconds` field. Must be + from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of + time. Must be from -315,576,000,000 to +315,576,000,000 + inclusive. 
Note: these bounds are computed + from: 60 sec/min * 60 min/hr * 24 hr/day + * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + consecutive_errors: + description: Number of errors before a host is + ejected from the connection pool. Defaults to + 5. When the upstream host is accessed over HTTP, + a 502, 503, or 504 return code qualifies as + an error. When the upstream host is accessed + over an opaque TCP connection, connect timeouts + and connection error/failure events qualify + as an error. + format: int32 + type: integer + interval: + description: 'Time interval between ejection sweep + analysis. format: 1h/1m/1s/1ms. MUST BE >=1ms. + Default is 10s.' + properties: + nanos: + description: Signed fractions of a second + at nanosecond resolution of the span of + time. Durations less than one second are + represented with a 0 `seconds` field and + a positive or negative `nanos` field. For + durations of one second or more, a non-zero + value for the `nanos` field must be of the + same sign as the `seconds` field. Must be + from -999,999,999 to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of + time. Must be from -315,576,000,000 to +315,576,000,000 + inclusive. Note: these bounds are computed + from: 60 sec/min * 60 min/hr * 24 hr/day + * 365.25 days/year * 10000 years' + format: int64 + type: integer + type: object + max_ejection_percent: + description: Maximum % of hosts in the load balancing + pool for the upstream service that can be ejected. + Defaults to 10%. + format: int32 + type: integer + min_health_percent: + description: Outlier detection will be enabled + as long as the associated load balancing pool + has at least min_health_percent hosts in healthy + mode. 
When the percentage of healthy hosts in + the load balancing pool drops below this threshold, + outlier detection will be disabled and the proxy + will load balance across all hosts in the pool + (healthy and unhealthy). The threshold can be + disabled by setting it to 0%. The default is + 0% as it's not typically applicable in k8s environments + with few pods per service. + format: int32 + type: integer + type: object + port: + description: Specifies the number of a port on the + destination service on which this policy is being + applied. + properties: + number: + description: Valid port number + format: int32 + type: integer + type: object + tls: + description: TLS related settings for connections + to the upstream service. + properties: + ca_certificates: + description: 'OPTIONAL: The path to the file containing + certificate authority certificates to use in + verifying a presented server certificate. If + omitted, the proxy will not verify the server''s + certificate. Should be empty if mode is `ISTIO_MUTUAL`.' + type: string + client_certificate: + description: REQUIRED if mode is `MUTUAL`. The + path to the file holding the client-side TLS + certificate to use. Should be empty if mode + is `ISTIO_MUTUAL`. + type: string + mode: + description: Indicates whether connections to + this port should be secured using TLS. The value + of this field determines how TLS is enforced. + format: int32 + type: integer + private_key: + description: REQUIRED if mode is `MUTUAL`. The + path to the file holding the client's private + key. Should be empty if mode is `ISTIO_MUTUAL`. + type: string + sni: + description: SNI string to present to the server + during TLS handshake. + type: string + subject_alt_names: + description: A list of alternate names to verify + the subject identity in the certificate. If + specified, the proxy will verify that the server + certificate's subject alt name matches one of + the specified values. 
If specified, this list + overrides the value of subject_alt_names from + the ServiceEntry. + items: + type: string + type: array + type: object + type: object + type: array + tls: + description: TLS related settings for connections to the + upstream service. + properties: + ca_certificates: + description: 'OPTIONAL: The path to the file containing + certificate authority certificates to use in verifying + a presented server certificate. If omitted, the proxy + will not verify the server''s certificate. Should + be empty if mode is `ISTIO_MUTUAL`.' + type: string + client_certificate: + description: REQUIRED if mode is `MUTUAL`. The path + to the file holding the client-side TLS certificate + to use. Should be empty if mode is `ISTIO_MUTUAL`. + type: string + mode: + description: Indicates whether connections to this port + should be secured using TLS. The value of this field + determines how TLS is enforced. + format: int32 + type: integer + private_key: + description: REQUIRED if mode is `MUTUAL`. The path + to the file holding the client's private key. Should + be empty if mode is `ISTIO_MUTUAL`. + type: string + sni: + description: SNI string to present to the server during + TLS handshake. + type: string + subject_alt_names: + description: A list of alternate names to verify the + subject identity in the certificate. If specified, + the proxy will verify that the server certificate's + subject alt name matches one of the specified values. + If specified, this list overrides the value of subject_alt_names + from the ServiceEntry. + items: + type: string + type: array + type: object + type: object + type: object + type: object + type: object + status: + description: ServicePolicyStatus defines the observed state of ServicePolicy + properties: + completionTime: + description: Represents time when the strategy was completed. It is + represented in RFC3339 form and is in UTC. 
+ format: date-time + type: string + conditions: + description: The latest available observations of an object's current + state. + items: + description: StrategyCondition describes current state of a strategy. + properties: + lastProbeTime: + description: Last time the condition was checked. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transit from one status to + another + format: date-time + type: string + message: + description: Human readable message indicating details about last + transition. + type: string + reason: + description: reason for the condition's last transition + type: string + status: + description: Status of the condition, one of True, False, Unknown + type: string + type: + description: Type of strategy condition, Complete or Failed. + type: string + type: object + type: array + startTime: + description: Represents time when the strategy was acknowledged by the + controller. It is represented in RFC3339 form and is in UTC. 
+ format: date-time + type: string + type: object + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/servicemesh.kubesphere.io_strategies.yaml b/config/crd/bases/servicemesh.kubesphere.io_strategies.yaml new file mode 100644 index 000000000..a02650a35 --- /dev/null +++ b/config/crd/bases/servicemesh.kubesphere.io_strategies.yaml @@ -0,0 +1,1166 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: strategies.servicemesh.kubesphere.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.type + description: type of strategy + name: Type + type: string + - JSONPath: .spec.template.spec.hosts + description: destination hosts + name: Hosts + type: string + - JSONPath: .metadata.creationTimestamp + description: 'CreationTimestamp is a timestamp representing the server time when + this object was created. It is not guaranteed to be set in happens-before order + across separate operations. Clients may not set this value. It is represented + in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for + lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' + name: Age + type: date + group: servicemesh.kubesphere.io + names: + kind: Strategy + listKind: StrategyList + plural: strategies + singular: strategy + scope: Namespaced + subresources: {} + validation: + openAPIV3Schema: + description: Strategy is the Schema for the strategies API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: StrategySpec defines the desired state of Strategy + properties: + governor: + description: Governor version, the version takes control of all incoming + traffic label version value + type: string + principal: + description: Principal version, the one as reference version label version + value + type: string + selector: + description: Label selector for virtual services. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains + values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to a + set of values. Valid operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + strategyPolicy: + description: strategy policy, how the strategy will be applied by the + strategy controller + type: string + template: + description: Template describes the virtual service that will be created. + properties: + metadata: + description: Metadata of the virtual services created from this + template + type: object + spec: + description: Spec indicates the behavior of a virtual service. + properties: + export_to: + description: "A list of namespaces to which this virtual service + is exported. Exporting a virtual service allows it to be used + by sidecars and gateways defined in other namespaces. This + feature provides a mechanism for service owners and mesh administrators + to control the visibility of virtual services across namespace + boundaries. \n If no namespaces are specified then the virtual + service is exported to all namespaces by default. \n The value + \".\" is reserved and defines an export to the same namespace + that the virtual service is declared in. Similarly the value + \"*\" is reserved and defines an export to all namespaces. + \n NOTE: in the current release, the `exportTo` value is restricted + to \".\" or \"*\" (i.e., the current namespace or all namespaces)." + items: + type: string + type: array + gateways: + description: The names of gateways and sidecars that should + apply these routes. A single VirtualService is used for sidecars + inside the mesh as well as for one or more gateways. The selection + condition imposed by this field can be overridden using the + source field in the match conditions of protocol-specific + routes. The reserved word `mesh` is used to imply all the + sidecars in the mesh. 
When this field is omitted, the default + gateway (`mesh`) will be used, which would apply the rule + to all sidecars in the mesh. If a list of gateway names is + provided, the rules will apply only to the gateways. To apply + the rules to both gateways and sidecars, specify `mesh` as + one of the gateway names. + items: + type: string + type: array + hosts: + description: "The destination hosts to which traffic is being + sent. Could be a DNS name with wildcard prefix or an IP address. + \ Depending on the platform, short-names can also be used + instead of a FQDN (i.e. has no dots in the name). In such + a scenario, the FQDN of the host would be derived based on + the underlying platform. \n A single VirtualService can be + used to describe all the traffic properties of the corresponding + hosts, including those for multiple HTTP and TCP ports. Alternatively, + the traffic properties of a host can be defined using more + than one VirtualService, with certain caveats. Refer to the + [Operations Guide](https://istio.io/docs/ops/traffic-management/deploy-guidelines/#multiple-virtual-services-and-destination-rules-for-the-same-host) + for details. \n *Note for Kubernetes users*: When short names + are used (e.g. \"reviews\" instead of \"reviews.default.svc.cluster.local\"), + Istio will interpret the short name based on the namespace + of the rule, not the service. A rule in the \"default\" namespace + containing a host \"reviews\" will be interpreted as \"reviews.default.svc.cluster.local\", + irrespective of the actual namespace associated with the reviews + service. _To avoid potential misconfigurations, it is recommended + to always use fully qualified domain names over short names._ + \n The hosts field applies to both HTTP and TCP services. + Service inside the mesh, i.e., those found in the service + registry, must always be referred to using their alphanumeric + names. IP addresses are allowed only for services defined + via the Gateway." 
+ items: + type: string + type: array + http: + description: An ordered list of route rules for HTTP traffic. + HTTP routes will be applied to platform service ports named + 'http-*'/'http2-*'/'grpc-*', gateway ports with protocol HTTP/HTTP2/GRPC/ + TLS-terminated-HTTPS and service entry ports using HTTP/HTTP2/GRPC + protocols. The first rule matching an incoming request is + used. + items: + description: Describes match conditions and actions for routing + HTTP/1.1, HTTP2, and gRPC traffic. See VirtualService for + usage examples. + properties: + append_headers: + additionalProperties: + type: string + description: $hide_from_docs + type: object + append_request_headers: + additionalProperties: + type: string + description: $hide_from_docs + type: object + append_response_headers: + additionalProperties: + type: string + description: $hide_from_docs + type: object + cors_policy: + description: Cross-Origin Resource Sharing policy (CORS). + Refer to [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) + for further details about cross origin resource sharing. + properties: + allow_credentials: + description: Indicates whether the caller is allowed + to send the actual request (not the preflight) using + credentials. Translates to `Access-Control-Allow-Credentials` + header. + properties: + value: + description: The bool value. + type: boolean + type: object + allow_headers: + description: List of HTTP headers that can be used + when requesting the resource. Serialized to Access-Control-Allow-Headers + header. + items: + type: string + type: array + allow_methods: + description: List of HTTP methods allowed to access + the resource. The content will be serialized into + the Access-Control-Allow-Methods header. + items: + type: string + type: array + allow_origin: + description: The list of origins that are allowed + to perform CORS requests. The content will be serialized + into the Access-Control-Allow-Origin header. Wildcard + * will allow all origins. 
+ items: + type: string + type: array + expose_headers: + description: A white list of HTTP headers that the + browsers are allowed to access. Serialized into + Access-Control-Expose-Headers header. + items: + type: string + type: array + max_age: + description: Specifies how long the results of a preflight + request can be cached. Translates to the `Access-Control-Max-Age` + header. + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less + than one second are represented with a 0 `seconds` + field and a positive or negative `nanos` field. + For durations of one second or more, a non-zero + value for the `nanos` field must be of the same + sign as the `seconds` field. Must be from -999,999,999 + to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. + Must be from -315,576,000,000 to +315,576,000,000 + inclusive. Note: these bounds are computed from: + 60 sec/min * 60 min/hr * 24 hr/day * 365.25 + days/year * 10000 years' + format: int64 + type: integer + type: object + type: object + fault: + description: Fault injection policy to apply on HTTP traffic + at the client side. Note that timeouts or retries will + not be enabled when faults are enabled on the client + side. + properties: + abort: + description: Abort Http request attempts and return + error codes back to downstream service, giving the + impression that the upstream service is faulty. + properties: + percent: + description: Percentage of requests to be aborted + with the error code provided (0-100). Use of + integer `percent` value is deprecated. Use the + double `percentage` field instead. + format: int32 + type: integer + percentage: + description: Percentage of requests to be aborted + with the error code provided. 
+ properties: + value: {} + type: object + type: object + delay: + description: Delay requests before forwarding, emulating + various failures such as network issues, overloaded + upstream service, etc. + properties: + percent: + description: Percentage of requests on which the + delay will be injected (0-100). Use of integer + `percent` value is deprecated. Use the double + `percentage` field instead. + format: int32 + type: integer + percentage: + description: Percentage of requests on which the + delay will be injected. + properties: + value: {} + type: object + type: object + type: object + headers: + description: Header manipulation rules + properties: + request: + description: Header manipulation rules to apply before + forwarding a request to the destination service + properties: + add: + additionalProperties: + type: string + description: Append the given values to the headers + specified by keys (will create a comma-separated + list of values) + type: object + remove: + description: Remove a the specified headers + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Overwrite the headers specified by + key with the given values + type: object + type: object + response: + description: Header manipulation rules to apply before + returning a response to the caller + properties: + add: + additionalProperties: + type: string + description: Append the given values to the headers + specified by keys (will create a comma-separated + list of values) + type: object + remove: + description: Remove a the specified headers + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Overwrite the headers specified by + key with the given values + type: object + type: object + type: object + match: + description: Match conditions to be satisfied for the + rule to be activated. 
All conditions inside a single + match block have AND semantics, while the list of match + blocks have OR semantics. The rule is matched if any + one of the match blocks succeed. + items: + description: "HttpMatchRequest specifies a set of criterion + to be met in order for the rule to be applied to the + HTTP request. For example, the following restricts + the rule to match only requests where the URL path + starts with /ratings/v2/ and the request contains + a custom `end-user` header with value `jason`. \n + ```yaml apiVersion: networking.istio.io/v1alpha3 kind: + VirtualService metadata: name: ratings-route spec: + \ hosts: - ratings.prod.svc.cluster.local http: + \ - match: - headers: end-user: exact: + jason uri: prefix: \"/ratings/v2/\" + \ ignoreUriCase: true route: - destination: + \ host: ratings.prod.svc.cluster.local ``` + \n HTTPMatchRequest CANNOT be empty." + properties: + authority: + description: "HTTP Authority values are case-sensitive + and formatted as follows: \n - `exact: \"value\"` + for exact string match \n - `prefix: \"value\"` + for prefix-based match \n - `regex: \"value\"` + for ECMAscript style regex-based match" + type: object + gateways: + description: $hide_from_docs + items: + type: string + type: array + headers: + description: "The header keys must be lowercase + and use hyphen as the separator, e.g. _x-request-id_. + \n Header values are case-sensitive and formatted + as follows: \n - `exact: \"value\"` for exact + string match \n - `prefix: \"value\"` for prefix-based + match \n - `regex: \"value\"` for ECMAscript style + regex-based match \n **Note:** The keys `uri`, + `scheme`, `method`, and `authority` will be ignored." + ignore_uri_case: + description: "Flag to specify whether the URI matching + should be case-insensitive. \n **Note:** The case + will be ignored only in the case of `exact` and + `prefix` URI matches." 
+ type: boolean + method: + description: "HTTP Method values are case-sensitive + and formatted as follows: \n - `exact: \"value\"` + for exact string match \n - `prefix: \"value\"` + for prefix-based match \n - `regex: \"value\"` + for ECMAscript style regex-based match" + type: object + name: + description: The name assigned to a match. The match's + name will be concatenated with the parent route's + name and will be logged in the access logs for + requests matching this route. + type: string + port: + description: Specifies the ports on the host that + is being addressed. Many services only expose + a single port or label ports with the protocols + they support, in these cases it is not required + to explicitly select the port. + format: int32 + type: integer + query_params: + description: "Query parameters for matching. \n + Ex: - For a query parameter like \"?key=true\", + the map key would be \"key\" and the string + match could be defined as `exact: \"true\"`. - + For a query parameter like \"?key\", the map key + would be \"key\" and the string match could + be defined as `exact: \"\"`. - For a query parameter + like \"?key=123\", the map key would be \"key\" + and the string match could be defined as `regex: + \"\\d+$\"`. Note that this configuration will + only match values like \"123\" but not \"a123\" + or \"123a\". \n **Note:** `prefix` matching is + currently not supported." + scheme: + description: "URI Scheme values are case-sensitive + and formatted as follows: \n - `exact: \"value\"` + for exact string match \n - `prefix: \"value\"` + for prefix-based match \n - `regex: \"value\"` + for ECMAscript style regex-based match" + type: object + source_labels: + additionalProperties: + type: string + description: One or more labels that constrain the + applicability of a rule to workloads with the + given labels. 
If the VirtualService has a list + of gateways specified at the top, it must include + the reserved gateway `mesh` for this field to + be applicable. + type: object + uri: + description: "URI to match values are case-sensitive + and formatted as follows: \n - `exact: \"value\"` + for exact string match \n - `prefix: \"value\"` + for prefix-based match \n - `regex: \"value\"` + for ECMAscript style regex-based match \n **Note:** + Case-insensitive matching could be enabled via + the `ignore_uri_case` flag." + type: object + type: object + type: array + mirror: + description: Mirror HTTP traffic to a another destination + in addition to forwarding the requests to the intended + destination. Mirrored traffic is on a best effort basis + where the sidecar/gateway will not wait for the mirrored + cluster to respond before returning the response from + the original destination. Statistics will be generated + for the mirrored destination. + properties: + host: + description: "The name of a service from the service + registry. Service names are looked up from the platform's + service registry (e.g., Kubernetes services, Consul + services, etc.) and from the hosts declared by [ServiceEntry](https://istio.io/docs/reference/config/networking/service-entry/#ServiceEntry). + Traffic forwarded to destinations that are not found + in either of the two, will be dropped. \n *Note + for Kubernetes users*: When short names are used + (e.g. \"reviews\" instead of \"reviews.default.svc.cluster.local\"), + Istio will interpret the short name based on the + namespace of the rule, not the service. A rule in + the \"default\" namespace containing a host \"reviews + will be interpreted as \"reviews.default.svc.cluster.local\", + irrespective of the actual namespace associated + with the reviews service. 
_To avoid potential misconfigurations, + it is recommended to always use fully qualified + domain names over short names._" + type: string + port: + description: Specifies the port on the host that is + being addressed. If a service exposes only a single + port it is not required to explicitly select the + port. + properties: + number: + description: Valid port number + format: int32 + type: integer + type: object + subset: + description: The name of a subset within the service. + Applicable only to services within the mesh. The + subset must be defined in a corresponding DestinationRule. + type: string + type: object + mirror_percent: + description: Percentage of the traffic to be mirrored + by the `mirror` field. If this field is absent, all + the traffic (100%) will be mirrored. Max value is 100. + properties: + value: + description: The uint32 value. + format: int32 + type: integer + type: object + name: + description: The name assigned to the route for debugging + purposes. The route's name will be concatenated with + the match's name and will be logged in the access logs + for requests matching this route/match. + type: string + redirect: + description: A http rule can either redirect or forward + (default) traffic. If traffic passthrough option is + specified in the rule, route/redirect will be ignored. + The redirect primitive can be used to send a HTTP 301 + redirect to a different URI or Authority. + properties: + authority: + description: On a redirect, overwrite the Authority/Host + portion of the URL with this value. + type: string + redirect_code: + description: On a redirect, Specifies the HTTP status + code to use in the redirect response. The default + response code is MOVED_PERMANENTLY (301). + format: int32 + type: integer + uri: + description: On a redirect, overwrite the Path portion + of the URL with this value. Note that the entire + path will be replaced, irrespective of the request + URI being matched as an exact path or prefix. 
+ type: string + type: object + remove_request_headers: + description: $hide_from_docs + items: + type: string + type: array + remove_response_headers: + description: $hide_from_docs + items: + type: string + type: array + retries: + description: Retry policy for HTTP requests. + properties: + attempts: + description: Number of retries for a given request. + The interval between retries will be determined + automatically (25ms+). Actual number of retries + attempted depends on the httpReqTimeout. + format: int32 + type: integer + per_try_timeout: + description: 'Timeout per retry attempt for a given + request. format: 1h/1m/1s/1ms. MUST BE >=1ms.' + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less + than one second are represented with a 0 `seconds` + field and a positive or negative `nanos` field. + For durations of one second or more, a non-zero + value for the `nanos` field must be of the same + sign as the `seconds` field. Must be from -999,999,999 + to +999,999,999 inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. + Must be from -315,576,000,000 to +315,576,000,000 + inclusive. Note: these bounds are computed from: + 60 sec/min * 60 min/hr * 24 hr/day * 365.25 + days/year * 10000 years' + format: int64 + type: integer + type: object + retry_on: + description: Specifies the conditions under which + retry takes place. One or more policies can be specified + using a ‘,’ delimited list. See the [retry policies](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#x-envoy-retry-on) + and [gRPC retry policies](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#x-envoy-retry-grpc-on) + for more details. + type: string + type: object + rewrite: + description: Rewrite HTTP URIs and Authority headers. + Rewrite cannot be used with Redirect primitive. 
Rewrite + will be performed before forwarding. + properties: + authority: + description: rewrite the Authority/Host header with + this value. + type: string + uri: + description: rewrite the path (or the prefix) portion + of the URI with this value. If the original URI + was matched based on prefix, the value provided + in this field will replace the corresponding matched + prefix. + type: string + type: object + route: + description: A http rule can either redirect or forward + (default) traffic. The forwarding target can be one + of several versions of a service (see glossary in beginning + of document). Weights associated with the service version + determine the proportion of traffic it receives. + items: + description: "Each routing rule is associated with one + or more service versions (see glossary in beginning + of document). Weights associated with the version + determine the proportion of traffic it receives. For + example, the following rule will route 25% of traffic + for the \"reviews\" service to instances with the + \"v2\" tag and the remaining traffic (i.e., 75%) to + \"v1\". \n ```yaml apiVersion: networking.istio.io/v1alpha3 + kind: VirtualService metadata: name: reviews-route + spec: hosts: - reviews.prod.svc.cluster.local + \ http: - route: - destination: host: + reviews.prod.svc.cluster.local subset: v2 + \ weight: 25 - destination: host: + reviews.prod.svc.cluster.local subset: v1 + \ weight: 75 ``` \n And the associated DestinationRule + \n ```yaml apiVersion: networking.istio.io/v1alpha3 + kind: DestinationRule metadata: name: reviews-destination + spec: host: reviews.prod.svc.cluster.local subsets: + \ - name: v1 labels: version: v1 - name: + v2 labels: version: v2 ``` \n Traffic can + also be split across two entirely different services + without having to define new subsets. 
For example, + the following rule forwards 25% of traffic to reviews.com + to dev.reviews.com \n ```yaml apiVersion: networking.istio.io/v1alpha3 + kind: VirtualService metadata: name: reviews-route-two-domains + spec: hosts: - reviews.com http: - route: + \ - destination: host: dev.reviews.com + \ weight: 25 - destination: host: + reviews.com weight: 75 ```" + properties: + append_request_headers: + additionalProperties: + type: string + description: Use of `append_request_headers` is + deprecated. Use the `headers` field instead. + type: object + append_response_headers: + additionalProperties: + type: string + description: Use of `append_response_headers` is + deprecated. Use the `headers` field instead. + type: object + destination: + description: Destination uniquely identifies the + instances of a service to which the request/connection + should be forwarded to. + properties: + host: + description: "The name of a service from the + service registry. Service names are looked + up from the platform's service registry (e.g., + Kubernetes services, Consul services, etc.) + and from the hosts declared by [ServiceEntry](https://istio.io/docs/reference/config/networking/service-entry/#ServiceEntry). + Traffic forwarded to destinations that are + not found in either of the two, will be dropped. + \n *Note for Kubernetes users*: When short + names are used (e.g. \"reviews\" instead of + \"reviews.default.svc.cluster.local\"), Istio + will interpret the short name based on the + namespace of the rule, not the service. A + rule in the \"default\" namespace containing + a host \"reviews will be interpreted as \"reviews.default.svc.cluster.local\", + irrespective of the actual namespace associated + with the reviews service. _To avoid potential + misconfigurations, it is recommended to always + use fully qualified domain names over short + names._" + type: string + port: + description: Specifies the port on the host + that is being addressed. 
If a service exposes + only a single port it is not required to explicitly + select the port. + properties: + number: + description: Valid port number + format: int32 + type: integer + type: object + subset: + description: The name of a subset within the + service. Applicable only to services within + the mesh. The subset must be defined in a + corresponding DestinationRule. + type: string + type: object + headers: + description: Header manipulation rules + properties: + request: + description: Header manipulation rules to apply + before forwarding a request to the destination + service + properties: + add: + additionalProperties: + type: string + description: Append the given values to + the headers specified by keys (will create + a comma-separated list of values) + type: object + remove: + description: Remove a the specified headers + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Overwrite the headers specified + by key with the given values + type: object + type: object + response: + description: Header manipulation rules to apply + before returning a response to the caller + properties: + add: + additionalProperties: + type: string + description: Append the given values to + the headers specified by keys (will create + a comma-separated list of values) + type: object + remove: + description: Remove a the specified headers + items: + type: string + type: array + set: + additionalProperties: + type: string + description: Overwrite the headers specified + by key with the given values + type: object + type: object + type: object + remove_request_headers: + description: Use of `remove_request_headers` is + deprecated. Use the `headers` field instead. + items: + type: string + type: array + remove_response_headers: + description: Use of `remove_response_header` is + deprecated. Use the `headers` field instead. 
+ items: + type: string + type: array + weight: + description: The proportion of traffic to be forwarded + to the service version. (0-100). Sum of weights + across destinations SHOULD BE == 100. If there + is only one destination in a rule, the weight + value is assumed to be 100. + format: int32 + type: integer + type: object + type: array + timeout: + description: Timeout for HTTP requests. + properties: + nanos: + description: Signed fractions of a second at nanosecond + resolution of the span of time. Durations less than + one second are represented with a 0 `seconds` field + and a positive or negative `nanos` field. For durations + of one second or more, a non-zero value for the + `nanos` field must be of the same sign as the `seconds` + field. Must be from -999,999,999 to +999,999,999 + inclusive. + format: int32 + type: integer + seconds: + description: 'Signed seconds of the span of time. + Must be from -315,576,000,000 to +315,576,000,000 + inclusive. Note: these bounds are computed from: + 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year + * 10000 years' + format: int64 + type: integer + type: object + websocket_upgrade: + description: Deprecated. Websocket upgrades are done automatically + starting from Istio 1.0. $hide_from_docs + type: boolean + type: object + type: array + tcp: + description: An ordered list of route rules for opaque TCP traffic. + TCP routes will be applied to any port that is not a HTTP + or TLS port. The first rule matching an incoming request is + used. + items: + description: "Describes match conditions and actions for routing + TCP traffic. The following routing rule forwards traffic + arriving at port 27017 for mongo.prod.svc.cluster.local + to another Mongo server on port 5555. 
\n ```yaml apiVersion: + networking.istio.io/v1alpha3 kind: VirtualService metadata: + \ name: bookinfo-Mongo spec: hosts: - mongo.prod.svc.cluster.local + \ tcp: - match: - port: 27017 route: - destination: + \ host: mongo.backup.svc.cluster.local port: + \ number: 5555 ```" + properties: + match: + description: Match conditions to be satisfied for the + rule to be activated. All conditions inside a single + match block have AND semantics, while the list of match + blocks have OR semantics. The rule is matched if any + one of the match blocks succeed. + items: + description: L4 connection match attributes. Note that + L4 connection matching support is incomplete. + properties: + destination_subnets: + description: IPv4 or IPv6 ip addresses of destination + with optional subnet. E.g., a.b.c.d/xx form or + just a.b.c.d. + items: + type: string + type: array + gateways: + description: Names of gateways where the rule should + be applied to. Gateway names at the top of the + VirtualService (if any) are overridden. The gateway + match is independent of sourceLabels. + items: + type: string + type: array + port: + description: Specifies the port on the host that + is being addressed. Many services only expose + a single port or label ports with the protocols + they support, in these cases it is not required + to explicitly select the port. + format: int32 + type: integer + source_labels: + additionalProperties: + type: string + description: One or more labels that constrain the + applicability of a rule to workloads with the + given labels. If the VirtualService has a list + of gateways specified at the top, it should include + the reserved gateway `mesh` in order for this + field to be applicable. + type: object + source_subnet: + description: IPv4 or IPv6 ip address of source with + optional subnet. 
E.g., a.b.c.d/xx form or just + a.b.c.d $hide_from_docs + type: string + type: object + type: array + route: + description: The destination to which the connection should + be forwarded to. + items: + description: L4 routing rule weighted destination. + properties: + destination: + description: Destination uniquely identifies the + instances of a service to which the request/connection + should be forwarded to. + properties: + host: + description: "The name of a service from the + service registry. Service names are looked + up from the platform's service registry (e.g., + Kubernetes services, Consul services, etc.) + and from the hosts declared by [ServiceEntry](https://istio.io/docs/reference/config/networking/service-entry/#ServiceEntry). + Traffic forwarded to destinations that are + not found in either of the two, will be dropped. + \n *Note for Kubernetes users*: When short + names are used (e.g. \"reviews\" instead of + \"reviews.default.svc.cluster.local\"), Istio + will interpret the short name based on the + namespace of the rule, not the service. A + rule in the \"default\" namespace containing + a host \"reviews will be interpreted as \"reviews.default.svc.cluster.local\", + irrespective of the actual namespace associated + with the reviews service. _To avoid potential + misconfigurations, it is recommended to always + use fully qualified domain names over short + names._" + type: string + port: + description: Specifies the port on the host + that is being addressed. If a service exposes + only a single port it is not required to explicitly + select the port. + properties: + number: + description: Valid port number + format: int32 + type: integer + type: object + subset: + description: The name of a subset within the + service. Applicable only to services within + the mesh. The subset must be defined in a + corresponding DestinationRule. 
+ type: string + type: object + weight: + description: The proportion of traffic to be forwarded + to the service version. If there is only one destination + in a rule, all traffic will be routed to it irrespective + of the weight. + format: int32 + type: integer + type: object + type: array + type: object + type: array + tls: + description: 'An ordered list of route rule for non-terminated + TLS & HTTPS traffic. Routing is typically performed using + the SNI value presented by the ClientHello message. TLS routes + will be applied to platform service ports named ''https-*'', + ''tls-*'', unterminated gateway ports using HTTPS/TLS protocols + (i.e. with "passthrough" TLS mode) and service entry ports + using HTTPS/TLS protocols. The first rule matching an incoming + request is used. NOTE: Traffic ''https-*'' or ''tls-*'' ports + without associated virtual service will be treated as opaque + TCP traffic.' + items: + description: "Describes match conditions and actions for routing + unterminated TLS traffic (TLS/HTTPS) The following routing + rule forwards unterminated TLS traffic arriving at port + 443 of gateway called \"mygateway\" to internal services + in the mesh based on the SNI value. \n ```yaml apiVersion: + networking.istio.io/v1alpha3 kind: VirtualService metadata: + \ name: bookinfo-sni spec: hosts: - \"*.bookinfo.com\" + \ gateways: - mygateway tls: - match: - port: + 443 sniHosts: - login.bookinfo.com route: + \ - destination: host: login.prod.svc.cluster.local + \ - match: - port: 443 sniHosts: - reviews.bookinfo.com + \ route: - destination: host: reviews.prod.svc.cluster.local + ```" + properties: + match: + description: Match conditions to be satisfied for the + rule to be activated. All conditions inside a single + match block have AND semantics, while the list of match + blocks have OR semantics. The rule is matched if any + one of the match blocks succeed. + items: + description: TLS connection match attributes. 
+ properties: + destination_subnets: + description: IPv4 or IPv6 ip addresses of destination + with optional subnet. E.g., a.b.c.d/xx form or + just a.b.c.d. + items: + type: string + type: array + gateways: + description: Names of gateways where the rule should + be applied to. Gateway names at the top of the + VirtualService (if any) are overridden. The gateway + match is independent of sourceLabels. + items: + type: string + type: array + port: + description: Specifies the port on the host that + is being addressed. Many services only expose + a single port or label ports with the protocols + they support, in these cases it is not required + to explicitly select the port. + format: int32 + type: integer + sni_hosts: + description: SNI (server name indicator) to match + on. Wildcard prefixes can be used in the SNI value, + e.g., *.com will match foo.example.com as well + as example.com. An SNI value must be a subset + (i.e., fall within the domain) of the corresponding + virtual serivce's hosts. + items: + type: string + type: array + source_labels: + additionalProperties: + type: string + description: One or more labels that constrain the + applicability of a rule to workloads with the + given labels. If the VirtualService has a list + of gateways specified at the top, it should include + the reserved gateway `mesh` in order for this + field to be applicable. + type: object + source_subnet: + description: IPv4 or IPv6 ip address of source with + optional subnet. E.g., a.b.c.d/xx form or just + a.b.c.d $hide_from_docs + type: string + type: object + type: array + route: + description: The destination to which the connection should + be forwarded to. + items: + description: L4 routing rule weighted destination. + properties: + destination: + description: Destination uniquely identifies the + instances of a service to which the request/connection + should be forwarded to. + properties: + host: + description: "The name of a service from the + service registry. 
Service names are looked + up from the platform's service registry (e.g., + Kubernetes services, Consul services, etc.) + and from the hosts declared by [ServiceEntry](https://istio.io/docs/reference/config/networking/service-entry/#ServiceEntry). + Traffic forwarded to destinations that are + not found in either of the two, will be dropped. + \n *Note for Kubernetes users*: When short + names are used (e.g. \"reviews\" instead of + \"reviews.default.svc.cluster.local\"), Istio + will interpret the short name based on the + namespace of the rule, not the service. A + rule in the \"default\" namespace containing + a host \"reviews will be interpreted as \"reviews.default.svc.cluster.local\", + irrespective of the actual namespace associated + with the reviews service. _To avoid potential + misconfigurations, it is recommended to always + use fully qualified domain names over short + names._" + type: string + port: + description: Specifies the port on the host + that is being addressed. If a service exposes + only a single port it is not required to explicitly + select the port. + properties: + number: + description: Valid port number + format: int32 + type: integer + type: object + subset: + description: The name of a subset within the + service. Applicable only to services within + the mesh. The subset must be defined in a + corresponding DestinationRule. + type: string + type: object + weight: + description: The proportion of traffic to be forwarded + to the service version. If there is only one destination + in a rule, all traffic will be routed to it irrespective + of the weight. + format: int32 + type: integer + type: object + type: array + type: object + type: array + type: object + type: object + type: + description: Strategy type + type: string + type: object + status: + description: StrategyStatus defines the observed state of Strategy + properties: + completionTime: + description: Represents time when the strategy was completed. 
It is + represented in RFC3339 form and is in UTC. + format: date-time + type: string + conditions: + description: The latest available observations of an object's current + state. + items: + description: StrategyCondition describes current state of a strategy. + properties: + lastProbeTime: + description: Last time the condition was checked. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transit from one status to + another + format: date-time + type: string + message: + description: Human readable message indicating details about last + transition. + type: string + reason: + description: reason for the condition's last transition + type: string + status: + description: Status of the condition, one of True, False, Unknown + type: string + type: + description: Type of strategy condition, Complete or Failed. + type: string + type: object + type: array + startTime: + description: Represents time when the strategy was acknowledged by the + controller. It is represented in RFC3339 form and is in UTC. 
+ format: date-time + type: string + type: object + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/bases/tenant.kubesphere.io_workspaces.yaml b/config/crd/bases/tenant.kubesphere.io_workspaces.yaml new file mode 100644 index 000000000..2eb730876 --- /dev/null +++ b/config/crd/bases/tenant.kubesphere.io_workspaces.yaml @@ -0,0 +1,54 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: workspaces.tenant.kubesphere.io +spec: + group: tenant.kubesphere.io + names: + kind: Workspace + listKind: WorkspaceList + plural: workspaces + singular: workspace + scope: Namespaced + validation: + openAPIV3Schema: + description: Workspace is the Schema for the workspaces API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: WorkspaceSpec defines the desired state of Workspace + properties: + manager: + type: string + type: object + status: + description: WorkspaceStatus defines the observed state of Workspace + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/hack/generate_client.sh b/hack/generate_client.sh index 75b799bc8..1c908ec0c 100755 --- a/hack/generate_client.sh +++ b/hack/generate_client.sh @@ -1,7 +1,7 @@ #!/bin/bash set -e -GV="network:v1alpha1 servicemesh:v1alpha2 tenant:v1alpha1 devops:v1alpha1 iam:v1alpha2 tower:v1alpha1" +GV="network:v1alpha1 servicemesh:v1alpha2 tenant:v1alpha1 devops:v1alpha1 iam:v1alpha2 cluster:v1alpha1" rm -rf ./pkg/client ./hack/generate_group.sh "client,lister,informer" kubesphere.io/kubesphere/pkg/client kubesphere.io/kubesphere/pkg/apis "$GV" --output-base=./ -h "$PWD/hack/boilerplate.go.txt" diff --git a/pkg/apis/addtoscheme_tower_v1alpha1.go b/pkg/apis/addtoscheme_cluster_v1alpha1.go similarity index 68% rename from pkg/apis/addtoscheme_tower_v1alpha1.go rename to pkg/apis/addtoscheme_cluster_v1alpha1.go index e952df5a0..8722b3516 100644 --- a/pkg/apis/addtoscheme_tower_v1alpha1.go +++ b/pkg/apis/addtoscheme_cluster_v1alpha1.go @@ -1,7 +1,7 @@ package apis import ( - "kubesphere.io/kubesphere/pkg/apis/tower/v1alpha1" + "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" ) func init() { diff --git a/pkg/apis/cluster/group.go b/pkg/apis/cluster/group.go new file mode 100644 index 000000000..916b1b53b --- /dev/null +++ b/pkg/apis/cluster/group.go @@ -0,0 +1 @@ +package cluster diff --git a/pkg/apis/tower/v1alpha1/agent_types.go b/pkg/apis/cluster/v1alpha1/agent_types.go similarity index 94% rename from 
pkg/apis/tower/v1alpha1/agent_types.go rename to pkg/apis/cluster/v1alpha1/agent_types.go index f4e928ff4..2be87bbb9 100644 --- a/pkg/apis/tower/v1alpha1/agent_types.go +++ b/pkg/apis/cluster/v1alpha1/agent_types.go @@ -20,6 +20,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + ResourceKindAgent = "Agent" + ResourcesSingularAgent = "agent" + ResourcesPluralAgent = "agents" +) + // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. @@ -46,7 +52,7 @@ type AgentSpec struct { // Indicates that the agent is paused. // +optional - Paused bool + Paused bool `json:"paused,omitempty"` } type AgentConditionType string @@ -86,12 +92,13 @@ type AgentStatus struct { Ping uint64 `json:"ping,omitempty"` // Issued new kubeconfig by proxy server - KubeConfig []byte + KubeConfig []byte `json:"kubeconfig,omitempty"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=true +// +genclient:nonNamespaced // Agent is the Schema for the agents API type Agent struct { diff --git a/pkg/apis/cluster/v1alpha1/cluster_types.go b/pkg/apis/cluster/v1alpha1/cluster_types.go new file mode 100644 index 000000000..834ba7d50 --- /dev/null +++ b/pkg/apis/cluster/v1alpha1/cluster_types.go @@ -0,0 +1,75 @@ +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ResourceKindCluster = "Cluster" + ResourcesSingularCluster = "cluster" + ResourcesPluralCluster = "clusters" +) + +type ClusterSpec struct { + + // Join cluster as kubefed cluster + Federated bool `json:"federated,omitempty"` + + // Desired state of the cluster + Active bool `json:"active,omitempty"` +} + +type ClusterConditionType string + +const ( + // Cluster agent is initialized and waiting for connecting + ClusterAgentInitialized ClusterConditionType = "AgentInitialized" + + // Cluster agent is 
available + ClusterAgentAvailable ClusterConditionType = "AgentAvailable" + + // + +) + +type ClusterStatus struct { + // Type of the condition + Type ClusterConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +// +genclient:nonNamespaced + +// Cluster is the schema for the clusters API +type Cluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ClusterSpec `json:"spec,omitempty"` + Status ClusterStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Cluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Cluster{}, &ClusterList{}) +} diff --git a/pkg/apis/cluster/v1alpha1/doc.go b/pkg/apis/cluster/v1alpha1/doc.go new file mode 100644 index 000000000..4a05e1696 --- /dev/null +++ b/pkg/apis/cluster/v1alpha1/doc.go @@ -0,0 +1,8 @@ +// Package v1alpha1 contains API Schema definitions for the tower v1alpha1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=kubesphere.io/kubesphere/pkg/apis/cluster +// +k8s:defaulter-gen=TypeMeta +// +groupName=cluster.kubesphere.io + +package v1alpha1 diff --git 
a/pkg/apis/cluster/v1alpha1/openapi_generated.go b/pkg/apis/cluster/v1alpha1/openapi_generated.go new file mode 100644 index 000000000..ecbca0f5d --- /dev/null +++ b/pkg/apis/cluster/v1alpha1/openapi_generated.go @@ -0,0 +1,13697 @@ +// +build !ignore_autogenerated + +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by openapi-gen. DO NOT EDIT. + +// This file was autogenerated by openapi-gen. Do not edit it manually! + +package v1alpha1 + +import ( + spec "github.com/go-openapi/spec" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + common "k8s.io/kube-openapi/pkg/common" +) + +func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { + return map[string]common.OpenAPIDefinition{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource": schema_k8sio_api_core_v1_AWSElasticBlockStoreVolumeSource(ref), + "k8s.io/api/core/v1.Affinity": schema_k8sio_api_core_v1_Affinity(ref), + "k8s.io/api/core/v1.AttachedVolume": schema_k8sio_api_core_v1_AttachedVolume(ref), + "k8s.io/api/core/v1.AvoidPods": schema_k8sio_api_core_v1_AvoidPods(ref), + "k8s.io/api/core/v1.AzureDiskVolumeSource": schema_k8sio_api_core_v1_AzureDiskVolumeSource(ref), + "k8s.io/api/core/v1.AzureFilePersistentVolumeSource": schema_k8sio_api_core_v1_AzureFilePersistentVolumeSource(ref), + "k8s.io/api/core/v1.AzureFileVolumeSource": schema_k8sio_api_core_v1_AzureFileVolumeSource(ref), + "k8s.io/api/core/v1.Binding": 
schema_k8sio_api_core_v1_Binding(ref), + "k8s.io/api/core/v1.CSIPersistentVolumeSource": schema_k8sio_api_core_v1_CSIPersistentVolumeSource(ref), + "k8s.io/api/core/v1.CSIVolumeSource": schema_k8sio_api_core_v1_CSIVolumeSource(ref), + "k8s.io/api/core/v1.Capabilities": schema_k8sio_api_core_v1_Capabilities(ref), + "k8s.io/api/core/v1.CephFSPersistentVolumeSource": schema_k8sio_api_core_v1_CephFSPersistentVolumeSource(ref), + "k8s.io/api/core/v1.CephFSVolumeSource": schema_k8sio_api_core_v1_CephFSVolumeSource(ref), + "k8s.io/api/core/v1.CinderPersistentVolumeSource": schema_k8sio_api_core_v1_CinderPersistentVolumeSource(ref), + "k8s.io/api/core/v1.CinderVolumeSource": schema_k8sio_api_core_v1_CinderVolumeSource(ref), + "k8s.io/api/core/v1.ClientIPConfig": schema_k8sio_api_core_v1_ClientIPConfig(ref), + "k8s.io/api/core/v1.ComponentCondition": schema_k8sio_api_core_v1_ComponentCondition(ref), + "k8s.io/api/core/v1.ComponentStatus": schema_k8sio_api_core_v1_ComponentStatus(ref), + "k8s.io/api/core/v1.ComponentStatusList": schema_k8sio_api_core_v1_ComponentStatusList(ref), + "k8s.io/api/core/v1.ConfigMap": schema_k8sio_api_core_v1_ConfigMap(ref), + "k8s.io/api/core/v1.ConfigMapEnvSource": schema_k8sio_api_core_v1_ConfigMapEnvSource(ref), + "k8s.io/api/core/v1.ConfigMapKeySelector": schema_k8sio_api_core_v1_ConfigMapKeySelector(ref), + "k8s.io/api/core/v1.ConfigMapList": schema_k8sio_api_core_v1_ConfigMapList(ref), + "k8s.io/api/core/v1.ConfigMapNodeConfigSource": schema_k8sio_api_core_v1_ConfigMapNodeConfigSource(ref), + "k8s.io/api/core/v1.ConfigMapProjection": schema_k8sio_api_core_v1_ConfigMapProjection(ref), + "k8s.io/api/core/v1.ConfigMapVolumeSource": schema_k8sio_api_core_v1_ConfigMapVolumeSource(ref), + "k8s.io/api/core/v1.Container": schema_k8sio_api_core_v1_Container(ref), + "k8s.io/api/core/v1.ContainerImage": schema_k8sio_api_core_v1_ContainerImage(ref), + "k8s.io/api/core/v1.ContainerPort": schema_k8sio_api_core_v1_ContainerPort(ref), + 
"k8s.io/api/core/v1.ContainerState": schema_k8sio_api_core_v1_ContainerState(ref), + "k8s.io/api/core/v1.ContainerStateRunning": schema_k8sio_api_core_v1_ContainerStateRunning(ref), + "k8s.io/api/core/v1.ContainerStateTerminated": schema_k8sio_api_core_v1_ContainerStateTerminated(ref), + "k8s.io/api/core/v1.ContainerStateWaiting": schema_k8sio_api_core_v1_ContainerStateWaiting(ref), + "k8s.io/api/core/v1.ContainerStatus": schema_k8sio_api_core_v1_ContainerStatus(ref), + "k8s.io/api/core/v1.DaemonEndpoint": schema_k8sio_api_core_v1_DaemonEndpoint(ref), + "k8s.io/api/core/v1.DownwardAPIProjection": schema_k8sio_api_core_v1_DownwardAPIProjection(ref), + "k8s.io/api/core/v1.DownwardAPIVolumeFile": schema_k8sio_api_core_v1_DownwardAPIVolumeFile(ref), + "k8s.io/api/core/v1.DownwardAPIVolumeSource": schema_k8sio_api_core_v1_DownwardAPIVolumeSource(ref), + "k8s.io/api/core/v1.EmptyDirVolumeSource": schema_k8sio_api_core_v1_EmptyDirVolumeSource(ref), + "k8s.io/api/core/v1.EndpointAddress": schema_k8sio_api_core_v1_EndpointAddress(ref), + "k8s.io/api/core/v1.EndpointPort": schema_k8sio_api_core_v1_EndpointPort(ref), + "k8s.io/api/core/v1.EndpointSubset": schema_k8sio_api_core_v1_EndpointSubset(ref), + "k8s.io/api/core/v1.Endpoints": schema_k8sio_api_core_v1_Endpoints(ref), + "k8s.io/api/core/v1.EndpointsList": schema_k8sio_api_core_v1_EndpointsList(ref), + "k8s.io/api/core/v1.EnvFromSource": schema_k8sio_api_core_v1_EnvFromSource(ref), + "k8s.io/api/core/v1.EnvVar": schema_k8sio_api_core_v1_EnvVar(ref), + "k8s.io/api/core/v1.EnvVarSource": schema_k8sio_api_core_v1_EnvVarSource(ref), + "k8s.io/api/core/v1.EphemeralContainer": schema_k8sio_api_core_v1_EphemeralContainer(ref), + "k8s.io/api/core/v1.EphemeralContainerCommon": schema_k8sio_api_core_v1_EphemeralContainerCommon(ref), + "k8s.io/api/core/v1.EphemeralContainers": schema_k8sio_api_core_v1_EphemeralContainers(ref), + "k8s.io/api/core/v1.Event": schema_k8sio_api_core_v1_Event(ref), + "k8s.io/api/core/v1.EventList": 
schema_k8sio_api_core_v1_EventList(ref), + "k8s.io/api/core/v1.EventSeries": schema_k8sio_api_core_v1_EventSeries(ref), + "k8s.io/api/core/v1.EventSource": schema_k8sio_api_core_v1_EventSource(ref), + "k8s.io/api/core/v1.ExecAction": schema_k8sio_api_core_v1_ExecAction(ref), + "k8s.io/api/core/v1.FCVolumeSource": schema_k8sio_api_core_v1_FCVolumeSource(ref), + "k8s.io/api/core/v1.FlexPersistentVolumeSource": schema_k8sio_api_core_v1_FlexPersistentVolumeSource(ref), + "k8s.io/api/core/v1.FlexVolumeSource": schema_k8sio_api_core_v1_FlexVolumeSource(ref), + "k8s.io/api/core/v1.FlockerVolumeSource": schema_k8sio_api_core_v1_FlockerVolumeSource(ref), + "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource": schema_k8sio_api_core_v1_GCEPersistentDiskVolumeSource(ref), + "k8s.io/api/core/v1.GitRepoVolumeSource": schema_k8sio_api_core_v1_GitRepoVolumeSource(ref), + "k8s.io/api/core/v1.GlusterfsPersistentVolumeSource": schema_k8sio_api_core_v1_GlusterfsPersistentVolumeSource(ref), + "k8s.io/api/core/v1.GlusterfsVolumeSource": schema_k8sio_api_core_v1_GlusterfsVolumeSource(ref), + "k8s.io/api/core/v1.HTTPGetAction": schema_k8sio_api_core_v1_HTTPGetAction(ref), + "k8s.io/api/core/v1.HTTPHeader": schema_k8sio_api_core_v1_HTTPHeader(ref), + "k8s.io/api/core/v1.Handler": schema_k8sio_api_core_v1_Handler(ref), + "k8s.io/api/core/v1.HostAlias": schema_k8sio_api_core_v1_HostAlias(ref), + "k8s.io/api/core/v1.HostPathVolumeSource": schema_k8sio_api_core_v1_HostPathVolumeSource(ref), + "k8s.io/api/core/v1.ISCSIPersistentVolumeSource": schema_k8sio_api_core_v1_ISCSIPersistentVolumeSource(ref), + "k8s.io/api/core/v1.ISCSIVolumeSource": schema_k8sio_api_core_v1_ISCSIVolumeSource(ref), + "k8s.io/api/core/v1.KeyToPath": schema_k8sio_api_core_v1_KeyToPath(ref), + "k8s.io/api/core/v1.Lifecycle": schema_k8sio_api_core_v1_Lifecycle(ref), + "k8s.io/api/core/v1.LimitRange": schema_k8sio_api_core_v1_LimitRange(ref), + "k8s.io/api/core/v1.LimitRangeItem": schema_k8sio_api_core_v1_LimitRangeItem(ref), 
+ "k8s.io/api/core/v1.LimitRangeList": schema_k8sio_api_core_v1_LimitRangeList(ref), + "k8s.io/api/core/v1.LimitRangeSpec": schema_k8sio_api_core_v1_LimitRangeSpec(ref), + "k8s.io/api/core/v1.List": schema_k8sio_api_core_v1_List(ref), + "k8s.io/api/core/v1.LoadBalancerIngress": schema_k8sio_api_core_v1_LoadBalancerIngress(ref), + "k8s.io/api/core/v1.LoadBalancerStatus": schema_k8sio_api_core_v1_LoadBalancerStatus(ref), + "k8s.io/api/core/v1.LocalObjectReference": schema_k8sio_api_core_v1_LocalObjectReference(ref), + "k8s.io/api/core/v1.LocalVolumeSource": schema_k8sio_api_core_v1_LocalVolumeSource(ref), + "k8s.io/api/core/v1.NFSVolumeSource": schema_k8sio_api_core_v1_NFSVolumeSource(ref), + "k8s.io/api/core/v1.Namespace": schema_k8sio_api_core_v1_Namespace(ref), + "k8s.io/api/core/v1.NamespaceCondition": schema_k8sio_api_core_v1_NamespaceCondition(ref), + "k8s.io/api/core/v1.NamespaceList": schema_k8sio_api_core_v1_NamespaceList(ref), + "k8s.io/api/core/v1.NamespaceSpec": schema_k8sio_api_core_v1_NamespaceSpec(ref), + "k8s.io/api/core/v1.NamespaceStatus": schema_k8sio_api_core_v1_NamespaceStatus(ref), + "k8s.io/api/core/v1.Node": schema_k8sio_api_core_v1_Node(ref), + "k8s.io/api/core/v1.NodeAddress": schema_k8sio_api_core_v1_NodeAddress(ref), + "k8s.io/api/core/v1.NodeAffinity": schema_k8sio_api_core_v1_NodeAffinity(ref), + "k8s.io/api/core/v1.NodeCondition": schema_k8sio_api_core_v1_NodeCondition(ref), + "k8s.io/api/core/v1.NodeConfigSource": schema_k8sio_api_core_v1_NodeConfigSource(ref), + "k8s.io/api/core/v1.NodeConfigStatus": schema_k8sio_api_core_v1_NodeConfigStatus(ref), + "k8s.io/api/core/v1.NodeDaemonEndpoints": schema_k8sio_api_core_v1_NodeDaemonEndpoints(ref), + "k8s.io/api/core/v1.NodeList": schema_k8sio_api_core_v1_NodeList(ref), + "k8s.io/api/core/v1.NodeProxyOptions": schema_k8sio_api_core_v1_NodeProxyOptions(ref), + "k8s.io/api/core/v1.NodeResources": schema_k8sio_api_core_v1_NodeResources(ref), + "k8s.io/api/core/v1.NodeSelector": 
schema_k8sio_api_core_v1_NodeSelector(ref), + "k8s.io/api/core/v1.NodeSelectorRequirement": schema_k8sio_api_core_v1_NodeSelectorRequirement(ref), + "k8s.io/api/core/v1.NodeSelectorTerm": schema_k8sio_api_core_v1_NodeSelectorTerm(ref), + "k8s.io/api/core/v1.NodeSpec": schema_k8sio_api_core_v1_NodeSpec(ref), + "k8s.io/api/core/v1.NodeStatus": schema_k8sio_api_core_v1_NodeStatus(ref), + "k8s.io/api/core/v1.NodeSystemInfo": schema_k8sio_api_core_v1_NodeSystemInfo(ref), + "k8s.io/api/core/v1.ObjectFieldSelector": schema_k8sio_api_core_v1_ObjectFieldSelector(ref), + "k8s.io/api/core/v1.ObjectReference": schema_k8sio_api_core_v1_ObjectReference(ref), + "k8s.io/api/core/v1.PersistentVolume": schema_k8sio_api_core_v1_PersistentVolume(ref), + "k8s.io/api/core/v1.PersistentVolumeClaim": schema_k8sio_api_core_v1_PersistentVolumeClaim(ref), + "k8s.io/api/core/v1.PersistentVolumeClaimCondition": schema_k8sio_api_core_v1_PersistentVolumeClaimCondition(ref), + "k8s.io/api/core/v1.PersistentVolumeClaimList": schema_k8sio_api_core_v1_PersistentVolumeClaimList(ref), + "k8s.io/api/core/v1.PersistentVolumeClaimSpec": schema_k8sio_api_core_v1_PersistentVolumeClaimSpec(ref), + "k8s.io/api/core/v1.PersistentVolumeClaimStatus": schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref), + "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource": schema_k8sio_api_core_v1_PersistentVolumeClaimVolumeSource(ref), + "k8s.io/api/core/v1.PersistentVolumeList": schema_k8sio_api_core_v1_PersistentVolumeList(ref), + "k8s.io/api/core/v1.PersistentVolumeSource": schema_k8sio_api_core_v1_PersistentVolumeSource(ref), + "k8s.io/api/core/v1.PersistentVolumeSpec": schema_k8sio_api_core_v1_PersistentVolumeSpec(ref), + "k8s.io/api/core/v1.PersistentVolumeStatus": schema_k8sio_api_core_v1_PersistentVolumeStatus(ref), + "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource": schema_k8sio_api_core_v1_PhotonPersistentDiskVolumeSource(ref), + "k8s.io/api/core/v1.Pod": schema_k8sio_api_core_v1_Pod(ref), + 
"k8s.io/api/core/v1.PodAffinity": schema_k8sio_api_core_v1_PodAffinity(ref), + "k8s.io/api/core/v1.PodAffinityTerm": schema_k8sio_api_core_v1_PodAffinityTerm(ref), + "k8s.io/api/core/v1.PodAntiAffinity": schema_k8sio_api_core_v1_PodAntiAffinity(ref), + "k8s.io/api/core/v1.PodAttachOptions": schema_k8sio_api_core_v1_PodAttachOptions(ref), + "k8s.io/api/core/v1.PodCondition": schema_k8sio_api_core_v1_PodCondition(ref), + "k8s.io/api/core/v1.PodDNSConfig": schema_k8sio_api_core_v1_PodDNSConfig(ref), + "k8s.io/api/core/v1.PodDNSConfigOption": schema_k8sio_api_core_v1_PodDNSConfigOption(ref), + "k8s.io/api/core/v1.PodExecOptions": schema_k8sio_api_core_v1_PodExecOptions(ref), + "k8s.io/api/core/v1.PodIP": schema_k8sio_api_core_v1_PodIP(ref), + "k8s.io/api/core/v1.PodList": schema_k8sio_api_core_v1_PodList(ref), + "k8s.io/api/core/v1.PodLogOptions": schema_k8sio_api_core_v1_PodLogOptions(ref), + "k8s.io/api/core/v1.PodPortForwardOptions": schema_k8sio_api_core_v1_PodPortForwardOptions(ref), + "k8s.io/api/core/v1.PodProxyOptions": schema_k8sio_api_core_v1_PodProxyOptions(ref), + "k8s.io/api/core/v1.PodReadinessGate": schema_k8sio_api_core_v1_PodReadinessGate(ref), + "k8s.io/api/core/v1.PodSecurityContext": schema_k8sio_api_core_v1_PodSecurityContext(ref), + "k8s.io/api/core/v1.PodSignature": schema_k8sio_api_core_v1_PodSignature(ref), + "k8s.io/api/core/v1.PodSpec": schema_k8sio_api_core_v1_PodSpec(ref), + "k8s.io/api/core/v1.PodStatus": schema_k8sio_api_core_v1_PodStatus(ref), + "k8s.io/api/core/v1.PodStatusResult": schema_k8sio_api_core_v1_PodStatusResult(ref), + "k8s.io/api/core/v1.PodTemplate": schema_k8sio_api_core_v1_PodTemplate(ref), + "k8s.io/api/core/v1.PodTemplateList": schema_k8sio_api_core_v1_PodTemplateList(ref), + "k8s.io/api/core/v1.PodTemplateSpec": schema_k8sio_api_core_v1_PodTemplateSpec(ref), + "k8s.io/api/core/v1.PortworxVolumeSource": schema_k8sio_api_core_v1_PortworxVolumeSource(ref), + "k8s.io/api/core/v1.PreferAvoidPodsEntry": 
schema_k8sio_api_core_v1_PreferAvoidPodsEntry(ref), + "k8s.io/api/core/v1.PreferredSchedulingTerm": schema_k8sio_api_core_v1_PreferredSchedulingTerm(ref), + "k8s.io/api/core/v1.Probe": schema_k8sio_api_core_v1_Probe(ref), + "k8s.io/api/core/v1.ProjectedVolumeSource": schema_k8sio_api_core_v1_ProjectedVolumeSource(ref), + "k8s.io/api/core/v1.QuobyteVolumeSource": schema_k8sio_api_core_v1_QuobyteVolumeSource(ref), + "k8s.io/api/core/v1.RBDPersistentVolumeSource": schema_k8sio_api_core_v1_RBDPersistentVolumeSource(ref), + "k8s.io/api/core/v1.RBDVolumeSource": schema_k8sio_api_core_v1_RBDVolumeSource(ref), + "k8s.io/api/core/v1.RangeAllocation": schema_k8sio_api_core_v1_RangeAllocation(ref), + "k8s.io/api/core/v1.ReplicationController": schema_k8sio_api_core_v1_ReplicationController(ref), + "k8s.io/api/core/v1.ReplicationControllerCondition": schema_k8sio_api_core_v1_ReplicationControllerCondition(ref), + "k8s.io/api/core/v1.ReplicationControllerList": schema_k8sio_api_core_v1_ReplicationControllerList(ref), + "k8s.io/api/core/v1.ReplicationControllerSpec": schema_k8sio_api_core_v1_ReplicationControllerSpec(ref), + "k8s.io/api/core/v1.ReplicationControllerStatus": schema_k8sio_api_core_v1_ReplicationControllerStatus(ref), + "k8s.io/api/core/v1.ResourceFieldSelector": schema_k8sio_api_core_v1_ResourceFieldSelector(ref), + "k8s.io/api/core/v1.ResourceQuota": schema_k8sio_api_core_v1_ResourceQuota(ref), + "k8s.io/api/core/v1.ResourceQuotaList": schema_k8sio_api_core_v1_ResourceQuotaList(ref), + "k8s.io/api/core/v1.ResourceQuotaSpec": schema_k8sio_api_core_v1_ResourceQuotaSpec(ref), + "k8s.io/api/core/v1.ResourceQuotaStatus": schema_k8sio_api_core_v1_ResourceQuotaStatus(ref), + "k8s.io/api/core/v1.ResourceRequirements": schema_k8sio_api_core_v1_ResourceRequirements(ref), + "k8s.io/api/core/v1.SELinuxOptions": schema_k8sio_api_core_v1_SELinuxOptions(ref), + "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource": schema_k8sio_api_core_v1_ScaleIOPersistentVolumeSource(ref), + 
"k8s.io/api/core/v1.ScaleIOVolumeSource": schema_k8sio_api_core_v1_ScaleIOVolumeSource(ref), + "k8s.io/api/core/v1.ScopeSelector": schema_k8sio_api_core_v1_ScopeSelector(ref), + "k8s.io/api/core/v1.ScopedResourceSelectorRequirement": schema_k8sio_api_core_v1_ScopedResourceSelectorRequirement(ref), + "k8s.io/api/core/v1.Secret": schema_k8sio_api_core_v1_Secret(ref), + "k8s.io/api/core/v1.SecretEnvSource": schema_k8sio_api_core_v1_SecretEnvSource(ref), + "k8s.io/api/core/v1.SecretKeySelector": schema_k8sio_api_core_v1_SecretKeySelector(ref), + "k8s.io/api/core/v1.SecretList": schema_k8sio_api_core_v1_SecretList(ref), + "k8s.io/api/core/v1.SecretProjection": schema_k8sio_api_core_v1_SecretProjection(ref), + "k8s.io/api/core/v1.SecretReference": schema_k8sio_api_core_v1_SecretReference(ref), + "k8s.io/api/core/v1.SecretVolumeSource": schema_k8sio_api_core_v1_SecretVolumeSource(ref), + "k8s.io/api/core/v1.SecurityContext": schema_k8sio_api_core_v1_SecurityContext(ref), + "k8s.io/api/core/v1.SerializedReference": schema_k8sio_api_core_v1_SerializedReference(ref), + "k8s.io/api/core/v1.Service": schema_k8sio_api_core_v1_Service(ref), + "k8s.io/api/core/v1.ServiceAccount": schema_k8sio_api_core_v1_ServiceAccount(ref), + "k8s.io/api/core/v1.ServiceAccountList": schema_k8sio_api_core_v1_ServiceAccountList(ref), + "k8s.io/api/core/v1.ServiceAccountTokenProjection": schema_k8sio_api_core_v1_ServiceAccountTokenProjection(ref), + "k8s.io/api/core/v1.ServiceList": schema_k8sio_api_core_v1_ServiceList(ref), + "k8s.io/api/core/v1.ServicePort": schema_k8sio_api_core_v1_ServicePort(ref), + "k8s.io/api/core/v1.ServiceProxyOptions": schema_k8sio_api_core_v1_ServiceProxyOptions(ref), + "k8s.io/api/core/v1.ServiceSpec": schema_k8sio_api_core_v1_ServiceSpec(ref), + "k8s.io/api/core/v1.ServiceStatus": schema_k8sio_api_core_v1_ServiceStatus(ref), + "k8s.io/api/core/v1.SessionAffinityConfig": schema_k8sio_api_core_v1_SessionAffinityConfig(ref), + 
"k8s.io/api/core/v1.StorageOSPersistentVolumeSource": schema_k8sio_api_core_v1_StorageOSPersistentVolumeSource(ref), + "k8s.io/api/core/v1.StorageOSVolumeSource": schema_k8sio_api_core_v1_StorageOSVolumeSource(ref), + "k8s.io/api/core/v1.Sysctl": schema_k8sio_api_core_v1_Sysctl(ref), + "k8s.io/api/core/v1.TCPSocketAction": schema_k8sio_api_core_v1_TCPSocketAction(ref), + "k8s.io/api/core/v1.Taint": schema_k8sio_api_core_v1_Taint(ref), + "k8s.io/api/core/v1.Toleration": schema_k8sio_api_core_v1_Toleration(ref), + "k8s.io/api/core/v1.TopologySelectorLabelRequirement": schema_k8sio_api_core_v1_TopologySelectorLabelRequirement(ref), + "k8s.io/api/core/v1.TopologySelectorTerm": schema_k8sio_api_core_v1_TopologySelectorTerm(ref), + "k8s.io/api/core/v1.TopologySpreadConstraint": schema_k8sio_api_core_v1_TopologySpreadConstraint(ref), + "k8s.io/api/core/v1.TypedLocalObjectReference": schema_k8sio_api_core_v1_TypedLocalObjectReference(ref), + "k8s.io/api/core/v1.Volume": schema_k8sio_api_core_v1_Volume(ref), + "k8s.io/api/core/v1.VolumeDevice": schema_k8sio_api_core_v1_VolumeDevice(ref), + "k8s.io/api/core/v1.VolumeMount": schema_k8sio_api_core_v1_VolumeMount(ref), + "k8s.io/api/core/v1.VolumeNodeAffinity": schema_k8sio_api_core_v1_VolumeNodeAffinity(ref), + "k8s.io/api/core/v1.VolumeProjection": schema_k8sio_api_core_v1_VolumeProjection(ref), + "k8s.io/api/core/v1.VolumeSource": schema_k8sio_api_core_v1_VolumeSource(ref), + "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource": schema_k8sio_api_core_v1_VsphereVirtualDiskVolumeSource(ref), + "k8s.io/api/core/v1.WeightedPodAffinityTerm": schema_k8sio_api_core_v1_WeightedPodAffinityTerm(ref), + "k8s.io/api/core/v1.WindowsSecurityContextOptions": schema_k8sio_api_core_v1_WindowsSecurityContextOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup": schema_pkg_apis_meta_v1_APIGroup(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList": schema_pkg_apis_meta_v1_APIGroupList(ref), + 
"k8s.io/apimachinery/pkg/apis/meta/v1.APIResource": schema_pkg_apis_meta_v1_APIResource(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIResourceList": schema_pkg_apis_meta_v1_APIResourceList(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIVersions": schema_pkg_apis_meta_v1_APIVersions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.CreateOptions": schema_pkg_apis_meta_v1_CreateOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.DeleteOptions": schema_pkg_apis_meta_v1_DeleteOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration": schema_pkg_apis_meta_v1_Duration(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ExportOptions": schema_pkg_apis_meta_v1_ExportOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1": schema_pkg_apis_meta_v1_FieldsV1(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GetOptions": schema_pkg_apis_meta_v1_GetOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupKind": schema_pkg_apis_meta_v1_GroupKind(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupResource": schema_pkg_apis_meta_v1_GroupResource(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersion": schema_pkg_apis_meta_v1_GroupVersion(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery": schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionKind": schema_pkg_apis_meta_v1_GroupVersionKind(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionResource": schema_pkg_apis_meta_v1_GroupVersionResource(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.InternalEvent": schema_pkg_apis_meta_v1_InternalEvent(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector": schema_pkg_apis_meta_v1_LabelSelector(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement": schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.List": schema_pkg_apis_meta_v1_List(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta": schema_pkg_apis_meta_v1_ListMeta(ref), + 
"k8s.io/apimachinery/pkg/apis/meta/v1.ListOptions": schema_pkg_apis_meta_v1_ListOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry": schema_pkg_apis_meta_v1_ManagedFieldsEntry(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime": schema_pkg_apis_meta_v1_MicroTime(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta": schema_pkg_apis_meta_v1_ObjectMeta(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference": schema_pkg_apis_meta_v1_OwnerReference(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata": schema_pkg_apis_meta_v1_PartialObjectMetadata(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadataList": schema_pkg_apis_meta_v1_PartialObjectMetadataList(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Patch": schema_pkg_apis_meta_v1_Patch(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.PatchOptions": schema_pkg_apis_meta_v1_PatchOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions": schema_pkg_apis_meta_v1_Preconditions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.RootPaths": schema_pkg_apis_meta_v1_RootPaths(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR": schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Status": schema_pkg_apis_meta_v1_Status(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause": schema_pkg_apis_meta_v1_StatusCause(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails": schema_pkg_apis_meta_v1_StatusDetails(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Table": schema_pkg_apis_meta_v1_Table(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition": schema_pkg_apis_meta_v1_TableColumnDefinition(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TableOptions": schema_pkg_apis_meta_v1_TableOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TableRow": schema_pkg_apis_meta_v1_TableRow(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition": 
schema_pkg_apis_meta_v1_TableRowCondition(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Time": schema_pkg_apis_meta_v1_Time(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Timestamp": schema_pkg_apis_meta_v1_Timestamp(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta": schema_pkg_apis_meta_v1_TypeMeta(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.UpdateOptions": schema_pkg_apis_meta_v1_UpdateOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.WatchEvent": schema_pkg_apis_meta_v1_WatchEvent(ref), + "k8s.io/apimachinery/pkg/runtime.RawExtension": schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref), + "k8s.io/apimachinery/pkg/runtime.TypeMeta": schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref), + "k8s.io/apimachinery/pkg/runtime.Unknown": schema_k8sio_apimachinery_pkg_runtime_Unknown(ref), + "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.Agent": schema_pkg_apis_cluster_v1alpha1_Agent(ref), + "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.AgentCondition": schema_pkg_apis_cluster_v1alpha1_AgentCondition(ref), + "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.AgentList": schema_pkg_apis_cluster_v1alpha1_AgentList(ref), + "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.AgentSpec": schema_pkg_apis_cluster_v1alpha1_AgentSpec(ref), + "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.AgentStatus": schema_pkg_apis_cluster_v1alpha1_AgentStatus(ref), + "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.Cluster": schema_pkg_apis_cluster_v1alpha1_Cluster(ref), + "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.ClusterList": schema_pkg_apis_cluster_v1alpha1_ClusterList(ref), + "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.ClusterSpec": schema_pkg_apis_cluster_v1alpha1_ClusterSpec(ref), + "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.ClusterStatus": schema_pkg_apis_cluster_v1alpha1_ClusterStatus(ref), + } +} + +func schema_k8sio_api_core_v1_AWSElasticBlockStoreVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + 
return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeID": { + SchemaProps: spec.SchemaProps{ + Description: "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Type: []string{"string"}, + Format: "", + }, + }, + "partition": { + SchemaProps: spec.SchemaProps{ + Description: "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"volumeID"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_Affinity(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Affinity is a group of affinity scheduling rules.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nodeAffinity": { + SchemaProps: spec.SchemaProps{ + Description: "Describes node affinity scheduling rules for the pod.", + Ref: ref("k8s.io/api/core/v1.NodeAffinity"), + }, + }, + "podAffinity": { + SchemaProps: spec.SchemaProps{ + Description: "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", + Ref: ref("k8s.io/api/core/v1.PodAffinity"), + }, + }, + "podAntiAffinity": { + SchemaProps: spec.SchemaProps{ + Description: "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. 
as some other pod(s)).", + Ref: ref("k8s.io/api/core/v1.PodAntiAffinity"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NodeAffinity", "k8s.io/api/core/v1.PodAffinity", "k8s.io/api/core/v1.PodAntiAffinity"}, + } +} + +func schema_k8sio_api_core_v1_AttachedVolume(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AttachedVolume describes a volume attached to a node", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the attached volume", + Type: []string{"string"}, + Format: "", + }, + }, + "devicePath": { + SchemaProps: spec.SchemaProps{ + Description: "DevicePath represents the device path where the volume should be available", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "devicePath"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_AvoidPods(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AvoidPods describes pods that should avoid this node. This is the value for a Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and will eventually become a field of NodeStatus.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "preferAvoidPods": { + SchemaProps: spec.SchemaProps{ + Description: "Bounded-sized list of signatures of pods that should avoid this node, sorted in timestamp order from oldest to newest. 
Size of the slice is unspecified.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PreferAvoidPodsEntry"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PreferAvoidPodsEntry"}, + } +} + +func schema_k8sio_api_core_v1_AzureDiskVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "diskName": { + SchemaProps: spec.SchemaProps{ + Description: "The Name of the data disk in the blob storage", + Type: []string{"string"}, + Format: "", + }, + }, + "diskURI": { + SchemaProps: spec.SchemaProps{ + Description: "The URI the data disk in the blob storage", + Type: []string{"string"}, + Format: "", + }, + }, + "cachingMode": { + SchemaProps: spec.SchemaProps{ + Description: "Host Caching mode: None, Read Only, Read Write.", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). 
defaults to shared", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"diskName", "diskURI"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_AzureFilePersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "secretName": { + SchemaProps: spec.SchemaProps{ + Description: "the name of secret that contains Azure Storage Account Name and Key", + Type: []string{"string"}, + Format: "", + }, + }, + "shareName": { + SchemaProps: spec.SchemaProps{ + Description: "Share Name", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "secretNamespace": { + SchemaProps: spec.SchemaProps{ + Description: "the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"secretName", "shareName"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_AzureFileVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "secretName": { + SchemaProps: spec.SchemaProps{ + Description: "the name of secret that contains Azure Storage Account Name and Key", + Type: []string{"string"}, + Format: "", + }, + }, + "shareName": { + SchemaProps: spec.SchemaProps{ + Description: 
"Share Name", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"secretName", "shareName"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_Binding(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "target": { + SchemaProps: spec.SchemaProps{ + Description: "The target object that you want to bind to the standard object.", + Ref: ref("k8s.io/api/core/v1.ObjectReference"), + }, + }, + }, + Required: []string{"target"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_CSIPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents storage that is managed by an external CSI volume driver (Beta feature)", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "driver": { + SchemaProps: spec.SchemaProps{ + Description: "Driver is the name of the driver to use for this volume. Required.", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeHandle": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", + Type: []string{"boolean"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
\"ext4\", \"xfs\", \"ntfs\".", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeAttributes": { + SchemaProps: spec.SchemaProps{ + Description: "Attributes of the volume to publish.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "controllerPublishSecretRef": { + SchemaProps: spec.SchemaProps{ + Description: "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + Ref: ref("k8s.io/api/core/v1.SecretReference"), + }, + }, + "nodeStageSecretRef": { + SchemaProps: spec.SchemaProps{ + Description: "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + Ref: ref("k8s.io/api/core/v1.SecretReference"), + }, + }, + "nodePublishSecretRef": { + SchemaProps: spec.SchemaProps{ + Description: "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", + Ref: ref("k8s.io/api/core/v1.SecretReference"), + }, + }, + "controllerExpandSecretRef": { + SchemaProps: spec.SchemaProps{ + Description: "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + Ref: ref("k8s.io/api/core/v1.SecretReference"), + }, + }, + }, + Required: []string{"driver", "volumeHandle"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretReference"}, + } +} + +func schema_k8sio_api_core_v1_CSIVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a source location of a volume to mount, managed by an external CSI driver", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "driver": { + SchemaProps: spec.SchemaProps{ + Description: "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies a read-only configuration for the volume. Defaults to false (read/write).", + Type: []string{"boolean"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". 
If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeAttributes": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "nodePublishSecretRef": { + SchemaProps: spec.SchemaProps{ + Description: "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + Required: []string{"driver"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference"}, + } +} + +func schema_k8sio_api_core_v1_Capabilities(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Adds and removes POSIX capabilities from running containers.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "add": { + SchemaProps: spec.SchemaProps{ + Description: "Added capabilities", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "drop": { + SchemaProps: spec.SchemaProps{ + Description: "Removed capabilities", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: 
spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_CephFSPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "monitors": { + SchemaProps: spec.SchemaProps{ + Description: "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + Type: []string{"string"}, + Format: "", + }, + }, + "user": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "secretFile": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: SecretRef is reference to the authentication secret for User, default is empty. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + Ref: ref("k8s.io/api/core/v1.SecretReference"), + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"monitors"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretReference"}, + } +} + +func schema_k8sio_api_core_v1_CephFSVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "monitors": { + SchemaProps: spec.SchemaProps{ + Description: "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + Type: []string{"string"}, + Format: "", + }, + }, + "user": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "secretFile": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: 
https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"monitors"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference"}, + } +} + +func schema_k8sio_api_core_v1_CinderPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeID": { + SchemaProps: spec.SchemaProps{ + Description: "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Type: []string{"boolean"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: points to a secret object containing parameters used to connect to OpenStack.", + Ref: ref("k8s.io/api/core/v1.SecretReference"), + }, + }, + }, + Required: []string{"volumeID"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretReference"}, + } +} + +func schema_k8sio_api_core_v1_CinderVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeID": { + SchemaProps: spec.SchemaProps{ + Description: "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Type: []string{"boolean"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: points to a secret object containing parameters used to connect to OpenStack.", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + Required: []string{"volumeID"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference"}, + } +} + +func schema_k8sio_api_core_v1_ClientIPConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClientIPConfig represents the configurations of Client IP based session affinity.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "timeoutSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ComponentCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Information about the condition of a component.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of condition for a component. Valid value: \"Healthy\"", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the condition for a component. 
Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "Message about the condition for a component. For example, information about a health check.", + Type: []string{"string"}, + Format: "", + }, + }, + "error": { + SchemaProps: spec.SchemaProps{ + Description: "Condition error code for a component. For example, a health check error code.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ComponentStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of component conditions observed", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ComponentCondition"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ComponentCondition", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_ComponentStatusList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Status of all the conditions for the component as a list of ComponentStatus objects.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of ComponentStatus objects.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ComponentStatus"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ComponentStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_ConfigMap(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ConfigMap holds configuration data for pods to consume.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "data": { + SchemaProps: spec.SchemaProps{ + Description: "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "binaryData": { + SchemaProps: spec.SchemaProps{ + Description: "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. 
Using this field will require 1.10+ apiserver and kubelet.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "byte", + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_ConfigMapEnvSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Type: []string{"string"}, + Format: "", + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Specify whether the ConfigMap must be defined", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ConfigMapKeySelector(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Selects a key from a ConfigMap.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Type: []string{"string"}, + Format: "", + }, + }, + "key": { + SchemaProps: spec.SchemaProps{ + Description: "The key to select.", + Type: []string{"string"}, + Format: "", + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Specify whether the ConfigMap or its key must be defined", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"key"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ConfigMapList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ConfigMapList is a resource containing a list of ConfigMap objects.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is the list of ConfigMaps.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ConfigMap"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMap", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_ConfigMapNodeConfigSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "namespace": { + SchemaProps: spec.SchemaProps{ + Description: "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.", + Type: []string{"string"}, + Format: "", + }, + }, + "uid": { + SchemaProps: spec.SchemaProps{ + Description: "UID is the metadata.UID of the referenced ConfigMap. 
This field is forbidden in Node.Spec, and required in Node.Status.", + Type: []string{"string"}, + Format: "", + }, + }, + "resourceVersion": { + SchemaProps: spec.SchemaProps{ + Description: "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", + Type: []string{"string"}, + Format: "", + }, + }, + "kubeletConfigKey": { + SchemaProps: spec.SchemaProps{ + Description: "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"namespace", "name", "kubeletConfigKey"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ConfigMapProjection(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Type: []string{"string"}, + Format: "", + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.KeyToPath"), + }, + }, + }, + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Specify whether the ConfigMap or its keys must be defined", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.KeyToPath"}, + } +} + +func schema_k8sio_api_core_v1_ConfigMapVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Type: []string{"string"}, + Format: "", + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. 
Paths must be relative and may not contain the '..' path or start with '..'.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.KeyToPath"), + }, + }, + }, + }, + }, + "defaultMode": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Specify whether the ConfigMap or its keys must be defined", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.KeyToPath"}, + } +} + +func schema_k8sio_api_core_v1_Container(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A single application container that you want to run within a pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. 
Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. 
Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "ports": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "containerPort", + "protocol", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ContainerPort"), + }, + }, + }, + }, + }, + "envFrom": { + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "livenessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "readinessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of container service readiness. 
Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "startupProbe": { + SchemaProps: spec.SchemaProps{ + Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is an alpha feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "lifecycle": { + SchemaProps: spec.SchemaProps{ + Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + Ref: ref("k8s.io/api/core/v1.Lifecycle"), + }, + }, + "terminationMessagePath": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "terminationMessagePolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. 
FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "stdin": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stdinOnce": { + SchemaProps: spec.SchemaProps{ + Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tty": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_k8sio_api_core_v1_ContainerImage(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Describe a container image", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "names": { + SchemaProps: spec.SchemaProps{ + Description: "Names by which this image is known. e.g. 
[\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "sizeBytes": { + SchemaProps: spec.SchemaProps{ + Description: "The size of the image in bytes.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + Required: []string{"names"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ContainerPort(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ContainerPort represents a network port in a single container.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.", + Type: []string{"string"}, + Format: "", + }, + }, + "hostPort": { + SchemaProps: spec.SchemaProps{ + Description: "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "containerPort": { + SchemaProps: spec.SchemaProps{ + Description: "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "protocol": { + SchemaProps: spec.SchemaProps{ + Description: "Protocol for port. Must be UDP, TCP, or SCTP. 
Defaults to \"TCP\".", + Type: []string{"string"}, + Format: "", + }, + }, + "hostIP": { + SchemaProps: spec.SchemaProps{ + Description: "What host IP to bind the external port to.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"containerPort"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ContainerState(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "waiting": { + SchemaProps: spec.SchemaProps{ + Description: "Details about a waiting container", + Ref: ref("k8s.io/api/core/v1.ContainerStateWaiting"), + }, + }, + "running": { + SchemaProps: spec.SchemaProps{ + Description: "Details about a running container", + Ref: ref("k8s.io/api/core/v1.ContainerStateRunning"), + }, + }, + "terminated": { + SchemaProps: spec.SchemaProps{ + Description: "Details about a terminated container", + Ref: ref("k8s.io/api/core/v1.ContainerStateTerminated"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"}, + } +} + +func schema_k8sio_api_core_v1_ContainerStateRunning(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ContainerStateRunning is a running state of a container.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "startedAt": { + SchemaProps: spec.SchemaProps{ + Description: "Time at which the container was last (re-)started", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + }, + }, + }, + Dependencies: 
[]string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_k8sio_api_core_v1_ContainerStateTerminated(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ContainerStateTerminated is a terminated state of a container.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "exitCode": { + SchemaProps: spec.SchemaProps{ + Description: "Exit status from the last termination of the container", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "signal": { + SchemaProps: spec.SchemaProps{ + Description: "Signal from the last termination of the container", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "(brief) reason from the last termination of the container", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "Message regarding the last termination of the container", + Type: []string{"string"}, + Format: "", + }, + }, + "startedAt": { + SchemaProps: spec.SchemaProps{ + Description: "Time at which previous execution of the container started", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "finishedAt": { + SchemaProps: spec.SchemaProps{ + Description: "Time at which the container last terminated", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "containerID": { + SchemaProps: spec.SchemaProps{ + Description: "Container's ID in the format 'docker://'", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"exitCode"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_k8sio_api_core_v1_ContainerStateWaiting(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + 
Description: "ContainerStateWaiting is a waiting state of a container.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "(brief) reason the container is not yet running.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "Message regarding why the container is not yet running.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ContainerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ContainerStatus contains details for the current status of this container.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "state": { + SchemaProps: spec.SchemaProps{ + Description: "Details about the container's current condition.", + Ref: ref("k8s.io/api/core/v1.ContainerState"), + }, + }, + "lastState": { + SchemaProps: spec.SchemaProps{ + Description: "Details about the container's last termination condition.", + Ref: ref("k8s.io/api/core/v1.ContainerState"), + }, + }, + "ready": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies whether the container has passed its readiness probe.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "restartCount": { + SchemaProps: spec.SchemaProps{ + Description: "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. 
This value will get capped at 5 by GC.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images", + Type: []string{"string"}, + Format: "", + }, + }, + "imageID": { + SchemaProps: spec.SchemaProps{ + Description: "ImageID of the container's image.", + Type: []string{"string"}, + Format: "", + }, + }, + "containerID": { + SchemaProps: spec.SchemaProps{ + Description: "Container's ID in the format 'docker://'.", + Type: []string{"string"}, + Format: "", + }, + }, + "started": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"name", "ready", "restartCount", "image", "imageID"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ContainerState"}, + } +} + +func schema_k8sio_api_core_v1_DaemonEndpoint(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DaemonEndpoint contains information about a single Daemon endpoint.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "Port": { + SchemaProps: spec.SchemaProps{ + Description: "Port number of the given endpoint.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"Port"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_DownwardAPIProjection(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents downward API info for 
projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is a list of DownwardAPIVolume file", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeFile"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.DownwardAPIVolumeFile"}, + } +} + +func schema_k8sio_api_core_v1_DownwardAPIVolumeFile(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DownwardAPIVolumeFile represents information to create the file containing the pod field", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", + Type: []string{"string"}, + Format: "", + }, + }, + "fieldRef": { + SchemaProps: spec.SchemaProps{ + Description: "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + Ref: ref("k8s.io/api/core/v1.ObjectFieldSelector"), + }, + }, + "resourceFieldRef": { + SchemaProps: spec.SchemaProps{ + Description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + Ref: ref("k8s.io/api/core/v1.ResourceFieldSelector"), + }, + }, + "mode": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"path"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ObjectFieldSelector", "k8s.io/api/core/v1.ResourceFieldSelector"}, + } +} + +func schema_k8sio_api_core_v1_DownwardAPIVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is a list of downward API volume file", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeFile"), + }, + }, + }, + }, + }, + "defaultMode": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.DownwardAPIVolumeFile"}, + } +} + +func schema_k8sio_api_core_v1_EmptyDirVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents an empty directory for a pod. 
Empty directory volumes support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "medium": { + SchemaProps: spec.SchemaProps{ + Description: "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Type: []string{"string"}, + Format: "", + }, + }, + "sizeLimit": { + SchemaProps: spec.SchemaProps{ + Description: "Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + } +} + +func schema_k8sio_api_core_v1_EndpointAddress(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EndpointAddress is a tuple that describes single IP address.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "ip": { + SchemaProps: spec.SchemaProps{ + Description: "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. 
Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", + Type: []string{"string"}, + Format: "", + }, + }, + "hostname": { + SchemaProps: spec.SchemaProps{ + Description: "The Hostname of this endpoint", + Type: []string{"string"}, + Format: "", + }, + }, + "nodeName": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", + Type: []string{"string"}, + Format: "", + }, + }, + "targetRef": { + SchemaProps: spec.SchemaProps{ + Description: "Reference to object providing the endpoint.", + Ref: ref("k8s.io/api/core/v1.ObjectReference"), + }, + }, + }, + Required: []string{"ip"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ObjectReference"}, + } +} + +func schema_k8sio_api_core_v1_EndpointPort(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EndpointPort is a tuple that describes a single port.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", + Type: []string{"string"}, + Format: "", + }, + }, + "port": { + SchemaProps: spec.SchemaProps{ + Description: "The port number of the endpoint.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "protocol": { + SchemaProps: spec.SchemaProps{ + Description: "The IP protocol for this port. Must be UDP, TCP, or SCTP. 
Default is TCP.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"port"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_EndpointSubset(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "addresses": { + SchemaProps: spec.SchemaProps{ + Description: "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.EndpointAddress"), + }, + }, + }, + }, + }, + "notReadyAddresses": { + SchemaProps: spec.SchemaProps{ + Description: "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.EndpointAddress"), + }, + }, + }, + }, + }, + "ports": { + SchemaProps: spec.SchemaProps{ + Description: "Port numbers available on the related IP addresses.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: 
ref("k8s.io/api/core/v1.EndpointPort"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.EndpointAddress", "k8s.io/api/core/v1.EndpointPort"}, + } +} + +func schema_k8sio_api_core_v1_Endpoints(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "subsets": { + SchemaProps: spec.SchemaProps{ + Description: "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.EndpointSubset"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.EndpointSubset", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_EndpointsList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EndpointsList is a list of endpoints.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of endpoints.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Endpoints"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Endpoints", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_EnvFromSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EnvFromSource represents the source of a set of ConfigMaps", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "prefix": { + SchemaProps: spec.SchemaProps{ + Description: "An optional identifier to prepend to each key in the ConfigMap. 
Must be a C_IDENTIFIER.", + Type: []string{"string"}, + Format: "", + }, + }, + "configMapRef": { + SchemaProps: spec.SchemaProps{ + Description: "The ConfigMap to select from", + Ref: ref("k8s.io/api/core/v1.ConfigMapEnvSource"), + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "The Secret to select from", + Ref: ref("k8s.io/api/core/v1.SecretEnvSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapEnvSource", "k8s.io/api/core/v1.SecretEnvSource"}, + } +} + +func schema_k8sio_api_core_v1_EnvVar(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EnvVar represents an environment variable present in a Container.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the environment variable. Must be a C_IDENTIFIER.", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", + Type: []string{"string"}, + Format: "", + }, + }, + "valueFrom": { + SchemaProps: spec.SchemaProps{ + Description: "Source for the environment variable's value. 
Cannot be used if value is not empty.", + Ref: ref("k8s.io/api/core/v1.EnvVarSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.EnvVarSource"}, + } +} + +func schema_k8sio_api_core_v1_EnvVarSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EnvVarSource represents a source for the value of an EnvVar.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "fieldRef": { + SchemaProps: spec.SchemaProps{ + Description: "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.", + Ref: ref("k8s.io/api/core/v1.ObjectFieldSelector"), + }, + }, + "resourceFieldRef": { + SchemaProps: spec.SchemaProps{ + Description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", + Ref: ref("k8s.io/api/core/v1.ResourceFieldSelector"), + }, + }, + "configMapKeyRef": { + SchemaProps: spec.SchemaProps{ + Description: "Selects a key of a ConfigMap.", + Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), + }, + }, + "secretKeyRef": { + SchemaProps: spec.SchemaProps{ + Description: "Selects a key of a secret in the pod's namespace", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.ObjectFieldSelector", "k8s.io/api/core/v1.ResourceFieldSelector", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_k8sio_api_core_v1_EphemeralContainer(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + 
Description: "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "ports": { + SchemaProps: spec.SchemaProps{ + Description: "Ports are not allowed for ephemeral containers.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ContainerPort"), + }, + }, + }, + }, + }, + "envFrom": { + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. 
When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container. 
This is a beta feature.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "livenessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Probes are not allowed for ephemeral containers.", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "readinessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Probes are not allowed for ephemeral containers.", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "startupProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Probes are not allowed for ephemeral containers.", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "lifecycle": { + SchemaProps: spec.SchemaProps{ + Description: "Lifecycle is not allowed for ephemeral containers.", + Ref: ref("k8s.io/api/core/v1.Lifecycle"), + }, + }, + "terminationMessagePath": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "terminationMessagePolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. 
Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext is not allowed for ephemeral containers.", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "stdin": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stdinOnce": { + SchemaProps: spec.SchemaProps{ + Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tty": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "targetContainerName": { + SchemaProps: spec.SchemaProps{ + Description: "If set, the name of the container from PodSpec that this ephemeral container targets. 
The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_k8sio_api_core_v1_EphemeralContainerCommon(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EphemeralContainerCommon is a copy of all fields in Container to be inlined in EphemeralContainer. This separate type allows easy conversion from EphemeralContainer to Container and allows separate documentation for the fields of EphemeralContainer. When a new field is added to Container it must be added here as well.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. 
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. 
Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "ports": { + SchemaProps: spec.SchemaProps{ + Description: "Ports are not allowed for ephemeral containers.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ContainerPort"), + }, + }, + }, + }, + }, + "envFrom": { + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Resources are not allowed for ephemeral containers. 
Ephemeral containers use spare resources already allocated to the pod.", + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "livenessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Probes are not allowed for ephemeral containers.", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "readinessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Probes are not allowed for ephemeral containers.", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "startupProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Probes are not allowed for ephemeral containers.", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "lifecycle": { + SchemaProps: spec.SchemaProps{ + Description: "Lifecycle is not allowed for ephemeral containers.", + Ref: ref("k8s.io/api/core/v1.Lifecycle"), + }, + }, + "terminationMessagePath": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Path at which the file to which the container's 
termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "terminationMessagePolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext is not allowed for ephemeral containers.", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "stdin": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. 
Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stdinOnce": { + SchemaProps: spec.SchemaProps{ + Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tty": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_k8sio_api_core_v1_EphemeralContainers(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A list of ephemeral containers used with the Pod ephemeralcontainers subresource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "ephemeralContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.EphemeralContainer"), + }, + }, + }, + }, + }, + }, + Required: []string{"ephemeralContainers"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.EphemeralContainer", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_Event(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Event is a report of an event somewhere in the cluster.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "involvedObject": { + SchemaProps: spec.SchemaProps{ + Description: "The object that this event is about.", + Ref: ref("k8s.io/api/core/v1.ObjectReference"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human-readable description of the status of this operation.", + Type: []string{"string"}, + Format: "", + }, + }, + "source": { + SchemaProps: spec.SchemaProps{ + Description: "The component reporting this event. Should be a short machine understandable string.", + Ref: ref("k8s.io/api/core/v1.EventSource"), + }, + }, + "firstTimestamp": { + SchemaProps: spec.SchemaProps{ + Description: "The time at which the event was first recorded. 
(Time of server receipt is in TypeMeta.)", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "lastTimestamp": { + SchemaProps: spec.SchemaProps{ + Description: "The time at which the most recent occurrence of this event was recorded.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "count": { + SchemaProps: spec.SchemaProps{ + Description: "The number of times this event has occurred.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of this event (Normal, Warning), new types could be added in the future", + Type: []string{"string"}, + Format: "", + }, + }, + "eventTime": { + SchemaProps: spec.SchemaProps{ + Description: "Time when this Event was first observed.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), + }, + }, + "series": { + SchemaProps: spec.SchemaProps{ + Description: "Data about the Event series this event represents or nil if it's a singleton Event.", + Ref: ref("k8s.io/api/core/v1.EventSeries"), + }, + }, + "action": { + SchemaProps: spec.SchemaProps{ + Description: "What action was taken/failed regarding to the Regarding object.", + Type: []string{"string"}, + Format: "", + }, + }, + "related": { + SchemaProps: spec.SchemaProps{ + Description: "Optional secondary object for more complex actions.", + Ref: ref("k8s.io/api/core/v1.ObjectReference"), + }, + }, + "reportingComponent": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + Type: []string{"string"}, + Format: "", + }, + }, + "reportingInstance": { + SchemaProps: spec.SchemaProps{ + Description: "ID of the controller instance, e.g. 
`kubelet-xyzf`.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"metadata", "involvedObject"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.EventSeries", "k8s.io/api/core/v1.EventSource", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_k8sio_api_core_v1_EventList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EventList is a list of events.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of events", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Event"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Event", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_EventSeries(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "count": { + SchemaProps: spec.SchemaProps{ + Description: "Number of occurrences in this series up to the last heartbeat time", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "lastObservedTime": { + SchemaProps: spec.SchemaProps{ + Description: "Time of the last occurrence observed", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), + }, + }, + "state": { + SchemaProps: spec.SchemaProps{ + Description: "State of this Series: Ongoing or Finished Deprecated. 
Planned removal for 1.18", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"}, + } +} + +func schema_k8sio_api_core_v1_EventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EventSource contains information for an event.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "component": { + SchemaProps: spec.SchemaProps{ + Description: "Component from which the event is generated.", + Type: []string{"string"}, + Format: "", + }, + }, + "host": { + SchemaProps: spec.SchemaProps{ + Description: "Node name on which the event is generated.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ExecAction(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ExecAction describes a \"run in container\" action.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "command": { + SchemaProps: spec.SchemaProps{ + Description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. 
Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_FCVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "targetWWNs": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: FC target worldwide names (WWNs)", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "lun": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: FC target lun number", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "wwids": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_FlexPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "driver": { + SchemaProps: spec.SchemaProps{ + Description: "Driver is the name of the driver to use for this volume.", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + Type: []string{"string"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + Ref: ref("k8s.io/api/core/v1.SecretReference"), + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "options": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Extra command options if any.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"driver"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretReference"}, + } +} + +func schema_k8sio_api_core_v1_FlexVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "driver": { + SchemaProps: spec.SchemaProps{ + Description: "Driver is the name of the driver to use for this volume.", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + Type: []string{"string"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "options": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Extra command options if any.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"driver"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference"}, + } +} + +func schema_k8sio_api_core_v1_FlockerVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "datasetName": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated", + Type: []string{"string"}, + Format: "", + }, + }, + "datasetUUID": { + SchemaProps: spec.SchemaProps{ + Description: "UUID of the dataset. This is unique identifier of a Flocker dataset", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_GCEPersistentDiskVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. 
GCE PDs support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "pdName": { + SchemaProps: spec.SchemaProps{ + Description: "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Type: []string{"string"}, + Format: "", + }, + }, + "partition": { + SchemaProps: spec.SchemaProps{ + Description: "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"pdName"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_GitRepoVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. 
Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "repository": { + SchemaProps: spec.SchemaProps{ + Description: "Repository URL", + Type: []string{"string"}, + Format: "", + }, + }, + "revision": { + SchemaProps: spec.SchemaProps{ + Description: "Commit hash for the specified revision.", + Type: []string{"string"}, + Format: "", + }, + }, + "directory": { + SchemaProps: spec.SchemaProps{ + Description: "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"repository"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_GlusterfsPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "endpoints": { + SchemaProps: spec.SchemaProps{ + Description: "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is the Glusterfs volume path. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + Type: []string{"boolean"}, + Format: "", + }, + }, + "endpointsNamespace": { + SchemaProps: spec.SchemaProps{ + Description: "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"endpoints", "path"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_GlusterfsVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "endpoints": { + SchemaProps: spec.SchemaProps{ + Description: "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"endpoints", "path"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_HTTPGetAction(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HTTPGetAction describes an action based on HTTP Get requests.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path to access on the HTTP server.", + Type: []string{"string"}, + Format: "", + }, + }, + "port": { + SchemaProps: spec.SchemaProps{ + Description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + }, + }, + "host": { + SchemaProps: spec.SchemaProps{ + Description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", + Type: []string{"string"}, + Format: "", + }, + }, + "scheme": { + SchemaProps: spec.SchemaProps{ + Description: "Scheme to use for connecting to the host. Defaults to HTTP.", + Type: []string{"string"}, + Format: "", + }, + }, + "httpHeaders": { + SchemaProps: spec.SchemaProps{ + Description: "Custom headers to set in the request. 
HTTP allows repeated headers.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.HTTPHeader"), + }, + }, + }, + }, + }, + }, + Required: []string{"port"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.HTTPHeader", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + } +} + +func schema_k8sio_api_core_v1_HTTPHeader(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HTTPHeader describes a custom header to be used in HTTP probes", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "The header field name", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "The header field value", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_Handler(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Handler defines a specific action that should be taken", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "exec": { + SchemaProps: spec.SchemaProps{ + Description: "One and only one of the following should be specified. Exec specifies the action to take.", + Ref: ref("k8s.io/api/core/v1.ExecAction"), + }, + }, + "httpGet": { + SchemaProps: spec.SchemaProps{ + Description: "HTTPGet specifies the http request to perform.", + Ref: ref("k8s.io/api/core/v1.HTTPGetAction"), + }, + }, + "tcpSocket": { + SchemaProps: spec.SchemaProps{ + Description: "TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported", + Ref: ref("k8s.io/api/core/v1.TCPSocketAction"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ExecAction", "k8s.io/api/core/v1.HTTPGetAction", "k8s.io/api/core/v1.TCPSocketAction"}, + } +} + +func schema_k8sio_api_core_v1_HostAlias(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "ip": { + SchemaProps: spec.SchemaProps{ + Description: "IP address of the host file entry.", + Type: []string{"string"}, + Format: "", + }, + }, + "hostnames": { + SchemaProps: spec.SchemaProps{ + Description: "Hostnames for the above IP address.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_HostPathVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"path"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ISCSIPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "targetPortal": { + SchemaProps: spec.SchemaProps{ + Description: "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + Type: []string{"string"}, + Format: "", + }, + }, + "iqn": { + SchemaProps: spec.SchemaProps{ + Description: "Target iSCSI Qualified Name.", + Type: []string{"string"}, + Format: "", + }, + }, + "lun": { + SchemaProps: spec.SchemaProps{ + Description: "iSCSI Target Lun number.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "iscsiInterface": { + SchemaProps: spec.SchemaProps{ + Description: "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "portals": { + SchemaProps: spec.SchemaProps{ + Description: "iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "chapAuthDiscovery": { + SchemaProps: spec.SchemaProps{ + Description: "whether support iSCSI Discovery CHAP authentication", + Type: []string{"boolean"}, + Format: "", + }, + }, + "chapAuthSession": { + SchemaProps: spec.SchemaProps{ + Description: "whether support iSCSI Session CHAP authentication", + Type: []string{"boolean"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "CHAP Secret for iSCSI target and initiator authentication", + Ref: ref("k8s.io/api/core/v1.SecretReference"), + }, + }, + "initiatorName": { + SchemaProps: spec.SchemaProps{ + Description: "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"targetPortal", "iqn", "lun"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretReference"}, + } +} + +func schema_k8sio_api_core_v1_ISCSIVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. 
ISCSI volumes support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "targetPortal": { + SchemaProps: spec.SchemaProps{ + Description: "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + Type: []string{"string"}, + Format: "", + }, + }, + "iqn": { + SchemaProps: spec.SchemaProps{ + Description: "Target iSCSI Qualified Name.", + Type: []string{"string"}, + Format: "", + }, + }, + "lun": { + SchemaProps: spec.SchemaProps{ + Description: "iSCSI Target Lun number.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "iscsiInterface": { + SchemaProps: spec.SchemaProps{ + Description: "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "portals": { + SchemaProps: spec.SchemaProps{ + Description: "iSCSI Target Portal List. 
The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "chapAuthDiscovery": { + SchemaProps: spec.SchemaProps{ + Description: "whether support iSCSI Discovery CHAP authentication", + Type: []string{"boolean"}, + Format: "", + }, + }, + "chapAuthSession": { + SchemaProps: spec.SchemaProps{ + Description: "whether support iSCSI Session CHAP authentication", + Type: []string{"boolean"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "CHAP Secret for iSCSI target and initiator authentication", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + "initiatorName": { + SchemaProps: spec.SchemaProps{ + Description: "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"targetPortal", "iqn", "lun"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference"}, + } +} + +func schema_k8sio_api_core_v1_KeyToPath(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Maps a string key to a path within a volume.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "key": { + SchemaProps: spec.SchemaProps{ + Description: "The key to project.", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. 
May not start with the string '..'.", + Type: []string{"string"}, + Format: "", + }, + }, + "mode": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"key", "path"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_Lifecycle(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "postStart": { + SchemaProps: spec.SchemaProps{ + Description: "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + Ref: ref("k8s.io/api/core/v1.Handler"), + }, + }, + "preStop": { + SchemaProps: spec.SchemaProps{ + Description: "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. 
The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + Ref: ref("k8s.io/api/core/v1.Handler"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Handler"}, + } +} + +func schema_k8sio_api_core_v1_LimitRange(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "LimitRange sets resource usage limits for each kind of resource in a Namespace.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.LimitRangeSpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LimitRangeSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of resource that this limit applies to.", + Type: []string{"string"}, + Format: "", + }, + }, + "max": { + SchemaProps: spec.SchemaProps{ + Description: "Max usage constraints on this kind by resource name.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + "min": { + SchemaProps: spec.SchemaProps{ + Description: "Min usage constraints on this kind by resource name.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + "default": { + SchemaProps: spec.SchemaProps{ + Description: "Default resource requirement limit value by resource name if resource limit is omitted.", + Type: []string{"object"}, + 
AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + "defaultRequest": { + SchemaProps: spec.SchemaProps{ + Description: "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + "maxLimitRequestRatio": { + SchemaProps: spec.SchemaProps{ + Description: "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + } +} + +func schema_k8sio_api_core_v1_LimitRangeList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "LimitRangeList is a list of LimitRange items.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.LimitRange"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LimitRange", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_LimitRangeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "LimitRangeSpec defines a min/max usage limit for resources that match on kind.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "limits": { + SchemaProps: spec.SchemaProps{ + Description: "Limits is the list of LimitRangeItem objects that are enforced.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: 
ref("k8s.io/api/core/v1.LimitRangeItem"), + }, + }, + }, + }, + }, + }, + Required: []string{"limits"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LimitRangeItem"}, + } +} + +func schema_k8sio_api_core_v1_List(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "List holds a list of objects, which may not be known by the server.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of objects", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + } +} + +func schema_k8sio_api_core_v1_LoadBalancerIngress(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "ip": { + SchemaProps: spec.SchemaProps{ + Description: "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", + Type: []string{"string"}, + Format: "", + }, + }, + "hostname": { + SchemaProps: spec.SchemaProps{ + Description: "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_LoadBalancerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "LoadBalancerStatus represents the status of a load-balancer.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "ingress": { + SchemaProps: spec.SchemaProps{ + Description: "Ingress is a list containing ingress points for the 
load-balancer. Traffic intended for the service should be sent to these ingress points.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.LoadBalancerIngress"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LoadBalancerIngress"}, + } +} + +func schema_k8sio_api_core_v1_LocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_LocalVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Local represents directly-attached storage with node affinity (Beta feature)", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "path": { + SchemaProps: spec.SchemaProps{ + Description: "The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
The default value is to auto-select a fileystem if unspecified.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"path"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_NFSVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "server": { + SchemaProps: spec.SchemaProps{ + Description: "Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"server", "path"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_Namespace(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Namespace provides a scope for Names. Use of multiple namespaces is optional.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.NamespaceSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status describes the current status of a Namespace. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.NamespaceStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NamespaceSpec", "k8s.io/api/core/v1.NamespaceStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_NamespaceCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NamespaceCondition contains details about state of namespace.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of namespace controller condition.", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_k8sio_api_core_v1_NamespaceList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NamespaceList is a list of Namespaces.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Namespace"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Namespace", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_NamespaceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NamespaceSpec describes the attributes on a Namespace.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "finalizers": { + SchemaProps: spec.SchemaProps{ + Description: "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. 
More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_NamespaceStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NamespaceStatus is information about the current status of a Namespace.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "phase": { + SchemaProps: spec.SchemaProps{ + Description: "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + Type: []string{"string"}, + Format: "", + }, + }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Represents the latest available observations of a namespace's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.NamespaceCondition"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NamespaceCondition"}, + } +} + +func schema_k8sio_api_core_v1_Node(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.NodeSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Most recently observed status of the node. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.NodeStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NodeSpec", "k8s.io/api/core/v1.NodeStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_NodeAddress(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeAddress contains information for the node's address.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Node address type, one of Hostname, ExternalIP or InternalIP.", + Type: []string{"string"}, + Format: "", + }, + }, + "address": { + SchemaProps: spec.SchemaProps{ + Description: "The node address.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "address"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_NodeAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Node affinity is a group of node affinity scheduling rules.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "requiredDuringSchedulingIgnoredDuringExecution": { + SchemaProps: spec.SchemaProps{ + Description: "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to an update), the system may or may not try to eventually evict the pod from its node.", + Ref: ref("k8s.io/api/core/v1.NodeSelector"), + }, + }, + "preferredDuringSchedulingIgnoredDuringExecution": { + SchemaProps: spec.SchemaProps{ + Description: "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PreferredSchedulingTerm"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NodeSelector", "k8s.io/api/core/v1.PreferredSchedulingTerm"}, + } +} + +func schema_k8sio_api_core_v1_NodeCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeCondition contains condition information for a node.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of node condition.", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", + }, + }, + "lastHeartbeatTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time we got an update on a given condition.", + Ref: 
ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time the condition transit from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "(brief) reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "Human readable message indicating details about last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_k8sio_api_core_v1_NodeConfigSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigMap is a reference to a Node's ConfigMap", + Ref: ref("k8s.io/api/core/v1.ConfigMapNodeConfigSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapNodeConfigSource"}, + } +} + +func schema_k8sio_api_core_v1_NodeConfigStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "assigned": { + SchemaProps: spec.SchemaProps{ + Description: "Assigned reports the checkpointed config the node will try to use. 
When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.", + Ref: ref("k8s.io/api/core/v1.NodeConfigSource"), + }, + }, + "active": { + SchemaProps: spec.SchemaProps{ + Description: "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.", + Ref: ref("k8s.io/api/core/v1.NodeConfigSource"), + }, + }, + "lastKnownGood": { + SchemaProps: spec.SchemaProps{ + Description: "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. 
You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.", + Ref: ref("k8s.io/api/core/v1.NodeConfigSource"), + }, + }, + "error": { + SchemaProps: spec.SchemaProps{ + Description: "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. 
Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NodeConfigSource"}, + } +} + +func schema_k8sio_api_core_v1_NodeDaemonEndpoints(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeDaemonEndpoints lists ports opened by daemons running on the Node.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kubeletEndpoint": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoint on which Kubelet is listening.", + Ref: ref("k8s.io/api/core/v1.DaemonEndpoint"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.DaemonEndpoint"}, + } +} + +func schema_k8sio_api_core_v1_NodeList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeList is the whole list of all Nodes which have been registered with master.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of nodes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Node"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Node", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_NodeProxyOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeProxyOptions is the query options to a Node's proxy call.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is the URL path to use for the current proxy request to node.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_NodeResources(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeResources is an object for conveying resource information about a node. see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "Capacity": { + SchemaProps: spec.SchemaProps{ + Description: "Capacity represents the available resources of a node", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + }, + Required: []string{"Capacity"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + } +} + +func schema_k8sio_api_core_v1_NodeSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nodeSelectorTerms": { + SchemaProps: spec.SchemaProps{ + Description: "Required. A list of node selector terms. 
The terms are ORed.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.NodeSelectorTerm"), + }, + }, + }, + }, + }, + }, + Required: []string{"nodeSelectorTerms"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NodeSelectorTerm"}, + } +} + +func schema_k8sio_api_core_v1_NodeSelectorRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "key": { + SchemaProps: spec.SchemaProps{ + Description: "The label key that the selector applies to.", + Type: []string{"string"}, + Format: "", + }, + }, + "operator": { + SchemaProps: spec.SchemaProps{ + Description: "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", + Type: []string{"string"}, + Format: "", + }, + }, + "values": { + SchemaProps: spec.SchemaProps{ + Description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. 
This array is replaced during a strategic merge patch.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"key", "operator"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_NodeSelectorTerm(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "matchExpressions": { + SchemaProps: spec.SchemaProps{ + Description: "A list of node selector requirements by node's labels.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.NodeSelectorRequirement"), + }, + }, + }, + }, + }, + "matchFields": { + SchemaProps: spec.SchemaProps{ + Description: "A list of node selector requirements by node's fields.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.NodeSelectorRequirement"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NodeSelectorRequirement"}, + } +} + +func schema_k8sio_api_core_v1_NodeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeSpec describes the attributes that a node is created with.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "podCIDR": { + SchemaProps: spec.SchemaProps{ + Description: "PodCIDR represents the pod IP range assigned to the node.", + Type: []string{"string"}, + Format: "", + 
}, + }, + "podCIDRs": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "providerID": { + SchemaProps: spec.SchemaProps{ + Description: "ID of the node assigned by the cloud provider in the format: ://", + Type: []string{"string"}, + Format: "", + }, + }, + "unschedulable": { + SchemaProps: spec.SchemaProps{ + Description: "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", + Type: []string{"boolean"}, + Format: "", + }, + }, + "taints": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the node's taints.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Taint"), + }, + }, + }, + }, + }, + "configSource": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field", + Ref: ref("k8s.io/api/core/v1.NodeConfigSource"), + }, + }, + "externalID": { + SchemaProps: spec.SchemaProps{ + Description: "Deprecated. Not all kubelets will set this field. Remove field after 1.13. 
see: https://issues.k8s.io/61966", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NodeConfigSource", "k8s.io/api/core/v1.Taint"}, + } +} + +func schema_k8sio_api_core_v1_NodeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeStatus is information about the current status of a node.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "capacity": { + SchemaProps: spec.SchemaProps{ + Description: "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + "allocatable": { + SchemaProps: spec.SchemaProps{ + Description: "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + "phase": { + SchemaProps: spec.SchemaProps{ + Description: "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", + Type: []string{"string"}, + Format: "", + }, + }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Conditions is an array of current observed node conditions. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.NodeCondition"), + }, + }, + }, + }, + }, + "addresses": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.NodeAddress"), + }, + }, + }, + }, + }, + "daemonEndpoints": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoints of daemons running on the Node.", + Ref: ref("k8s.io/api/core/v1.NodeDaemonEndpoints"), + }, + }, + "nodeInfo": { + SchemaProps: spec.SchemaProps{ + Description: "Set of ids/uuids to uniquely identify the node. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#info", + Ref: ref("k8s.io/api/core/v1.NodeSystemInfo"), + }, + }, + "images": { + SchemaProps: spec.SchemaProps{ + Description: "List of container images on this node", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ContainerImage"), + }, + }, + }, + }, + }, + "volumesInUse": { + SchemaProps: spec.SchemaProps{ + Description: "List of attachable volumes in use (mounted) by the node.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "volumesAttached": { + SchemaProps: spec.SchemaProps{ + Description: "List of volumes that are attached to the node.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.AttachedVolume"), + }, + }, + }, + }, + }, + "config": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the config assigned to the node via the dynamic Kubelet config feature.", + Ref: ref("k8s.io/api/core/v1.NodeConfigStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AttachedVolume", "k8s.io/api/core/v1.ContainerImage", "k8s.io/api/core/v1.NodeAddress", "k8s.io/api/core/v1.NodeCondition", "k8s.io/api/core/v1.NodeConfigStatus", "k8s.io/api/core/v1.NodeDaemonEndpoints", "k8s.io/api/core/v1.NodeSystemInfo", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + } +} + +func schema_k8sio_api_core_v1_NodeSystemInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "machineID": { + SchemaProps: 
spec.SchemaProps{ + Description: "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html", + Type: []string{"string"}, + Format: "", + }, + }, + "systemUUID": { + SchemaProps: spec.SchemaProps{ + Description: "SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html", + Type: []string{"string"}, + Format: "", + }, + }, + "bootID": { + SchemaProps: spec.SchemaProps{ + Description: "Boot ID reported by the node.", + Type: []string{"string"}, + Format: "", + }, + }, + "kernelVersion": { + SchemaProps: spec.SchemaProps{ + Description: "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", + Type: []string{"string"}, + Format: "", + }, + }, + "osImage": { + SchemaProps: spec.SchemaProps{ + Description: "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", + Type: []string{"string"}, + Format: "", + }, + }, + "containerRuntimeVersion": { + SchemaProps: spec.SchemaProps{ + Description: "ContainerRuntime Version reported by the node through runtime remote API (e.g. 
docker://1.5.0).", + Type: []string{"string"}, + Format: "", + }, + }, + "kubeletVersion": { + SchemaProps: spec.SchemaProps{ + Description: "Kubelet Version reported by the node.", + Type: []string{"string"}, + Format: "", + }, + }, + "kubeProxyVersion": { + SchemaProps: spec.SchemaProps{ + Description: "KubeProxy Version reported by the node.", + Type: []string{"string"}, + Format: "", + }, + }, + "operatingSystem": { + SchemaProps: spec.SchemaProps{ + Description: "The Operating System reported by the node", + Type: []string{"string"}, + Format: "", + }, + }, + "architecture": { + SchemaProps: spec.SchemaProps{ + Description: "The Architecture reported by the node", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"machineID", "systemUUID", "bootID", "kernelVersion", "osImage", "containerRuntimeVersion", "kubeletVersion", "kubeProxyVersion", "operatingSystem", "architecture"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ObjectFieldSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ObjectFieldSelector selects an APIVersioned field of an object.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", + Type: []string{"string"}, + Format: "", + }, + }, + "fieldPath": { + SchemaProps: spec.SchemaProps{ + Description: "Path of the field to select in the specified API version.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"fieldPath"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ObjectReference contains enough information to let you inspect 
or modify the referred object.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "namespace": { + SchemaProps: spec.SchemaProps{ + Description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Type: []string{"string"}, + Format: "", + }, + }, + "uid": { + SchemaProps: spec.SchemaProps{ + Description: "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "API version of the referent.", + Type: []string{"string"}, + Format: "", + }, + }, + "resourceVersion": { + SchemaProps: spec.SchemaProps{ + Description: "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + Type: []string{"string"}, + Format: "", + }, + }, + "fieldPath": { + SchemaProps: spec.SchemaProps{ + Description: "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_PersistentVolume(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeSpec", "k8s.io/api/core/v1.PersistentVolumeStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_PersistentVolumeClaim(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaim is a user's request for and claim to a persistent volume", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status represents the current information/status of a persistent volume claim. Read-only. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "k8s.io/api/core/v1.PersistentVolumeClaimStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_PersistentVolumeClaimCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimCondition contails details about state of pvc", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "lastProbeTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time we probed the condition.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. 
If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "Human-readable message indicating details about last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_k8sio_api_core_v1_PersistentVolumeClaimList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "A list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_PersistentVolumeClaimSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "accessModes": { + SchemaProps: spec.SchemaProps{ + Description: "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "selector": { + SchemaProps: spec.SchemaProps{ + Description: "A label query over volumes to consider for binding.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Resources represents the minimum resources the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeName": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeName is the binding reference to the PersistentVolume backing this claim.", + Type: []string{"string"}, + Format: "", + }, + }, + "storageClassName": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeMode": { + SchemaProps: spec.SchemaProps{ + Description: "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.", + Type: []string{"string"}, + Format: "", + }, + }, + "dataSource": { + SchemaProps: spec.SchemaProps{ + Description: "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. 
In the future, we plan to support more data source types and the behavior of the provisioner may change.", + Ref: ref("k8s.io/api/core/v1.TypedLocalObjectReference"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.TypedLocalObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + } +} + +func schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "phase": { + SchemaProps: spec.SchemaProps{ + Description: "Phase represents the current phase of PersistentVolumeClaim.", + Type: []string{"string"}, + Format: "", + }, + }, + "accessModes": { + SchemaProps: spec.SchemaProps{ + Description: "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "capacity": { + SchemaProps: spec.SchemaProps{ + Description: "Represents the actual resources of the underlying volume.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Current Condition of persistent volume claim. 
If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimCondition"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolumeClaimCondition", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + } +} + +func schema_k8sio_api_core_v1_PersistentVolumeClaimVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "claimName": { + SchemaProps: spec.SchemaProps{ + Description: "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Will force the ReadOnly setting in VolumeMounts. 
Default false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"claimName"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_PersistentVolumeList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeList is a list of PersistentVolume items.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of persistent volumes. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PersistentVolume"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PersistentVolume", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_PersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsPersistentVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDPersistentVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", + Ref: ref("k8s.io/api/core/v1.ISCSIPersistentVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "Cinder represents a cinder volume attached and mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderPersistentVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSPersistentVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexPersistentVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFilePersistentVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: 
ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOPersistentVolumeSource"), + }, + }, + "local": { + SchemaProps: spec.SchemaProps{ + Description: "Local represents directly-attached storage with node affinity", + Ref: ref("k8s.io/api/core/v1.LocalVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", + Ref: ref("k8s.io/api/core/v1.StorageOSPersistentVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "CSI represents storage that is handled by an external CSI driver (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIPersistentVolumeSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFilePersistentVolumeSource", "k8s.io/api/core/v1.CSIPersistentVolumeSource", "k8s.io/api/core/v1.CephFSPersistentVolumeSource", "k8s.io/api/core/v1.CinderPersistentVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexPersistentVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", 
"k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsPersistentVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIPersistentVolumeSource", "k8s.io/api/core/v1.LocalVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDPersistentVolumeSource", "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource", "k8s.io/api/core/v1.StorageOSPersistentVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"}, + } +} + +func schema_k8sio_api_core_v1_PersistentVolumeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeSpec is the specification of a persistent volume.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "capacity": { + SchemaProps: spec.SchemaProps{ + Description: "A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsPersistentVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDPersistentVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
Provisioned by an admin.", + Ref: ref("k8s.io/api/core/v1.ISCSIPersistentVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderPersistentVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSPersistentVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexPersistentVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFilePersistentVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: 
ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOPersistentVolumeSource"), + }, + }, + "local": { + SchemaProps: spec.SchemaProps{ + Description: "Local represents directly-attached storage with node affinity", + Ref: ref("k8s.io/api/core/v1.LocalVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", + Ref: ref("k8s.io/api/core/v1.StorageOSPersistentVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "CSI represents storage that is handled by an external CSI driver (Beta feature).", + Ref: ref("k8s.io/api/core/v1.CSIPersistentVolumeSource"), + }, + }, + "accessModes": { + SchemaProps: spec.SchemaProps{ + Description: "AccessModes contains all ways the volume can be mounted. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "claimRef": { + SchemaProps: spec.SchemaProps{ + Description: "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding", + Ref: ref("k8s.io/api/core/v1.ObjectReference"), + }, + }, + "persistentVolumeReclaimPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", + Type: []string{"string"}, + Format: "", + }, + }, + "storageClassName": { + SchemaProps: spec.SchemaProps{ + Description: "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", + Type: []string{"string"}, + Format: "", + }, + }, + "mountOptions": { + SchemaProps: spec.SchemaProps{ + Description: "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "volumeMode": { + SchemaProps: spec.SchemaProps{ + Description: "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.", + Type: []string{"string"}, + Format: "", + }, + }, + "nodeAffinity": { + SchemaProps: spec.SchemaProps{ + Description: "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", + Ref: ref("k8s.io/api/core/v1.VolumeNodeAffinity"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFilePersistentVolumeSource", "k8s.io/api/core/v1.CSIPersistentVolumeSource", "k8s.io/api/core/v1.CephFSPersistentVolumeSource", "k8s.io/api/core/v1.CinderPersistentVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexPersistentVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsPersistentVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIPersistentVolumeSource", "k8s.io/api/core/v1.LocalVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.ObjectReference", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDPersistentVolumeSource", "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource", "k8s.io/api/core/v1.StorageOSPersistentVolumeSource", 
"k8s.io/api/core/v1.VolumeNodeAffinity", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + } +} + +func schema_k8sio_api_core_v1_PersistentVolumeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeStatus is the current status of a persistent volume.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "phase": { + SchemaProps: spec.SchemaProps{ + Description: "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human-readable message indicating details about why the volume is in this state.", + Type: []string{"string"}, + Format: "", + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_PhotonPersistentDiskVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Photon Controller persistent disk resource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "pdID": { + SchemaProps: spec.SchemaProps{ + Description: "ID that identifies Photon Controller persistent disk", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"pdID"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_Pod(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Specification of the desired behavior of the pod. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.PodSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.PodStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodSpec", "k8s.io/api/core/v1.PodStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_PodAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Pod affinity is a group of inter pod affinity scheduling rules.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "requiredDuringSchedulingIgnoredDuringExecution": { + SchemaProps: spec.SchemaProps{ + Description: "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PodAffinityTerm"), + }, + }, + }, + }, + }, + "preferredDuringSchedulingIgnoredDuringExecution": { + SchemaProps: spec.SchemaProps{ + Description: "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.WeightedPodAffinityTerm"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodAffinityTerm", "k8s.io/api/core/v1.WeightedPodAffinityTerm"}, + } +} + +func schema_k8sio_api_core_v1_PodAffinityTerm(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "labelSelector": { + SchemaProps: spec.SchemaProps{ + Description: "A label query over a set of 
resources, in this case pods.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + "namespaces": { + SchemaProps: spec.SchemaProps{ + Description: "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "topologyKey": { + SchemaProps: spec.SchemaProps{ + Description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"topologyKey"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + } +} + +func schema_k8sio_api_core_v1_PodAntiAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "requiredDuringSchedulingIgnoredDuringExecution": { + SchemaProps: spec.SchemaProps{ + Description: "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. 
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PodAffinityTerm"), + }, + }, + }, + }, + }, + "preferredDuringSchedulingIgnoredDuringExecution": { + SchemaProps: spec.SchemaProps{ + Description: "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.WeightedPodAffinityTerm"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodAffinityTerm", "k8s.io/api/core/v1.WeightedPodAffinityTerm"}, + } +} + +func schema_k8sio_api_core_v1_PodAttachOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodAttachOptions is the query options to a Pod's remote attach call.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "stdin": { + SchemaProps: spec.SchemaProps{ + Description: "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stdout": { + SchemaProps: spec.SchemaProps{ + Description: "Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stderr": { + SchemaProps: spec.SchemaProps{ + Description: "Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tty": { + SchemaProps: spec.SchemaProps{ + Description: "TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "container": { + SchemaProps: spec.SchemaProps{ + Description: "The container in which to execute the command. 
Defaults to only container if there is only one container in the pod.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_PodCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodCondition contains details for the current condition of this pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + Type: []string{"string"}, + Format: "", + }, + }, + "lastProbeTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time we probed the condition.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "Unique, one-word, CamelCase reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "Human-readable message indicating details about last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_k8sio_api_core_v1_PodDNSConfig(ref common.ReferenceCallback) 
common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nameservers": { + SchemaProps: spec.SchemaProps{ + Description: "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "searches": { + SchemaProps: spec.SchemaProps{ + Description: "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "options": { + SchemaProps: spec.SchemaProps{ + Description: "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. 
Resolution options given in Options will override those that appear in the base DNSPolicy.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PodDNSConfigOption"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodDNSConfigOption"}, + } +} + +func schema_k8sio_api_core_v1_PodDNSConfigOption(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodDNSConfigOption defines DNS resolver options of a pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Required.", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_PodExecOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodExecOptions is the query options to a Pod's remote exec call.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "stdin": { + SchemaProps: spec.SchemaProps{ + Description: "Redirect the standard input stream of the pod for this call. Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stdout": { + SchemaProps: spec.SchemaProps{ + Description: "Redirect the standard output stream of the pod for this call. Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stderr": { + SchemaProps: spec.SchemaProps{ + Description: "Redirect the standard error stream of the pod for this call. Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tty": { + SchemaProps: spec.SchemaProps{ + Description: "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "container": { + SchemaProps: spec.SchemaProps{ + Description: "Container in which to execute the command. Defaults to only container if there is only one container in the pod.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + SchemaProps: spec.SchemaProps{ + Description: "Command is the remote command to execute. argv array. Not executed within a shell.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"command"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_PodIP(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. 
Routable at least within the cluster.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "ip": { + SchemaProps: spec.SchemaProps{ + Description: "ip is an IP address (IPv4 or IPv6) assigned to the pod", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_PodList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodList is a list of Pods.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of pods. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Pod"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Pod", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_PodLogOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodLogOptions is the query options for a Pod's logs REST call.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "container": { + SchemaProps: spec.SchemaProps{ + Description: "The container for which to stream logs. Defaults to only container if there is one container in the pod.", + Type: []string{"string"}, + Format: "", + }, + }, + "follow": { + SchemaProps: spec.SchemaProps{ + Description: "Follow the log stream of the pod. 
Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "previous": { + SchemaProps: spec.SchemaProps{ + Description: "Return previous terminated container logs. Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "sinceSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "sinceTime": { + SchemaProps: spec.SchemaProps{ + Description: "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "timestamps": { + SchemaProps: spec.SchemaProps{ + Description: "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tailLines": { + SchemaProps: spec.SchemaProps{ + Description: "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "limitBytes": { + SchemaProps: spec.SchemaProps{ + Description: "If set, the number of bytes to read from the server before terminating the log output. 
This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_k8sio_api_core_v1_PodPortForwardOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options. It requires the port to be passed in the `port` header as part of request.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "ports": { + SchemaProps: spec.SchemaProps{ + Description: "List of ports to forward Required when using WebSockets", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_PodProxyOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodProxyOptions is the query options to a Pod's proxy call.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is the URL path to use for the current proxy request to pod.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_PodReadinessGate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodReadinessGate contains the reference to a pod condition", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "conditionType": { + SchemaProps: spec.SchemaProps{ + Description: "ConditionType refers to a condition in the pod's condition list with matching type.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"conditionType"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_PodSecurityContext(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "seLinuxOptions": { + SchemaProps: spec.SchemaProps{ + Description: "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + Ref: ref("k8s.io/api/core/v1.SELinuxOptions"), + }, + }, + "windowsOptions": { + SchemaProps: spec.SchemaProps{ + Description: "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + Ref: ref("k8s.io/api/core/v1.WindowsSecurityContextOptions"), + }, + }, + "runAsUser": { + SchemaProps: spec.SchemaProps{ + Description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "runAsGroup": { + SchemaProps: spec.SchemaProps{ + Description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "runAsNonRoot": { + SchemaProps: spec.SchemaProps{ + Description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "supplementalGroups": { + SchemaProps: spec.SchemaProps{ + Description: "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + }, + }, + "fsGroup": { + SchemaProps: spec.SchemaProps{ + Description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "sysctls": { + SchemaProps: spec.SchemaProps{ + Description: "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Sysctl"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SELinuxOptions", "k8s.io/api/core/v1.Sysctl", "k8s.io/api/core/v1.WindowsSecurityContextOptions"}, + } +} + +func schema_k8sio_api_core_v1_PodSignature(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Describes the class of pods that should avoid this node. 
Exactly one field should be set.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "podController": { + SchemaProps: spec.SchemaProps{ + Description: "Reference to controller whose pods should avoid this node.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"}, + } +} + +func schema_k8sio_api_core_v1_PodSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodSpec is a description of a pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Volume"), + }, + }, + }, + }, + }, + "initContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. 
The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Container"), + }, + }, + }, + }, + }, + "containers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Container"), + }, + }, + }, + }, + }, + "ephemeralContainers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. 
This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.EphemeralContainer"), + }, + }, + }, + }, + }, + "restartPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", + Type: []string{"string"}, + Format: "", + }, + }, + "terminationGracePeriodSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "activeDeadlineSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. 
To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + Type: []string{"string"}, + Format: "", + }, + }, + "nodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "serviceAccountName": { + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + Type: []string{"string"}, + Format: "", + }, + }, + "serviceAccount": { + SchemaProps: spec.SchemaProps{ + Description: "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", + Type: []string{"string"}, + Format: "", + }, + }, + "automountServiceAccountToken": { + SchemaProps: spec.SchemaProps{ + Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "nodeName": { + SchemaProps: spec.SchemaProps{ + Description: "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + Type: []string{"string"}, + Format: "", + }, + }, + "hostNetwork": { + SchemaProps: spec.SchemaProps{ + Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. 
Default to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "hostPID": { + SchemaProps: spec.SchemaProps{ + Description: "Use the host's pid namespace. Optional: Default to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "hostIPC": { + SchemaProps: spec.SchemaProps{ + Description: "Use the host's ipc namespace. Optional: Default to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "shareProcessNamespace": { + SchemaProps: spec.SchemaProps{ + Description: "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), + }, + }, + "imagePullSecrets": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. 
More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + }, + }, + "hostname": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", + Type: []string{"string"}, + Format: "", + }, + }, + "subdomain": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all.", + Type: []string{"string"}, + Format: "", + }, + }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's scheduling constraints", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + Type: []string{"string"}, + Format: "", + }, + }, + "tolerations": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's tolerations.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Toleration"), + }, + }, + }, + }, + }, + "hostAliases": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "ip", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. 
This is only valid for non-hostNetwork pods.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.HostAlias"), + }, + }, + }, + }, + }, + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, + "readinessGates": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PodReadinessGate"), + }, + }, + }, + }, + }, + "runtimeClassName": { + SchemaProps: spec.SchemaProps{ + Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", + Type: []string{"string"}, + Format: "", + }, + }, + "enableServiceLinks": { + SchemaProps: spec.SchemaProps{ + Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "preemptionPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", + Type: []string{"string"}, + Format: "", + }, + }, + "overhead": { + SchemaProps: spec.SchemaProps{ + Description: "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. 
If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + "topologySpreadConstraints": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "topologyKey", + "whenUnsatisfiable", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "topologyKey", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread feature. 
All topologySpreadConstraints are ANDed.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.TopologySpreadConstraint"), + }, + }, + }, + }, + }, + }, + Required: []string{"containers"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.EphemeralContainer", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodReadinessGate", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.TopologySpreadConstraint", "k8s.io/api/core/v1.Volume", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + } +} + +func schema_k8sio_api_core_v1_PodStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "phase": { + SchemaProps: spec.SchemaProps{ + Description: "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. 
Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", + Type: []string{"string"}, + Format: "", + }, + }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PodCondition"), + }, + }, + }, + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about why the pod is in this condition.", + Type: []string{"string"}, + Format: "", + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", + Type: []string{"string"}, + Format: "", + }, + }, + "nominatedNodeName": { + SchemaProps: spec.SchemaProps{ + Description: "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. 
Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", + Type: []string{"string"}, + Format: "", + }, + }, + "hostIP": { + SchemaProps: spec.SchemaProps{ + Description: "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", + Type: []string{"string"}, + Format: "", + }, + }, + "podIP": { + SchemaProps: spec.SchemaProps{ + Description: "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + Type: []string{"string"}, + Format: "", + }, + }, + "podIPs": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "ip", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PodIP"), + }, + }, + }, + }, + }, + "startTime": { + SchemaProps: spec.SchemaProps{ + Description: "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "initContainerStatuses": { + SchemaProps: spec.SchemaProps{ + Description: "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ContainerStatus"), + }, + }, + }, + }, + }, + "containerStatuses": { + SchemaProps: spec.SchemaProps{ + Description: "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ContainerStatus"), + }, + }, + }, + }, + }, + "qosClass": { + SchemaProps: spec.SchemaProps{ + Description: "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + Type: []string{"string"}, + Format: "", + }, + }, + "ephemeralContainerStatuses": { + SchemaProps: spec.SchemaProps{ + Description: "Status for any ephemeral containers that have run in this pod. 
This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ContainerStatus"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ContainerStatus", "k8s.io/api/core/v1.PodCondition", "k8s.io/api/core/v1.PodIP", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_k8sio_api_core_v1_PodStatusResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.PodStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_PodTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodTemplate describes a template for creating copies of a predefined pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_PodTemplateList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodTemplateList is a list of PodTemplates.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of pod templates", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.PodTemplate"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodTemplate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_PodTemplateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PodTemplateSpec describes the data a pod should have when created from a template", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Specification of the desired behavior of the pod. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.PodSpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_PortworxVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PortworxVolumeSource represents a Portworx volume resource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeID": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeID uniquely identifies a Portworx volume", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"volumeID"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_PreferAvoidPodsEntry(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Describes a class of pods that should avoid this node.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "podSignature": { + SchemaProps: spec.SchemaProps{ + Description: "The class of pods.", + Ref: ref("k8s.io/api/core/v1.PodSignature"), + }, + }, + "evictionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Time at which this entry was added to the list.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "(brief) reason why this entry was added to the list.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "Human readable message indicating why this entry was added to the list.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"podSignature"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodSignature", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_k8sio_api_core_v1_PreferredSchedulingTerm(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. 
is also a no-op).", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "weight": { + SchemaProps: spec.SchemaProps{ + Description: "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "preference": { + SchemaProps: spec.SchemaProps{ + Description: "A node selector term, associated with the corresponding weight.", + Ref: ref("k8s.io/api/core/v1.NodeSelectorTerm"), + }, + }, + }, + Required: []string{"weight", "preference"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NodeSelectorTerm"}, + } +} + +func schema_k8sio_api_core_v1_Probe(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "exec": { + SchemaProps: spec.SchemaProps{ + Description: "One and only one of the following should be specified. Exec specifies the action to take.", + Ref: ref("k8s.io/api/core/v1.ExecAction"), + }, + }, + "httpGet": { + SchemaProps: spec.SchemaProps{ + Description: "HTTPGet specifies the http request to perform.", + Ref: ref("k8s.io/api/core/v1.HTTPGetAction"), + }, + }, + "tcpSocket": { + SchemaProps: spec.SchemaProps{ + Description: "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported", + Ref: ref("k8s.io/api/core/v1.TCPSocketAction"), + }, + }, + "initialDelaySeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Number of seconds after the container has started before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "timeoutSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "periodSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "successThreshold": { + SchemaProps: spec.SchemaProps{ + Description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "failureThreshold": { + SchemaProps: spec.SchemaProps{ + Description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. 
Minimum value is 1.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ExecAction", "k8s.io/api/core/v1.HTTPGetAction", "k8s.io/api/core/v1.TCPSocketAction"}, + } +} + +func schema_k8sio_api_core_v1_ProjectedVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a projected volume source", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "sources": { + SchemaProps: spec.SchemaProps{ + Description: "list of volume projections", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.VolumeProjection"), + }, + }, + }, + }, + }, + "defaultMode": { + SchemaProps: spec.SchemaProps{ + Description: "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"sources"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.VolumeProjection"}, + } +} + +func schema_k8sio_api_core_v1_QuobyteVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Quobyte mount that lasts the lifetime of a pod. 
Quobyte volumes do not support ownership management or SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "registry": { + SchemaProps: spec.SchemaProps{ + Description: "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", + Type: []string{"string"}, + Format: "", + }, + }, + "volume": { + SchemaProps: spec.SchemaProps{ + Description: "Volume is a string that references an already created Quobyte volume by name.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "user": { + SchemaProps: spec.SchemaProps{ + Description: "User to map volume access to Defaults to serivceaccount user", + Type: []string{"string"}, + Format: "", + }, + }, + "group": { + SchemaProps: spec.SchemaProps{ + Description: "Group to map volume access to Default is no group", + Type: []string{"string"}, + Format: "", + }, + }, + "tenant": { + SchemaProps: spec.SchemaProps{ + Description: "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"registry", "volume"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_RBDPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Rados Block Device mount that lasts the lifetime of a pod. 
RBD volumes support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "monitors": { + SchemaProps: spec.SchemaProps{ + Description: "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + Type: []string{"string"}, + Format: "", + }, + }, + "pool": { + SchemaProps: spec.SchemaProps{ + Description: "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "user": { + SchemaProps: spec.SchemaProps{ + Description: "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "keyring": { + SchemaProps: spec.SchemaProps{ + Description: "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Ref: ref("k8s.io/api/core/v1.SecretReference"), + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"monitors", "image"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretReference"}, + } +} + +func schema_k8sio_api_core_v1_RBDVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "monitors": { + SchemaProps: spec.SchemaProps{ + Description: "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + Type: []string{"string"}, + Format: "", + }, + }, + "pool": { + SchemaProps: spec.SchemaProps{ + Description: "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "user": { + SchemaProps: spec.SchemaProps{ + Description: "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "keyring": { + SchemaProps: spec.SchemaProps{ + Description: "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Type: []string{"string"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"monitors", "image"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference"}, + } +} + +func schema_k8sio_api_core_v1_RangeAllocation(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RangeAllocation is not a public type.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "range": { + SchemaProps: spec.SchemaProps{ + Description: "Range is string that identifies the range represented by 'data'.", + Type: []string{"string"}, + Format: "", + }, + }, + "data": { + SchemaProps: spec.SchemaProps{ + Description: "Data is a bit array containing all allocated addresses in the previous segment.", + Type: []string{"string"}, + Format: "byte", + }, + }, + }, + Required: []string{"range", "data"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_ReplicationController(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReplicationController represents the configuration of a replication controller.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.ReplicationControllerSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.ReplicationControllerStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ReplicationControllerSpec", "k8s.io/api/core/v1.ReplicationControllerStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_ReplicationControllerCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReplicationControllerCondition describes the state of a replication controller at a certain point.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of replication controller condition.", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "The last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "The reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about the transition.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_k8sio_api_core_v1_ReplicationControllerList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: 
spec.SchemaProps{ + Description: "ReplicationControllerList is a collection of replication controllers.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of replication controllers. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ReplicationController"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ReplicationController", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_ReplicationControllerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReplicationControllerSpec is the specification of a replication controller.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "minReadySeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "selector": { + SchemaProps: spec.SchemaProps{ + Description: "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodTemplateSpec"}, + } +} + +func schema_k8sio_api_core_v1_ReplicationControllerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReplicationControllerStatus represents the current status of a replication controller.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Replicas is the most recently oberved number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "fullyLabeledReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of pods that have labels matching the labels of the pod template of the replication controller.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "readyReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of ready replicas for this replication controller.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "availableReplicas": { + SchemaProps: spec.SchemaProps{ + Description: "The number of available replicas (ready for at least minReadySeconds) for this replication controller.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "observedGeneration": { + SchemaProps: spec.SchemaProps{ + Description: "ObservedGeneration reflects the generation of the most recently observed replication controller.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "conditions": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Represents the latest available observations of a replication controller's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ReplicationControllerCondition"), + }, + }, + }, + }, + }, + }, + Required: []string{"replicas"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ReplicationControllerCondition"}, + } +} + +func schema_k8sio_api_core_v1_ResourceFieldSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResourceFieldSelector 
represents container resources (cpu, memory) and their output format", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "containerName": { + SchemaProps: spec.SchemaProps{ + Description: "Container name: required for volumes, optional for env vars", + Type: []string{"string"}, + Format: "", + }, + }, + "resource": { + SchemaProps: spec.SchemaProps{ + Description: "Required: resource to select", + Type: []string{"string"}, + Format: "", + }, + }, + "divisor": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the output format of the exposed resources, defaults to \"1\"", + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + Required: []string{"resource"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + } +} + +func schema_k8sio_api_core_v1_ResourceQuota(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResourceQuota sets aggregate quota restrictions enforced per namespace", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.ResourceQuotaSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.ResourceQuotaStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ResourceQuotaSpec", "k8s.io/api/core/v1.ResourceQuotaStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_ResourceQuotaList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResourceQuotaList is a list of ResourceQuota items.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ResourceQuota"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ResourceQuota", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_ResourceQuotaSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hard": { + SchemaProps: spec.SchemaProps{ + Description: "hard is the set of desired hard limits for each named resource. 
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + "scopes": { + SchemaProps: spec.SchemaProps{ + Description: "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "scopeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.", + Ref: ref("k8s.io/api/core/v1.ScopeSelector"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ScopeSelector", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + } +} + +func schema_k8sio_api_core_v1_ResourceQuotaStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResourceQuotaStatus defines the enforced hard limits and observed use.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hard": { + SchemaProps: spec.SchemaProps{ + Description: "Hard is the set of enforced hard limits for each named resource. 
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + "used": { + SchemaProps: spec.SchemaProps{ + Description: "Used is the current observed total usage of the resource in the namespace.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + } +} + +func schema_k8sio_api_core_v1_ResourceRequirements(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResourceRequirements describes the compute resource requirements.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "limits": { + SchemaProps: spec.SchemaProps{ + Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + "requests": { + SchemaProps: spec.SchemaProps{ + Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/api/resource.Quantity"}, + } +} + +func schema_k8sio_api_core_v1_SELinuxOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SELinuxOptions are the labels to be applied to the container", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "user": { + SchemaProps: spec.SchemaProps{ + Description: "User is a SELinux user label that applies to the container.", + Type: []string{"string"}, + Format: "", + }, + }, + "role": { + SchemaProps: spec.SchemaProps{ + Description: "Role is a SELinux role label that applies to the container.", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is a SELinux type label that applies to the container.", + Type: []string{"string"}, + Format: "", + }, + }, + "level": { + SchemaProps: spec.SchemaProps{ + Description: "Level is SELinux level label that applies to the container.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ScaleIOPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "gateway": { + SchemaProps: spec.SchemaProps{ + Description: "The host address of the ScaleIO API Gateway.", + Type: []string{"string"}, + 
Format: "", + }, + }, + "system": { + SchemaProps: spec.SchemaProps{ + Description: "The name of the storage system as configured in ScaleIO.", + Type: []string{"string"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + Ref: ref("k8s.io/api/core/v1.SecretReference"), + }, + }, + "sslEnabled": { + SchemaProps: spec.SchemaProps{ + Description: "Flag to enable/disable SSL communication with Gateway, default false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "protectionDomain": { + SchemaProps: spec.SchemaProps{ + Description: "The name of the ScaleIO Protection Domain for the configured storage.", + Type: []string{"string"}, + Format: "", + }, + }, + "storagePool": { + SchemaProps: spec.SchemaProps{ + Description: "The ScaleIO Storage Pool associated with the protection domain.", + Type: []string{"string"}, + Format: "", + }, + }, + "storageMode": { + SchemaProps: spec.SchemaProps{ + Description: "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeName": { + SchemaProps: spec.SchemaProps{ + Description: "The name of a volume already created in the ScaleIO system that is associated with this volume source.", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\"", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"gateway", "system", "secretRef"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretReference"}, + } +} + +func schema_k8sio_api_core_v1_ScaleIOVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ScaleIOVolumeSource represents a persistent ScaleIO volume", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "gateway": { + SchemaProps: spec.SchemaProps{ + Description: "The host address of the ScaleIO API Gateway.", + Type: []string{"string"}, + Format: "", + }, + }, + "system": { + SchemaProps: spec.SchemaProps{ + Description: "The name of the storage system as configured in ScaleIO.", + Type: []string{"string"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + "sslEnabled": { + SchemaProps: spec.SchemaProps{ + Description: "Flag to enable/disable SSL communication with Gateway, default false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "protectionDomain": { + SchemaProps: spec.SchemaProps{ + Description: "The name of the ScaleIO Protection Domain for the configured storage.", + Type: []string{"string"}, + Format: "", + }, + }, + "storagePool": { + SchemaProps: spec.SchemaProps{ + Description: "The ScaleIO Storage Pool associated with the protection domain.", + Type: []string{"string"}, + Format: "", + }, + }, + "storageMode": { + SchemaProps: spec.SchemaProps{ + Description: "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned.", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeName": { + SchemaProps: spec.SchemaProps{ + Description: "The name of a volume already created in the ScaleIO system that is associated with this volume source.", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"gateway", "system", "secretRef"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference"}, + } +} + +func schema_k8sio_api_core_v1_ScopeSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A scope selector represents the AND of the selectors represented by the scoped-resource selector requirements.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "matchExpressions": { + SchemaProps: spec.SchemaProps{ + Description: "A list of scope selector requirements by scope of the resources.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ScopedResourceSelectorRequirement"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ScopedResourceSelectorRequirement"}, + } +} + +func schema_k8sio_api_core_v1_ScopedResourceSelectorRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + 
Description: "A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "scopeName": { + SchemaProps: spec.SchemaProps{ + Description: "The name of the scope that the selector applies to.", + Type: []string{"string"}, + Format: "", + }, + }, + "operator": { + SchemaProps: spec.SchemaProps{ + Description: "Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.", + Type: []string{"string"}, + Format: "", + }, + }, + "values": { + SchemaProps: spec.SchemaProps{ + Description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"scopeName", "operator"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_Secret(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "data": { + SchemaProps: spec.SchemaProps{ + Description: "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "byte", + }, + }, + }, + }, + }, + "stringData": { + SchemaProps: spec.SchemaProps{ + Description: "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. 
It is never output when reading from the API.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Used to facilitate programmatic handling of secret data.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_SecretEnvSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Type: []string{"string"}, + Format: "", + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Specify whether the Secret must be defined", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_SecretKeySelector(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SecretKeySelector selects a key of a Secret.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Type: []string{"string"}, + Format: "", + }, + }, + "key": { + SchemaProps: spec.SchemaProps{ + Description: "The key of the secret to select from. Must be a valid secret key.", + Type: []string{"string"}, + Format: "", + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Specify whether the Secret or its key must be defined", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"key"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_SecretList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SecretList is a list of Secret.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Secret"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Secret", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_SecretProjection(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Type: []string{"string"}, + Format: "", + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.KeyToPath"), + }, + }, + }, + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Specify whether the Secret or its key must be defined", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.KeyToPath"}, + } +} + +func schema_k8sio_api_core_v1_SecretReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is unique within a namespace to reference a secret resource.", + Type: []string{"string"}, + Format: "", + }, + }, + "namespace": { + SchemaProps: spec.SchemaProps{ + Description: "Namespace defines the space within which the secret name must be unique.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_SecretVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "secretName": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the secret in the pod's namespace to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Type: []string{"string"}, + Format: "", + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.KeyToPath"), + }, + }, + }, + }, + }, + "defaultMode": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Specify whether the Secret or its keys must be defined", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.KeyToPath"}, + } +} + +func schema_k8sio_api_core_v1_SecurityContext(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. 
When both are set, the values in SecurityContext take precedence.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "capabilities": { + SchemaProps: spec.SchemaProps{ + Description: "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", + Ref: ref("k8s.io/api/core/v1.Capabilities"), + }, + }, + "privileged": { + SchemaProps: spec.SchemaProps{ + Description: "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "seLinuxOptions": { + SchemaProps: spec.SchemaProps{ + Description: "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + Ref: ref("k8s.io/api/core/v1.SELinuxOptions"), + }, + }, + "windowsOptions": { + SchemaProps: spec.SchemaProps{ + Description: "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + Ref: ref("k8s.io/api/core/v1.WindowsSecurityContextOptions"), + }, + }, + "runAsUser": { + SchemaProps: spec.SchemaProps{ + Description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "runAsGroup": { + SchemaProps: spec.SchemaProps{ + Description: "The GID to run the entrypoint of the container process. 
Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "runAsNonRoot": { + SchemaProps: spec.SchemaProps{ + Description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "readOnlyRootFilesystem": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container has a read-only root filesystem. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "allowPrivilegeEscalation": { + SchemaProps: spec.SchemaProps{ + Description: "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN", + Type: []string{"boolean"}, + Format: "", + }, + }, + "procMount": { + SchemaProps: spec.SchemaProps{ + Description: "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. 
This requires the ProcMountType feature flag to be enabled.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Capabilities", "k8s.io/api/core/v1.SELinuxOptions", "k8s.io/api/core/v1.WindowsSecurityContextOptions"}, + } +} + +func schema_k8sio_api_core_v1_SerializedReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SerializedReference is a reference to serialized object.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "reference": { + SchemaProps: spec.SchemaProps{ + Description: "The reference to an object in the system.", + Ref: ref("k8s.io/api/core/v1.ObjectReference"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ObjectReference"}, + } +} + +func schema_k8sio_api_core_v1_Service(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.ServiceSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Ref: ref("k8s.io/api/core/v1.ServiceStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ServiceSpec", "k8s.io/api/core/v1.ServiceStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_ServiceAccount(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "secrets": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ObjectReference"), + }, + }, + }, + }, + }, + "imagePullSecrets": { + SchemaProps: spec.SchemaProps{ + Description: "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. 
More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + }, + }, + "automountServiceAccountToken": { + SchemaProps: spec.SchemaProps{ + Description: "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_core_v1_ServiceAccountList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccountList is a list of ServiceAccount objects", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ServiceAccount"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ServiceAccount", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_ServiceAccountTokenProjection(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "audience": { + SchemaProps: spec.SchemaProps{ + Description: "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. 
The audience defaults to the identifier of the apiserver.", + Type: []string{"string"}, + Format: "", + }, + }, + "expirationSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is the path relative to the mount point of the file to project the token into.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"path"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ServiceList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ServiceList holds a list of services.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of services", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.Service"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Service", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_core_v1_ServicePort(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ServicePort contains information on service's port.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", + Type: []string{"string"}, + Format: "", + }, + }, + "protocol": { + SchemaProps: spec.SchemaProps{ + Description: "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". 
Default is TCP.", + Type: []string{"string"}, + Format: "", + }, + }, + "port": { + SchemaProps: spec.SchemaProps{ + Description: "The port that will be exposed by this service.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "targetPort": { + SchemaProps: spec.SchemaProps{ + Description: "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + }, + }, + "nodePort": { + SchemaProps: spec.SchemaProps{ + Description: "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"port"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + } +} + +func schema_k8sio_api_core_v1_ServiceProxyOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ServiceProxyOptions is the query options to a Service's proxy call.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. 
Path is _search?q=user:kimchy.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_ServiceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ServiceSpec describes the attributes that a user creates on a service.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "ports": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "port", + "protocol", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "port", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.ServicePort"), + }, + }, + }, + }, + }, + "selector": { + SchemaProps: spec.SchemaProps{ + Description: "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "clusterIP": { + SchemaProps: spec.SchemaProps{ + Description: "clusterIP is the IP address of the service and is usually assigned randomly by the master. 
If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", + Type: []string{"string"}, + Format: "", + }, + }, + "externalIPs": { + SchemaProps: spec.SchemaProps{ + Description: "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. 
A common example is external load-balancers that are not part of the Kubernetes system.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "sessionAffinity": { + SchemaProps: spec.SchemaProps{ + Description: "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + Type: []string{"string"}, + Format: "", + }, + }, + "loadBalancerIP": { + SchemaProps: spec.SchemaProps{ + Description: "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", + Type: []string{"string"}, + Format: "", + }, + }, + "loadBalancerSourceRanges": { + SchemaProps: spec.SchemaProps{ + Description: "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "externalName": { + SchemaProps: spec.SchemaProps{ + Description: "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. 
Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", + Type: []string{"string"}, + Format: "", + }, + }, + "externalTrafficPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", + Type: []string{"string"}, + Format: "", + }, + }, + "healthCheckNodePort": { + SchemaProps: spec.SchemaProps{ + Description: "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "publishNotReadyAddresses": { + SchemaProps: spec.SchemaProps{ + Description: "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. 
The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "sessionAffinityConfig": { + SchemaProps: spec.SchemaProps{ + Description: "sessionAffinityConfig contains the configurations of session affinity.", + Ref: ref("k8s.io/api/core/v1.SessionAffinityConfig"), + }, + }, + "ipFamily": { + SchemaProps: spec.SchemaProps{ + Description: "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. 
IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ServicePort", "k8s.io/api/core/v1.SessionAffinityConfig"}, + } +} + +func schema_k8sio_api_core_v1_ServiceStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ServiceStatus represents the current status of a service.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "loadBalancer": { + SchemaProps: spec.SchemaProps{ + Description: "LoadBalancer contains the current status of the load-balancer, if one is present.", + Ref: ref("k8s.io/api/core/v1.LoadBalancerStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LoadBalancerStatus"}, + } +} + +func schema_k8sio_api_core_v1_SessionAffinityConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SessionAffinityConfig represents the configurations of session affinity.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "clientIP": { + SchemaProps: spec.SchemaProps{ + Description: "clientIP contains the configurations of Client IP based session affinity.", + Ref: ref("k8s.io/api/core/v1.ClientIPConfig"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ClientIPConfig"}, + } +} + +func schema_k8sio_api_core_v1_StorageOSPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a StorageOS persistent volume resource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeName": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeName is 
the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeNamespace": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. 
If not specified, default values will be attempted.", + Ref: ref("k8s.io/api/core/v1.ObjectReference"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ObjectReference"}, + } +} + +func schema_k8sio_api_core_v1_StorageOSVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a StorageOS persistent volume resource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumeName": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeNamespace": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "secretRef": { + SchemaProps: spec.SchemaProps{ + Description: "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. 
If not specified, default values will be attempted.", + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference"}, + } +} + +func schema_k8sio_api_core_v1_Sysctl(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Sysctl defines a kernel parameter to be set", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of a property to set", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value of a property to set", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_TCPSocketAction(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TCPSocketAction describes an action based on opening a socket", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "port": { + SchemaProps: spec.SchemaProps{ + Description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. 
Name must be an IANA_SVC_NAME.", + Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), + }, + }, + "host": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Host name to connect to, defaults to the pod IP.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"port"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, + } +} + +func schema_k8sio_api_core_v1_Taint(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "key": { + SchemaProps: spec.SchemaProps{ + Description: "Required. The taint key to be applied to a node.", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Required. The taint value corresponding to the taint key.", + Type: []string{"string"}, + Format: "", + }, + }, + "effect": { + SchemaProps: spec.SchemaProps{ + Description: "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", + Type: []string{"string"}, + Format: "", + }, + }, + "timeAdded": { + SchemaProps: spec.SchemaProps{ + Description: "TimeAdded represents the time at which the taint was added. 
It is only written for NoExecute taints.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + }, + Required: []string{"key", "effect"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_k8sio_api_core_v1_Toleration(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "key": { + SchemaProps: spec.SchemaProps{ + Description: "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", + Type: []string{"string"}, + Format: "", + }, + }, + "operator": { + SchemaProps: spec.SchemaProps{ + Description: "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", + Type: []string{"string"}, + Format: "", + }, + }, + "effect": { + SchemaProps: spec.SchemaProps{ + Description: "Effect indicates the taint effect to match. Empty means match all taint effects. 
When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", + Type: []string{"string"}, + Format: "", + }, + }, + "tolerationSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_api_core_v1_TopologySelectorLabelRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A topology selector requirement is a selector that matches given label. This is an alpha feature and may change in the future.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "key": { + SchemaProps: spec.SchemaProps{ + Description: "The label key that the selector applies to.", + Type: []string{"string"}, + Format: "", + }, + }, + "values": { + SchemaProps: spec.SchemaProps{ + Description: "An array of string values. One value must match the label to be selected. Each entry in Values is ORed.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"key", "values"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_TopologySelectorTerm(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. 
It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "matchLabelExpressions": { + SchemaProps: spec.SchemaProps{ + Description: "A list of topology selector requirements by labels.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.TopologySelectorLabelRequirement"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.TopologySelectorLabelRequirement"}, + } +} + +func schema_k8sio_api_core_v1_TopologySpreadConstraint(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "maxSkew": { + SchemaProps: spec.SchemaProps{ + Description: "MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It's a required field. Default value is 1 and 0 is not allowed.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "topologyKey": { + SchemaProps: spec.SchemaProps{ + Description: "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. 
We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.", + Type: []string{"string"}, + Format: "", + }, + }, + "whenUnsatisfiable": { + SchemaProps: spec.SchemaProps{ + Description: "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as \"Unsatisfiable\" if and only if placing incoming pod on any topology violates \"MaxSkew\". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.", + Type: []string{"string"}, + Format: "", + }, + }, + "labelSelector": { + SchemaProps: spec.SchemaProps{ + Description: "LabelSelector is used to find matching pods. 
Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), + }, + }, + }, + Required: []string{"maxSkew", "topologyKey", "whenUnsatisfiable"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, + } +} + +func schema_k8sio_api_core_v1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiGroup": { + SchemaProps: spec.SchemaProps{ + Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is the type of resource being referenced", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of resource being referenced", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"kind", "name"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Volume's name. Must be a DNS_LABEL and unique within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + Type: []string{"string"}, + Format: "", + }, + }, + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "gitRepo": { + SchemaProps: spec.SchemaProps{ + Description: "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + Ref: ref("k8s.io/api/core/v1.GitRepoVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. 
More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "DownwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + 
SchemaProps: spec.SchemaProps{ + Description: "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "Items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + 
}, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GitRepoVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"}, + } +} + +func schema_k8sio_api_core_v1_VolumeDevice(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "volumeDevice describes a mapping of a raw block device within a container.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name must match the name of a persistentVolumeClaim in the pod", + Type: []string{"string"}, + Format: "", + }, + }, + "devicePath": { + SchemaProps: spec.SchemaProps{ + Description: "devicePath is the path inside of the container that the device will be mapped 
to.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "devicePath"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_VolumeMount(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "VolumeMount describes a mounting of a Volume within a container.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "This must match the Name of a Volume.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "mountPath": { + SchemaProps: spec.SchemaProps{ + Description: "Path within the container at which the volume should be mounted. Must not contain ':'.", + Type: []string{"string"}, + Format: "", + }, + }, + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", + Type: []string{"string"}, + Format: "", + }, + }, + "mountPropagation": { + SchemaProps: spec.SchemaProps{ + Description: "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", + Type: []string{"string"}, + Format: "", + }, + }, + "subPathExpr": { + SchemaProps: spec.SchemaProps{ + Description: "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive. 
This field is beta in 1.15.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "mountPath"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_VolumeNodeAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "required": { + SchemaProps: spec.SchemaProps{ + Description: "Required specifies hard node constraints that must be met.", + Ref: ref("k8s.io/api/core/v1.NodeSelector"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NodeSelector"}, + } +} + +func schema_k8sio_api_core_v1_VolumeProjection(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Projection that may be projected along with other supported volume types", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "information about the secret data to project", + Ref: ref("k8s.io/api/core/v1.SecretProjection"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "information about the downwardAPI data to project", + Ref: ref("k8s.io/api/core/v1.DownwardAPIProjection"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "information about the configMap data to project", + Ref: ref("k8s.io/api/core/v1.ConfigMapProjection"), + }, + }, + "serviceAccountToken": { + SchemaProps: spec.SchemaProps{ + Description: "information about the serviceAccountToken data to project", + Ref: ref("k8s.io/api/core/v1.ServiceAccountTokenProjection"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapProjection", 
"k8s.io/api/core/v1.DownwardAPIProjection", "k8s.io/api/core/v1.SecretProjection", "k8s.io/api/core/v1.ServiceAccountTokenProjection"}, + } +} + +func schema_k8sio_api_core_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents the source of a volume to mount. Only one of its members may be specified.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "hostPath": { + SchemaProps: spec.SchemaProps{ + Description: "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "gcePersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), + }, + }, + "awsElasticBlockStore": { + SchemaProps: spec.SchemaProps{ + Description: "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), + }, + }, + "gitRepo": { + SchemaProps: spec.SchemaProps{ + Description: "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + Ref: ref("k8s.io/api/core/v1.GitRepoVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + "nfs": { + SchemaProps: spec.SchemaProps{ + Description: "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), + }, + }, + "iscsi": { + SchemaProps: spec.SchemaProps{ + Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), + }, + }, + "glusterfs": { + SchemaProps: spec.SchemaProps{ + Description: "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "rbd": { + SchemaProps: spec.SchemaProps{ + Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), + }, + }, + "flexVolume": { + SchemaProps: spec.SchemaProps{ + Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), + }, + }, + "cinder": { + SchemaProps: spec.SchemaProps{ + Description: "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), + }, + }, + "cephfs": { + SchemaProps: spec.SchemaProps{ + Description: "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), + }, + }, + "flocker": { + SchemaProps: spec.SchemaProps{ + Description: "Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), + }, + }, + "downwardAPI": { + SchemaProps: spec.SchemaProps{ + Description: "DownwardAPI represents downward API about the pod that should populate this volume", + Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), + }, + }, + "fc": { + SchemaProps: spec.SchemaProps{ + Description: "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), + }, + }, + "azureFile": { + SchemaProps: spec.SchemaProps{ + Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigMap represents a configMap that should populate this volume", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "vsphereVolume": { + SchemaProps: spec.SchemaProps{ + Description: "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), + }, + }, + "quobyte": { + SchemaProps: spec.SchemaProps{ + Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), + }, + }, + "azureDisk": { + SchemaProps: spec.SchemaProps{ + Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), + }, + }, + "photonPersistentDisk": { + SchemaProps: spec.SchemaProps{ + Description: "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), + }, + }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: 
"Items for all in one resources secrets, configmaps, and downward API", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "portworxVolume": { + SchemaProps: spec.SchemaProps{ + Description: "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), + }, + }, + "scaleIO": { + SchemaProps: spec.SchemaProps{ + Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), + }, + }, + "storageos": { + SchemaProps: spec.SchemaProps{ + Description: "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GitRepoVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", 
"k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"}, + } +} + +func schema_k8sio_api_core_v1_VsphereVirtualDiskVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Represents a vSphere volume resource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "volumePath": { + SchemaProps: spec.SchemaProps{ + Description: "Path that identifies vSphere volume vmdk", + Type: []string{"string"}, + Format: "", + }, + }, + "fsType": { + SchemaProps: spec.SchemaProps{ + Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", + Type: []string{"string"}, + Format: "", + }, + }, + "storagePolicyName": { + SchemaProps: spec.SchemaProps{ + Description: "Storage Policy Based Management (SPBM) profile name.", + Type: []string{"string"}, + Format: "", + }, + }, + "storagePolicyID": { + SchemaProps: spec.SchemaProps{ + Description: "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"volumePath"}, + }, + }, + } +} + +func schema_k8sio_api_core_v1_WeightedPodAffinityTerm(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "weight": { + SchemaProps: spec.SchemaProps{ + Description: "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "podAffinityTerm": { + SchemaProps: spec.SchemaProps{ + Description: "Required. 
A pod affinity term, associated with the corresponding weight.", + Ref: ref("k8s.io/api/core/v1.PodAffinityTerm"), + }, + }, + }, + Required: []string{"weight", "podAffinityTerm"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodAffinityTerm"}, + } +} + +func schema_k8sio_api_core_v1_WindowsSecurityContextOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WindowsSecurityContextOptions contain Windows-specific options and credentials.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "gmsaCredentialSpecName": { + SchemaProps: spec.SchemaProps{ + Description: "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", + Type: []string{"string"}, + Format: "", + }, + }, + "gmsaCredentialSpec": { + SchemaProps: spec.SchemaProps{ + Description: "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.", + Type: []string{"string"}, + Format: "", + }, + }, + "runAsUserName": { + SchemaProps: spec.SchemaProps{ + Description: "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
This field is alpha-level and it is only honored by servers that enable the WindowsRunAsUserName feature flag.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_APIGroup(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "APIGroup contains the name, the supported versions, and the preferred version of a group.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name is the name of the group.", + Type: []string{"string"}, + Format: "", + }, + }, + "versions": { + SchemaProps: spec.SchemaProps{ + Description: "versions are the versions supported in this group.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery"), + }, + }, + }, + }, + }, + "preferredVersion": { + SchemaProps: spec.SchemaProps{ + Description: "preferredVersion is the version preferred by the API server, which probably is the storage version.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery"), + }, + }, + "serverAddressByClientCIDRs": { + SchemaProps: spec.SchemaProps{ + Description: "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. 
Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"), + }, + }, + }, + }, + }, + }, + Required: []string{"name", "versions"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery", "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"}, + } +} + +func schema_pkg_apis_meta_v1_APIGroupList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "groups": { + SchemaProps: spec.SchemaProps{ + Description: "groups is a list of APIGroup.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup"), + }, + }, + }, + }, + }, + }, + Required: []string{"groups"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup"}, + } +} + +func schema_pkg_apis_meta_v1_APIResource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "APIResource specifies the name of a resource and whether it is namespaced.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name is the plural name of the resource.", + Type: []string{"string"}, + Format: "", + }, + }, + "singularName": { + SchemaProps: spec.SchemaProps{ + Description: "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + Type: []string{"string"}, + Format: "", + }, + }, + "namespaced": { + SchemaProps: spec.SchemaProps{ + Description: "namespaced indicates if a resource is namespaced or not.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "group": { + SchemaProps: spec.SchemaProps{ + Description: "group is the preferred group of the resource. Empty implies the group of the containing resource list. 
For subresources, this may have a different value, for example: Scale\".", + Type: []string{"string"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Description: "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + Type: []string{"string"}, + Format: "", + }, + }, + "verbs": { + SchemaProps: spec.SchemaProps{ + Description: "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "shortNames": { + SchemaProps: spec.SchemaProps{ + Description: "shortNames is a list of suggested short names of the resource.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "categories": { + SchemaProps: spec.SchemaProps{ + Description: "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "storageVersionHash": { + SchemaProps: spec.SchemaProps{ + Description: "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. 
This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "singularName", "namespaced", "kind", "verbs"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_APIResourceList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "groupVersion": { + SchemaProps: spec.SchemaProps{ + Description: "groupVersion is the group and version this APIResourceList is for.", + Type: []string{"string"}, + Format: "", + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "resources contains the name of the resources and if they are namespaced.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.APIResource"), + }, + }, + }, + }, + }, + }, + Required: []string{"groupVersion", "resources"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.APIResource"}, + } +} + +func schema_pkg_apis_meta_v1_APIVersions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "versions": { + SchemaProps: spec.SchemaProps{ + Description: "versions are the api versions that are available.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "serverAddressByClientCIDRs": { + SchemaProps: spec.SchemaProps{ + Description: "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. 
Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"), + }, + }, + }, + }, + }, + }, + Required: []string{"versions", "serverAddressByClientCIDRs"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"}, + } +} + +func schema_pkg_apis_meta_v1_CreateOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CreateOptions may be provided when creating an API object.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "dryRun": { + SchemaProps: spec.SchemaProps{ + Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "fieldManager": { + SchemaProps: spec.SchemaProps{ + Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_DeleteOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DeleteOptions may be provided when deleting an API object.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "gracePeriodSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "preconditions": { + SchemaProps: spec.SchemaProps{ + Description: "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions"), + }, + }, + "orphanDependents": { + SchemaProps: spec.SchemaProps{ + Description: "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "propagationPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + Type: []string{"string"}, + Format: "", + }, + }, + "dryRun": { + SchemaProps: spec.SchemaProps{ + Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions"}, + } +} + +func schema_pkg_apis_meta_v1_Duration(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Duration is a wrapper around time.Duration which supports correct marshaling to YAML and JSON. In particular, it marshals into strings, which can be used as map keys in json.", + Type: v1.Duration{}.OpenAPISchemaType(), + Format: v1.Duration{}.OpenAPISchemaFormat(), + }, + }, + } +} + +func schema_pkg_apis_meta_v1_ExportOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ExportOptions is the query options to the standard REST get call. Deprecated. Planned for removal in 1.18.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "export": { + SchemaProps: spec.SchemaProps{ + Description: "Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "exact": { + SchemaProps: spec.SchemaProps{ + Description: "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"export", "exact"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_FieldsV1(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. 
The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", + Type: []string{"object"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_GetOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GetOptions is the standard query options to the standard REST get call.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "resourceVersion": { + SchemaProps: spec.SchemaProps{ + Description: "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_GroupKind(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying concepts during lookup stages without having partially valid types", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "group": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"group", "kind"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_GroupResource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GroupResource specifies a Group and a Resource, but does not force a version. 
This is useful for identifying concepts during lookup stages without having partially valid types", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "group": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "resource": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"group", "resource"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_GroupVersion(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GroupVersion contains the \"group\" and the \"version\", which uniquely identifies the API.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "group": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"group", "version"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "groupVersion": { + SchemaProps: spec.SchemaProps{ + Description: "groupVersion specifies the API group and version in the form \"group/version\"", + Type: []string{"string"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Description: "version specifies the version in the form of \"version\". 
This is to save the clients the trouble of splitting the GroupVersion.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"groupVersion", "version"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_GroupVersionKind(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "group": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"group", "version", "kind"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_GroupVersionResource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion to avoid automatic coersion. 
It doesn't use a GroupVersion to avoid custom marshalling", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "group": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "resource": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"group", "version", "resource"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_InternalEvent(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "InternalEvent makes watch.Event versioned", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "Type": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "Object": { + SchemaProps: spec.SchemaProps{ + Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Bookmark: the object (instance of a type being watched) where\n only ResourceVersion field is set. On successful restart of watch from a\n bookmark resourceVersion, client is guaranteed to not get repeat event\n nor miss any events.\n * If Type is Error: *api.Status is recommended; other types may make sense\n depending on context.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.Object"), + }, + }, + }, + Required: []string{"Type", "Object"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/runtime.Object"}, + } +} + +func schema_pkg_apis_meta_v1_LabelSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A label selector is a label query over a set of resources. 
The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "matchLabels": { + SchemaProps: spec.SchemaProps{ + Description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "matchExpressions": { + SchemaProps: spec.SchemaProps{ + Description: "matchExpressions is a list of label selector requirements. The requirements are ANDed.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement"}, + } +} + +func schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "key": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "key", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "key is the label key that the selector applies to.", + Type: []string{"string"}, + Format: "", + }, + }, + 
"operator": { + SchemaProps: spec.SchemaProps{ + Description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", + Type: []string{"string"}, + Format: "", + }, + }, + "values": { + SchemaProps: spec.SchemaProps{ + Description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"key", "operator"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_List(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "List holds a list of objects, which may not be known by the server.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "List of objects", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + } +} + +func schema_pkg_apis_meta_v1_ListMeta(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "selfLink": { + SchemaProps: spec.SchemaProps{ + Description: "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + Type: []string{"string"}, + Format: "", + }, + }, + "resourceVersion": { + SchemaProps: spec.SchemaProps{ + Description: "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + Type: []string{"string"}, + Format: "", + }, + }, + "continue": { + SchemaProps: spec.SchemaProps{ + Description: "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + Type: []string{"string"}, + Format: "", + }, + }, + "remainingItemCount": { + SchemaProps: spec.SchemaProps{ + Description: "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. 
Clients should not rely on the remainingItemCount to be set or to be exact.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_ListOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ListOptions is the query options to a standard REST list call.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "labelSelector": { + SchemaProps: spec.SchemaProps{ + Description: "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + Type: []string{"string"}, + Format: "", + }, + }, + "fieldSelector": { + SchemaProps: spec.SchemaProps{ + Description: "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + Type: []string{"string"}, + Format: "", + }, + }, + "watch": { + SchemaProps: spec.SchemaProps{ + Description: "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "allowWatchBookmarks": { + SchemaProps: spec.SchemaProps{ + Description: "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.\n\nThis field is beta.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "resourceVersion": { + SchemaProps: spec.SchemaProps{ + Description: "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + Type: []string{"string"}, + Format: "", + }, + }, + "timeoutSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "limit": { + SchemaProps: spec.SchemaProps{ + Description: "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "continue": { + SchemaProps: spec.SchemaProps{ + Description: "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_ManagedFieldsEntry(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "manager": { + SchemaProps: spec.SchemaProps{ + Description: "Manager is an identifier of the workflow managing these fields.", + Type: []string{"string"}, + Format: "", + }, + }, + "operation": { + SchemaProps: spec.SchemaProps{ + Description: "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", + Type: []string{"string"}, + Format: "", + }, + }, + "time": { + SchemaProps: spec.SchemaProps{ + Description: "Time is timestamp of when these fields were set. 
It should always be empty if Operation is 'Apply'", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "fieldsType": { + SchemaProps: spec.SchemaProps{ + Description: "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", + Type: []string{"string"}, + Format: "", + }, + }, + "fieldsV1": { + SchemaProps: spec.SchemaProps{ + Description: "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_pkg_apis_meta_v1_MicroTime(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MicroTime is version of Time with microsecond level precision.", + Type: v1.MicroTime{}.OpenAPISchemaType(), + Format: v1.MicroTime{}.OpenAPISchemaFormat(), + }, + }, + } +} + +func schema_pkg_apis_meta_v1_ObjectMeta(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. 
More info: http://kubernetes.io/docs/user-guide/identifiers#names", + Type: []string{"string"}, + Format: "", + }, + }, + "generateName": { + SchemaProps: spec.SchemaProps{ + Description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + Type: []string{"string"}, + Format: "", + }, + }, + "namespace": { + SchemaProps: spec.SchemaProps{ + Description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + Type: []string{"string"}, + Format: "", + }, + }, + "selfLink": { + SchemaProps: spec.SchemaProps{ + Description: "SelfLink is a URL representing this object. Populated by the system. 
Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + Type: []string{"string"}, + Format: "", + }, + }, + "uid": { + SchemaProps: spec.SchemaProps{ + Description: "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + Type: []string{"string"}, + Format: "", + }, + }, + "resourceVersion": { + SchemaProps: spec.SchemaProps{ + Description: "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + Type: []string{"string"}, + Format: "", + }, + }, + "generation": { + SchemaProps: spec.SchemaProps{ + Description: "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "creationTimestamp": { + SchemaProps: spec.SchemaProps{ + Description: "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "deletionTimestamp": { + SchemaProps: spec.SchemaProps{ + Description: "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "deletionGracePeriodSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "labels": { + SchemaProps: spec.SchemaProps{ + Description: "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "annotations": { + SchemaProps: spec.SchemaProps{ + Description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "ownerReferences": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "uid", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. 
There cannot be more than one managing controller.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"), + }, + }, + }, + }, + }, + "finalizers": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "clusterName": { + SchemaProps: spec.SchemaProps{ + Description: "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", + Type: []string{"string"}, + Format: "", + }, + }, + "managedFields": { + SchemaProps: spec.SchemaProps{ + Description: "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". 
The set of fields is always in the version that the workflow used when modifying the object.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry", "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_pkg_apis_meta_v1_OwnerReference(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "API version of the referent.", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + Type: []string{"string"}, + Format: "", + }, + }, + "uid": { + SchemaProps: spec.SchemaProps{ + Description: "UID of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + Type: []string{"string"}, + Format: "", + }, + }, + "controller": { + SchemaProps: spec.SchemaProps{ + Description: "If true, this reference points to the managing controller.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "blockOwnerDeletion": { + SchemaProps: spec.SchemaProps{ + Description: "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"apiVersion", "kind", "name", "uid"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_PartialObjectMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_meta_v1_PartialObjectMetadataList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PartialObjectMetadataList contains a list of objects containing only their metadata", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "items contains each of the included items.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata"}, + } +} + +func schema_pkg_apis_meta_v1_Patch(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", + Type: []string{"object"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_PatchOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "dryRun": { + SchemaProps: spec.SchemaProps{ + Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "force": { + SchemaProps: spec.SchemaProps{ + Description: "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "fieldManager": { + SchemaProps: spec.SchemaProps{ + Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_Preconditions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Preconditions must be fulfilled before an operation (update, delete, etc.) 
is carried out.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "uid": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the target UID.", + Type: []string{"string"}, + Format: "", + }, + }, + "resourceVersion": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the target ResourceVersion", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_RootPaths(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "paths": { + SchemaProps: spec.SchemaProps{ + Description: "paths are the paths available at root.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"paths"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "clientCIDR": { + SchemaProps: spec.SchemaProps{ + Description: "The CIDR with which clients can match their IP to figure out the server address that they should use.", + Type: []string{"string"}, + Format: "", + }, + }, + "serverAddress": { + SchemaProps: spec.SchemaProps{ + Description: "Address of this server, suitable for a client that matches the above CIDR. 
This can be a hostname, hostname:port, IP or IP:port.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"clientCIDR", "serverAddress"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_Status(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Status is a return value for calls that don't return other objects.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the operation. One of: \"Success\" or \"Failure\". 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human-readable description of the status of this operation.", + Type: []string{"string"}, + Format: "", + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", + Type: []string{"string"}, + Format: "", + }, + }, + "details": { + SchemaProps: spec.SchemaProps{ + Description: "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails"), + }, + }, + "code": { + SchemaProps: spec.SchemaProps{ + Description: "Suggested HTTP return code for this status, 0 if not set.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails"}, + } +} + +func schema_pkg_apis_meta_v1_StatusCause(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "A machine-readable description of the cause of the error. 
If this value is empty there is no information available.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human-readable description of the cause of the error. This field may be presented as-is to a reader.", + Type: []string{"string"}, + Format: "", + }, + }, + "field": { + SchemaProps: spec.SchemaProps{ + Description: "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_StatusDetails(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. 
Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", + Type: []string{"string"}, + Format: "", + }, + }, + "group": { + SchemaProps: spec.SchemaProps{ + Description: "The group attribute of the resource associated with the status StatusReason.", + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "uid": { + SchemaProps: spec.SchemaProps{ + Description: "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + Type: []string{"string"}, + Format: "", + }, + }, + "causes": { + SchemaProps: spec.SchemaProps{ + Description: "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause"), + }, + }, + }, + }, + }, + "retryAfterSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the time in seconds before the operation should be retried. 
Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause"}, + } +} + +func schema_pkg_apis_meta_v1_Table(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "columnDefinitions": { + SchemaProps: spec.SchemaProps{ + Description: "columnDefinitions describes each column in the returned items array. 
The number of cells per row will always match the number of column definitions.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition"), + }, + }, + }, + }, + }, + "rows": { + SchemaProps: spec.SchemaProps{ + Description: "rows is the list of items in the table.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.TableRow"), + }, + }, + }, + }, + }, + }, + Required: []string{"columnDefinitions", "rows"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition", "k8s.io/apimachinery/pkg/apis/meta/v1.TableRow"}, + } +} + +func schema_pkg_apis_meta_v1_TableColumnDefinition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TableColumnDefinition contains information about a column returned in the Table.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name is a human readable name for the column.", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "type is an OpenAPI type definition for this column, such as number, integer, string, or array. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + Type: []string{"string"}, + Format: "", + }, + }, + "format": { + SchemaProps: spec.SchemaProps{ + Description: "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. 
The 'name' format is applied to the primary identifier column which has type 'string' to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + Type: []string{"string"}, + Format: "", + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "description is a human readable description of this column.", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"name", "type", "format", "description", "priority"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_TableOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TableOptions are used when a Table is requested by the caller.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "includeObject": { + SchemaProps: spec.SchemaProps{ + Description: "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_TableRow(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TableRow is an individual row in a table.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "cells": { + SchemaProps: spec.SchemaProps{ + Description: "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Format: "", + }, + }, + }, + }, + }, + "conditions": { + SchemaProps: spec.SchemaProps{ + Description: "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. 
The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition"), + }, + }, + }, + }, + }, + "object": { + SchemaProps: spec.SchemaProps{ + Description: "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + }, + Required: []string{"cells"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + } +} + +func schema_pkg_apis_meta_v1_TableRowCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TableRowCondition allows a row to be marked with additional information.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. 
Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "(brief) machine readable reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "Human readable message indicating details about last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_Time(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.", + Type: v1.Time{}.OpenAPISchemaType(), + Format: v1.Time{}.OpenAPISchemaFormat(), + }, + }, + } +} + +func schema_pkg_apis_meta_v1_Timestamp(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Timestamp is a struct that is equivalent to Time, but intended for protobuf marshalling/unmarshalling. It is generated into a serialization that matches Time. Do not use in Go structs.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "seconds": { + SchemaProps: spec.SchemaProps{ + Description: "Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. 
Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "nanos": { + SchemaProps: spec.SchemaProps{ + Description: "Non-negative fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive. This field may be limited in precision depending on context.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"seconds", "nanos"}, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_TypeMeta(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_UpdateOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "dryRun": { + SchemaProps: spec.SchemaProps{ + Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "fieldManager": { + SchemaProps: spec.SchemaProps{ + Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_meta_v1_WatchEvent(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Event represents a single event to a watched resource.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "object": { + SchemaProps: spec.SchemaProps{ + Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.", + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + }, + Required: []string{"type", "object"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + } +} + +func schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + Type: []string{"object"}, + }, + }, + } +} + +func schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, like this: type MyAwesomeAPIObject struct {\n runtime.TypeMeta `json:\",inline\"`\n ... // other fields\n} func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. 
You may use it directly from this package or define your own with the same fields.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_k8sio_apimachinery_pkg_runtime_Unknown(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Unknown allows api objects with unknown types to be passed-through. This can be used to deal with the API objects from a plug-in. Unknown objects still have functioning TypeMeta features-- kind, version, etc. metadata and field mutatation.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "kind": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "Raw": { + SchemaProps: spec.SchemaProps{ + Description: "Raw will hold the complete serialized object which couldn't be matched with a registered type. Most likely, nothing should be done with this except for passing it through the system.", + Type: []string{"string"}, + Format: "byte", + }, + }, + "ContentEncoding": { + SchemaProps: spec.SchemaProps{ + Description: "ContentEncoding is encoding used to encode 'Raw' data. Unspecified means no encoding.", + Type: []string{"string"}, + Format: "", + }, + }, + "ContentType": { + SchemaProps: spec.SchemaProps{ + Description: "ContentType is serialization method used to serialize 'Raw'. 
Unspecified means ContentTypeJSON.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"Raw", "ContentEncoding", "ContentType"}, + }, + }, + } +} + +func schema_pkg_apis_cluster_v1alpha1_Agent(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Agent is the Schema for the agents API", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Ref: ref("kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.AgentSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Ref: ref("kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.AgentStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.AgentSpec", "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.AgentStatus"}, + } +} + +func schema_pkg_apis_cluster_v1alpha1_AgentCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of AgentCondition", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", + }, + }, + "lastUpdateTime": { + SchemaProps: spec.SchemaProps{ + Description: "The last time this condition was updated.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "The reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable 
message indicating details about the transition.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"status"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_pkg_apis_cluster_v1alpha1_AgentList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AgentList contains a list of Agent", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.Agent"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.Agent"}, + } +} + +func schema_pkg_apis_cluster_v1alpha1_AgentSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AgentSpec defines the desired state of Agent", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "token": { + SchemaProps: spec.SchemaProps{ + Description: "Token used by agents to connect to proxy.", + Type: []string{"string"}, + Format: "", + }, + }, + "proxy": { + SchemaProps: spec.SchemaProps{ + Description: "Proxy address", + Type: []string{"string"}, + Format: "", + }, + }, + "kubernetesAPIServerPort": { + SchemaProps: spec.SchemaProps{ + Description: "KubeAPIServerPort is the port which listens for forwarding kube-apiserver traffic", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "kubesphereAPIServerPort": { + SchemaProps: spec.SchemaProps{ + Description: "KubeSphereAPIServerPort is the port which listens for forwarding kubesphere apigateway traffic", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "Paused": { + SchemaProps: spec.SchemaProps{ + Description: "Indicates that the agent is paused.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func 
schema_pkg_apis_cluster_v1alpha1_AgentStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AgentStatus defines the observed state of Agent", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "conditions": { + SchemaProps: spec.SchemaProps{ + Description: "Represents the latest available observations of a agent's current state.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.AgentCondition"), + }, + }, + }, + }, + }, + "ping": { + SchemaProps: spec.SchemaProps{ + Description: "Represents the connection quality, in ms", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "KubeConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Issued new kubeconfig by proxy server", + Type: []string{"string"}, + Format: "byte", + }, + }, + }, + Required: []string{"KubeConfig"}, + }, + }, + Dependencies: []string{ + "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.AgentCondition"}, + } +} + +func schema_pkg_apis_cluster_v1alpha1_Cluster(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Cluster is the schema for the clusters API", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Ref: ref("kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.ClusterSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Ref: ref("kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.ClusterStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.ClusterSpec", "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.ClusterStatus"}, + } +} + +func schema_pkg_apis_cluster_v1alpha1_ClusterList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.Cluster"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1.Cluster"}, + } +} + +func schema_pkg_apis_cluster_v1alpha1_ClusterSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "federated": { + SchemaProps: spec.SchemaProps{ + Description: "Join cluster as kubefed cluster", + Type: []string{"boolean"}, + Format: "", + }, + }, + "state": { + SchemaProps: spec.SchemaProps{ + Description: "Desired state of the cluster", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_cluster_v1alpha1_ClusterStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ 
+ "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of the condition", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", + }, + }, + "lastUpdateTime": { + SchemaProps: spec.SchemaProps{ + Description: "The last time this condition was updated.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "The reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about the transition.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} diff --git a/pkg/apis/tower/v1alpha1/register.go b/pkg/apis/cluster/v1alpha1/register.go similarity index 90% rename from pkg/apis/tower/v1alpha1/register.go rename to pkg/apis/cluster/v1alpha1/register.go index e22fc7864..6cb43d6bc 100644 --- a/pkg/apis/tower/v1alpha1/register.go +++ b/pkg/apis/cluster/v1alpha1/register.go @@ -17,7 +17,7 @@ limitations under the License. 
// +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register // +k8s:defaulter-gen=TypeMeta -// +groupName=tower.kubesphere.io +// +groupName=cluster.kubesphere.io package v1alpha1 import ( @@ -27,7 +27,7 @@ import ( var ( // GroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "tower.kubesphere.io", Version: "v1alpha1"} + SchemeGroupVersion = schema.GroupVersion{Group: "cluster.kubesphere.io", Version: "v1alpha1"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} diff --git a/pkg/apis/tower/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go similarity index 59% rename from pkg/apis/tower/v1alpha1/zz_generated.deepcopy.go rename to pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go index 87a6e5cd3..cbe0824e5 100644 --- a/pkg/apis/tower/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by controller-gen. DO NOT EDIT. +// Code generated by deepcopy-gen. DO NOT EDIT. package v1alpha1 @@ -31,6 +31,7 @@ func (in *Agent) DeepCopyInto(out *Agent) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec in.Status.DeepCopyInto(&out.Status) + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Agent. @@ -56,6 +57,7 @@ func (in *AgentCondition) DeepCopyInto(out *AgentCondition) { *out = *in in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentCondition. 
@@ -80,6 +82,7 @@ func (in *AgentList) DeepCopyInto(out *AgentList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentList. @@ -103,6 +106,7 @@ func (in *AgentList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AgentSpec) DeepCopyInto(out *AgentSpec) { *out = *in + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentSpec. @@ -130,6 +134,7 @@ func (in *AgentStatus) DeepCopyInto(out *AgentStatus) { *out = make([]byte, len(*in)) copy(*out, *in) } + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentStatus. @@ -141,3 +146,98 @@ func (in *AgentStatus) DeepCopy() *AgentStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterList) DeepCopyInto(out *ClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList. +func (in *ClusterList) DeepCopy() *ClusterList { + if in == nil { + return nil + } + out := new(ClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec. +func (in *ClusterSpec) DeepCopy() *ClusterSpec { + if in == nil { + return nil + } + out := new(ClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus. 
+func (in *ClusterStatus) DeepCopy() *ClusterStatus { + if in == nil { + return nil + } + out := new(ClusterStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/apis/iam/v1alpha2/user_types.go b/pkg/apis/iam/v1alpha2/user_types.go index ee9034af7..ee257c6c8 100644 --- a/pkg/apis/iam/v1alpha2/user_types.go +++ b/pkg/apis/iam/v1alpha2/user_types.go @@ -20,17 +20,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -// User is the Schema for the users API -// +k8s:openapi-gen=true // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +genclient:nonNamespaced + +// +k8s:openapi-gen=true // +kubebuilder:printcolumn:name="Email",type="string",JSONPath=".spec.email" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.state" // +kubebuilder:resource:categories="iam",scope="Cluster" +// User is the Schema for the users API type User struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -119,7 +117,6 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient:nonNamespaced // UserList contains a list of User type UserList struct { diff --git a/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go index a70563680..f963f4f8e 100644 --- a/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go @@ -16,7 +16,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by controller-gen. DO NOT EDIT. +// Code generated by deepcopy-gen. DO NOT EDIT. 
package v1alpha2 @@ -31,6 +31,7 @@ func (in *User) DeepCopyInto(out *User) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. @@ -55,6 +56,7 @@ func (in *User) DeepCopyObject() runtime.Object { func (in *UserCondition) DeepCopyInto(out *UserCondition) { *out = *in in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserCondition. @@ -79,6 +81,7 @@ func (in *UserList) DeepCopyInto(out *UserList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. @@ -112,6 +115,7 @@ func (in *UserSpec) DeepCopyInto(out *UserSpec) { *out = make([]FinalizerName, len(*in)) copy(*out, *in) } + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. @@ -134,6 +138,7 @@ func (in *UserStatus) DeepCopyInto(out *UserStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserStatus. diff --git a/pkg/apis/servicemesh/v1alpha2/servicepolicy_types.go b/pkg/apis/servicemesh/v1alpha2/servicepolicy_types.go index b961c0a8d..26c018ea5 100644 --- a/pkg/apis/servicemesh/v1alpha2/servicepolicy_types.go +++ b/pkg/apis/servicemesh/v1alpha2/servicepolicy_types.go @@ -47,7 +47,7 @@ type DestinationRuleSpecTemplate struct { // Metadata of the virtual services created from this template // +optional - metav1.ObjectMeta + metav1.ObjectMeta `json:"metadata,omitempty"` // Spec indicates the behavior of a destination rule. // +optional @@ -68,42 +68,42 @@ const ( // StrategyCondition describes current state of a strategy. 
type ServicePolicyCondition struct { // Type of strategy condition, Complete or Failed. - Type ServicePolicyConditionType + Type ServicePolicyConditionType `json:"type,omitempty"` // Status of the condition, one of True, False, Unknown - Status apiextensions.ConditionStatus + Status apiextensions.ConditionStatus `json:"status,omitempty"` // Last time the condition was checked. // +optional - LastProbeTime metav1.Time + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` // Last time the condition transit from one status to another // +optional - LastTransitionTime metav1.Time + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` // reason for the condition's last transition - Reason string + Reason string `json:"reason,omitempty"` // Human readable message indicating details about last transition. // +optinal - Message string + Message string `json:"message,omitempty"` } // ServicePolicyStatus defines the observed state of ServicePolicy type ServicePolicyStatus struct { // The latest available observations of an object's current state. // +optional - Conditions []ServicePolicyCondition + Conditions []ServicePolicyCondition `json:"conditions,omitempty"` // Represents time when the strategy was acknowledged by the controller. // It is represented in RFC3339 form and is in UTC. // +optional - StartTime *metav1.Time + StartTime *metav1.Time `json:"startTime,omitempty"` // Represents time when the strategy was completed. // It is represented in RFC3339 form and is in UTC. 
// +optional - CompletionTime *metav1.Time + CompletionTime *metav1.Time `json:"completionTime,omitempty"` } // +genclient diff --git a/pkg/apis/servicemesh/v1alpha2/strategy_types.go b/pkg/apis/servicemesh/v1alpha2/strategy_types.go index 5954e4b28..c86b459d9 100644 --- a/pkg/apis/servicemesh/v1alpha2/strategy_types.go +++ b/pkg/apis/servicemesh/v1alpha2/strategy_types.go @@ -103,17 +103,17 @@ type StrategyStatus struct { // The latest available observations of an object's current state. // +optional - Conditions []StrategyCondition + Conditions []StrategyCondition `json:"conditions,omitempty"` // Represents time when the strategy was acknowledged by the controller. // It is represented in RFC3339 form and is in UTC. // +optional - StartTime *metav1.Time + StartTime *metav1.Time `json:"startTime,omitempty"` // Represents time when the strategy was completed. // It is represented in RFC3339 form and is in UTC. // +optional - CompletionTime *metav1.Time + CompletionTime *metav1.Time `json:"completionTime,omitempty"` } type StrategyConditionType string @@ -130,25 +130,25 @@ const ( // StrategyCondition describes current state of a strategy. type StrategyCondition struct { // Type of strategy condition, Complete or Failed. - Type StrategyConditionType + Type StrategyConditionType `json:"type,omitempty"` // Status of the condition, one of True, False, Unknown - Status apiextensions.ConditionStatus + Status apiextensions.ConditionStatus `json:"status,omitempty"` // Last time the condition was checked. // +optional - LastProbeTime metav1.Time + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` // Last time the condition transit from one status to another // +optional - LastTransitionTime metav1.Time + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` // reason for the condition's last transition - Reason string + Reason string `json:"reason,omitempty"` // Human readable message indicating details about last transition. 
// +optinal - Message string + Message string `json:"message,omitempty"` } // +genclient diff --git a/pkg/apis/tower/group.go b/pkg/apis/tower/group.go deleted file mode 100644 index 260171028..000000000 --- a/pkg/apis/tower/group.go +++ /dev/null @@ -1 +0,0 @@ -package tower diff --git a/pkg/apis/tower/v1alpha1/doc.go b/pkg/apis/tower/v1alpha1/doc.go deleted file mode 100644 index 469552839..000000000 --- a/pkg/apis/tower/v1alpha1/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package v1alpha2 contains API Schema definitions for the tower v1alpha1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=kubesphere.io/tower/pkg/apis/tower -// +k8s:defaulter-gen=TypeMeta -// +groupName=tower.kubesphere.io - -package v1alpha1 diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 323ec1de1..7c4c701d1 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -182,7 +182,7 @@ func (s *APIServer) buildHandlerChain() { handler := s.Server.Handler handler = filters.WithKubeAPIServer(handler, s.KubernetesClient.Config(), &errorResponder{}) - handler = filters.WithMultipleClusterDispatcher(handler, dispatch.NewClusterDispatch(s.InformerFactory.KubeSphereSharedInformerFactory().Tower().V1alpha1().Agents().Lister())) + handler = filters.WithMultipleClusterDispatcher(handler, dispatch.NewClusterDispatch(s.InformerFactory.KubeSphereSharedInformerFactory().Cluster().V1alpha1().Agents().Lister())) excludedPaths := []string{"/oauth/*", "/kapis/config.kubesphere.io/*"} pathAuthorizer, _ := path.NewAuthorizer(excludedPaths) diff --git a/pkg/apiserver/dispatch/dispatch.go b/pkg/apiserver/dispatch/dispatch.go index 42385c55c..fd079b134 100644 --- a/pkg/apiserver/dispatch/dispatch.go +++ b/pkg/apiserver/dispatch/dispatch.go @@ -6,9 +6,9 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/proxy" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" - towerv1alpha1 
"kubesphere.io/kubesphere/pkg/apis/tower/v1alpha1" + clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" "kubesphere.io/kubesphere/pkg/apiserver/request" - "kubesphere.io/kubesphere/pkg/client/listers/tower/v1alpha1" + "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1" "net/http" "strings" ) @@ -38,7 +38,7 @@ func (c *clusterDispatch) Dispatch(w http.ResponseWriter, req *http.Request, han return } - agent, err := c.agentLister.Agents(defaultMultipleClusterAgentNamespace).Get(info.Cluster) + agent, err := c.agentLister.Get(info.Cluster) if err != nil { if errors.IsNotFound(err) { http.Error(w, fmt.Sprintf("cluster %s not found", info.Cluster), http.StatusNotFound) @@ -65,9 +65,9 @@ func (c *clusterDispatch) Error(w http.ResponseWriter, req *http.Request, err er responsewriters.InternalError(w, req, err) } -func isAgentReady(agent *towerv1alpha1.Agent) bool { +func isAgentReady(agent *clusterv1alpha1.Agent) bool { for _, condition := range agent.Status.Conditions { - if condition.Type == towerv1alpha1.AgentConnected && condition.Status == corev1.ConditionTrue { + if condition.Type == clusterv1alpha1.AgentConnected && condition.Status == corev1.ConditionTrue { return true } } diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index 1edd6dc24..9bfc4434c 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -24,34 +24,39 @@ import ( discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" + clusterv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/cluster/v1alpha1" devopsv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/devops/v1alpha1" iamv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/iam/v1alpha2" networkv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/network/v1alpha1" 
servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2" tenantv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tenant/v1alpha1" - towerv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tower/v1alpha1" ) type Interface interface { Discovery() discovery.DiscoveryInterface + ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface DevopsV1alpha1() devopsv1alpha1.DevopsV1alpha1Interface IamV1alpha2() iamv1alpha2.IamV1alpha2Interface NetworkV1alpha1() networkv1alpha1.NetworkV1alpha1Interface ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha2Interface TenantV1alpha1() tenantv1alpha1.TenantV1alpha1Interface - TowerV1alpha1() towerv1alpha1.TowerV1alpha1Interface } // Clientset contains the clients for groups. Each group has exactly one // version included in a Clientset. type Clientset struct { *discovery.DiscoveryClient + clusterV1alpha1 *clusterv1alpha1.ClusterV1alpha1Client devopsV1alpha1 *devopsv1alpha1.DevopsV1alpha1Client iamV1alpha2 *iamv1alpha2.IamV1alpha2Client networkV1alpha1 *networkv1alpha1.NetworkV1alpha1Client servicemeshV1alpha2 *servicemeshv1alpha2.ServicemeshV1alpha2Client tenantV1alpha1 *tenantv1alpha1.TenantV1alpha1Client - towerV1alpha1 *towerv1alpha1.TowerV1alpha1Client +} + +// ClusterV1alpha1 retrieves the ClusterV1alpha1Client +func (c *Clientset) ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface { + return c.clusterV1alpha1 } // DevopsV1alpha1 retrieves the DevopsV1alpha1Client @@ -79,11 +84,6 @@ func (c *Clientset) TenantV1alpha1() tenantv1alpha1.TenantV1alpha1Interface { return c.tenantV1alpha1 } -// TowerV1alpha1 retrieves the TowerV1alpha1Client -func (c *Clientset) TowerV1alpha1() towerv1alpha1.TowerV1alpha1Interface { - return c.towerV1alpha1 -} - // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -105,6 +105,10 @@ func NewForConfig(c *rest.Config) 
(*Clientset, error) { } var cs Clientset var err error + cs.clusterV1alpha1, err = clusterv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.devopsV1alpha1, err = devopsv1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -125,10 +129,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.towerV1alpha1, err = towerv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -141,12 +141,12 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset + cs.clusterV1alpha1 = clusterv1alpha1.NewForConfigOrDie(c) cs.devopsV1alpha1 = devopsv1alpha1.NewForConfigOrDie(c) cs.iamV1alpha2 = iamv1alpha2.NewForConfigOrDie(c) cs.networkV1alpha1 = networkv1alpha1.NewForConfigOrDie(c) cs.servicemeshV1alpha2 = servicemeshv1alpha2.NewForConfigOrDie(c) cs.tenantV1alpha1 = tenantv1alpha1.NewForConfigOrDie(c) - cs.towerV1alpha1 = towerv1alpha1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -155,12 +155,12 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. 
func New(c rest.Interface) *Clientset { var cs Clientset + cs.clusterV1alpha1 = clusterv1alpha1.New(c) cs.devopsV1alpha1 = devopsv1alpha1.New(c) cs.iamV1alpha2 = iamv1alpha2.New(c) cs.networkV1alpha1 = networkv1alpha1.New(c) cs.servicemeshV1alpha2 = servicemeshv1alpha2.New(c) cs.tenantV1alpha1 = tenantv1alpha1.New(c) - cs.towerV1alpha1 = towerv1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 81af882ab..bcf78bb29 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -25,6 +25,8 @@ import ( fakediscovery "k8s.io/client-go/discovery/fake" "k8s.io/client-go/testing" clientset "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + clusterv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/cluster/v1alpha1" + fakeclusterv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake" devopsv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/devops/v1alpha1" fakedevopsv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/devops/v1alpha1/fake" iamv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/iam/v1alpha2" @@ -35,8 +37,6 @@ import ( fakeservicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/servicemesh/v1alpha2/fake" tenantv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tenant/v1alpha1" faketenantv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tenant/v1alpha1/fake" - towerv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tower/v1alpha1" - faketowerv1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tower/v1alpha1/fake" ) // NewSimpleClientset returns a clientset that will respond with the 
provided objects. @@ -86,6 +86,11 @@ func (c *Clientset) Tracker() testing.ObjectTracker { var _ clientset.Interface = &Clientset{} +// ClusterV1alpha1 retrieves the ClusterV1alpha1Client +func (c *Clientset) ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface { + return &fakeclusterv1alpha1.FakeClusterV1alpha1{Fake: &c.Fake} +} + // DevopsV1alpha1 retrieves the DevopsV1alpha1Client func (c *Clientset) DevopsV1alpha1() devopsv1alpha1.DevopsV1alpha1Interface { return &fakedevopsv1alpha1.FakeDevopsV1alpha1{Fake: &c.Fake} @@ -110,8 +115,3 @@ func (c *Clientset) ServicemeshV1alpha2() servicemeshv1alpha2.ServicemeshV1alpha func (c *Clientset) TenantV1alpha1() tenantv1alpha1.TenantV1alpha1Interface { return &faketenantv1alpha1.FakeTenantV1alpha1{Fake: &c.Fake} } - -// TowerV1alpha1 retrieves the TowerV1alpha1Client -func (c *Clientset) TowerV1alpha1() towerv1alpha1.TowerV1alpha1Interface { - return &faketowerv1alpha1.FakeTowerV1alpha1{Fake: &c.Fake} -} diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index 8adecc487..542e7cc0f 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -24,24 +24,24 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" devopsv1alpha1 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1" iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1" servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" - towerv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tower/v1alpha1" ) var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var parameterCodec = 
runtime.NewParameterCodec(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ + clusterv1alpha1.AddToScheme, devopsv1alpha1.AddToScheme, iamv1alpha2.AddToScheme, networkv1alpha1.AddToScheme, servicemeshv1alpha2.AddToScheme, tenantv1alpha1.AddToScheme, - towerv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index f9a270a9a..0d828a2f6 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -24,24 +24,24 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" devopsv1alpha1 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1" iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1" servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" - towerv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tower/v1alpha1" ) var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ + clusterv1alpha1.AddToScheme, devopsv1alpha1.AddToScheme, iamv1alpha2.AddToScheme, networkv1alpha1.AddToScheme, servicemeshv1alpha2.AddToScheme, tenantv1alpha1.AddToScheme, - towerv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition diff --git a/pkg/client/clientset/versioned/typed/tower/v1alpha1/agent.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/agent.go similarity index 93% rename from pkg/client/clientset/versioned/typed/tower/v1alpha1/agent.go rename to pkg/client/clientset/versioned/typed/cluster/v1alpha1/agent.go index c445c9142..0fa113684 100644 --- a/pkg/client/clientset/versioned/typed/tower/v1alpha1/agent.go +++ b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/agent.go @@ -25,14 +25,14 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" - v1alpha1 "kubesphere.io/kubesphere/pkg/apis/tower/v1alpha1" + v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" scheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" ) // AgentsGetter has a method to return a AgentInterface. // A group's client should implement this interface. type AgentsGetter interface { - Agents(namespace string) AgentInterface + Agents() AgentInterface } // AgentInterface has methods to work with Agent resources. @@ -52,14 +52,12 @@ type AgentInterface interface { // agents implements AgentInterface type agents struct { client rest.Interface - ns string } // newAgents returns a Agents -func newAgents(c *TowerV1alpha1Client, namespace string) *agents { +func newAgents(c *ClusterV1alpha1Client) *agents { return &agents{ client: c.RESTClient(), - ns: namespace, } } @@ -67,7 +65,6 @@ func newAgents(c *TowerV1alpha1Client, namespace string) *agents { func (c *agents) Get(name string, options v1.GetOptions) (result *v1alpha1.Agent, err error) { result = &v1alpha1.Agent{} err = c.client.Get(). - Namespace(c.ns). Resource("agents"). Name(name). VersionedParams(&options, scheme.ParameterCodec). @@ -84,7 +81,6 @@ func (c *agents) List(opts v1.ListOptions) (result *v1alpha1.AgentList, err erro } result = &v1alpha1.AgentList{} err = c.client.Get(). - Namespace(c.ns). Resource("agents"). 
VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). @@ -101,7 +97,6 @@ func (c *agents) Watch(opts v1.ListOptions) (watch.Interface, error) { } opts.Watch = true return c.client.Get(). - Namespace(c.ns). Resource("agents"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). @@ -112,7 +107,6 @@ func (c *agents) Watch(opts v1.ListOptions) (watch.Interface, error) { func (c *agents) Create(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) { result = &v1alpha1.Agent{} err = c.client.Post(). - Namespace(c.ns). Resource("agents"). Body(agent). Do(). @@ -124,7 +118,6 @@ func (c *agents) Create(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err erro func (c *agents) Update(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) { result = &v1alpha1.Agent{} err = c.client.Put(). - Namespace(c.ns). Resource("agents"). Name(agent.Name). Body(agent). @@ -139,7 +132,6 @@ func (c *agents) Update(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err erro func (c *agents) UpdateStatus(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) { result = &v1alpha1.Agent{} err = c.client.Put(). - Namespace(c.ns). Resource("agents"). Name(agent.Name). SubResource("status"). @@ -152,7 +144,6 @@ func (c *agents) UpdateStatus(agent *v1alpha1.Agent) (result *v1alpha1.Agent, er // Delete takes name of the agent and deletes it. Returns an error if one occurs. func (c *agents) Delete(name string, options *v1.DeleteOptions) error { return c.client.Delete(). - Namespace(c.ns). Resource("agents"). Name(name). Body(options). @@ -167,7 +158,6 @@ func (c *agents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.List timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second } return c.client.Delete(). - Namespace(c.ns). Resource("agents"). VersionedParams(&listOptions, scheme.ParameterCodec). Timeout(timeout). 
@@ -180,7 +170,6 @@ func (c *agents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.List func (c *agents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Agent, err error) { result = &v1alpha1.Agent{} err = c.client.Patch(pt). - Namespace(c.ns). Resource("agents"). SubResource(subresources...). Name(name). diff --git a/pkg/client/clientset/versioned/typed/cluster/v1alpha1/cluster.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/cluster.go new file mode 100644 index 000000000..d5fc7e6d8 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/cluster.go @@ -0,0 +1,180 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" + scheme "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" +) + +// ClustersGetter has a method to return a ClusterInterface. +// A group's client should implement this interface. +type ClustersGetter interface { + Clusters() ClusterInterface +} + +// ClusterInterface has methods to work with Cluster resources. 
+type ClusterInterface interface { + Create(*v1alpha1.Cluster) (*v1alpha1.Cluster, error) + Update(*v1alpha1.Cluster) (*v1alpha1.Cluster, error) + UpdateStatus(*v1alpha1.Cluster) (*v1alpha1.Cluster, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Cluster, error) + List(opts v1.ListOptions) (*v1alpha1.ClusterList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Cluster, err error) + ClusterExpansion +} + +// clusters implements ClusterInterface +type clusters struct { + client rest.Interface +} + +// newClusters returns a Clusters +func newClusters(c *ClusterV1alpha1Client) *clusters { + return &clusters{ + client: c.RESTClient(), + } +} + +// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. +func (c *clusters) Get(name string, options v1.GetOptions) (result *v1alpha1.Cluster, err error) { + result = &v1alpha1.Cluster{} + err = c.client.Get(). + Resource("clusters"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Clusters that match those selectors. +func (c *clusters) List(opts v1.ListOptions) (result *v1alpha1.ClusterList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterList{} + err = c.client.Get(). + Resource("clusters"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusters. 
+func (c *clusters) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clusters"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. +func (c *clusters) Create(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { + result = &v1alpha1.Cluster{} + err = c.client.Post(). + Resource("clusters"). + Body(cluster). + Do(). + Into(result) + return +} + +// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. +func (c *clusters) Update(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { + result = &v1alpha1.Cluster{} + err = c.client.Put(). + Resource("clusters"). + Name(cluster.Name). + Body(cluster). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *clusters) UpdateStatus(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { + result = &v1alpha1.Cluster{} + err = c.client.Put(). + Resource("clusters"). + Name(cluster.Name). + SubResource("status"). + Body(cluster). + Do(). + Into(result) + return +} + +// Delete takes name of the cluster and deletes it. Returns an error if one occurs. +func (c *clusters) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusters"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *clusters) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clusters"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cluster. +func (c *clusters) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Cluster, err error) { + result = &v1alpha1.Cluster{} + err = c.client.Patch(pt). + Resource("clusters"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/tower/v1alpha1/tower_client.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/cluster_client.go similarity index 62% rename from pkg/client/clientset/versioned/typed/tower/v1alpha1/tower_client.go rename to pkg/client/clientset/versioned/typed/cluster/v1alpha1/cluster_client.go index 2d87fe8c7..12b7dd159 100644 --- a/pkg/client/clientset/versioned/typed/tower/v1alpha1/tower_client.go +++ b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/cluster_client.go @@ -20,26 +20,31 @@ package v1alpha1 import ( rest "k8s.io/client-go/rest" - v1alpha1 "kubesphere.io/kubesphere/pkg/apis/tower/v1alpha1" + v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" "kubesphere.io/kubesphere/pkg/client/clientset/versioned/scheme" ) -type TowerV1alpha1Interface interface { +type ClusterV1alpha1Interface interface { RESTClient() rest.Interface AgentsGetter + ClustersGetter } -// TowerV1alpha1Client is used to interact with features provided by the tower.kubesphere.io group. -type TowerV1alpha1Client struct { +// ClusterV1alpha1Client is used to interact with features provided by the cluster.kubesphere.io group. 
+type ClusterV1alpha1Client struct { restClient rest.Interface } -func (c *TowerV1alpha1Client) Agents(namespace string) AgentInterface { - return newAgents(c, namespace) +func (c *ClusterV1alpha1Client) Agents() AgentInterface { + return newAgents(c) } -// NewForConfig creates a new TowerV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*TowerV1alpha1Client, error) { +func (c *ClusterV1alpha1Client) Clusters() ClusterInterface { + return newClusters(c) +} + +// NewForConfig creates a new ClusterV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*ClusterV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -48,12 +53,12 @@ func NewForConfig(c *rest.Config) (*TowerV1alpha1Client, error) { if err != nil { return nil, err } - return &TowerV1alpha1Client{client}, nil + return &ClusterV1alpha1Client{client}, nil } -// NewForConfigOrDie creates a new TowerV1alpha1Client for the given config and +// NewForConfigOrDie creates a new ClusterV1alpha1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *TowerV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *ClusterV1alpha1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -61,9 +66,9 @@ func NewForConfigOrDie(c *rest.Config) *TowerV1alpha1Client { return client } -// New creates a new TowerV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *TowerV1alpha1Client { - return &TowerV1alpha1Client{c} +// New creates a new ClusterV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *ClusterV1alpha1Client { + return &ClusterV1alpha1Client{c} } func setConfigDefaults(config *rest.Config) error { @@ -81,7 +86,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
-func (c *TowerV1alpha1Client) RESTClient() rest.Interface { +func (c *ClusterV1alpha1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/pkg/client/clientset/versioned/typed/tower/v1alpha1/doc.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/doc.go similarity index 100% rename from pkg/client/clientset/versioned/typed/tower/v1alpha1/doc.go rename to pkg/client/clientset/versioned/typed/cluster/v1alpha1/doc.go diff --git a/pkg/client/clientset/versioned/typed/tower/v1alpha1/fake/doc.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/doc.go similarity index 100% rename from pkg/client/clientset/versioned/typed/tower/v1alpha1/fake/doc.go rename to pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/doc.go diff --git a/pkg/client/clientset/versioned/typed/tower/v1alpha1/fake/fake_agent.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_agent.go similarity index 76% rename from pkg/client/clientset/versioned/typed/tower/v1alpha1/fake/fake_agent.go rename to pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_agent.go index f7755fc28..e08a54c4d 100644 --- a/pkg/client/clientset/versioned/typed/tower/v1alpha1/fake/fake_agent.go +++ b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_agent.go @@ -25,24 +25,22 @@ import ( types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" - v1alpha1 "kubesphere.io/kubesphere/pkg/apis/tower/v1alpha1" + v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" ) // FakeAgents implements AgentInterface type FakeAgents struct { - Fake *FakeTowerV1alpha1 - ns string + Fake *FakeClusterV1alpha1 } -var agentsResource = schema.GroupVersionResource{Group: "tower.kubesphere.io", Version: "v1alpha1", Resource: "agents"} +var agentsResource = schema.GroupVersionResource{Group: "cluster.kubesphere.io", Version: "v1alpha1", Resource: "agents"} -var agentsKind = schema.GroupVersionKind{Group: 
"tower.kubesphere.io", Version: "v1alpha1", Kind: "Agent"} +var agentsKind = schema.GroupVersionKind{Group: "cluster.kubesphere.io", Version: "v1alpha1", Kind: "Agent"} // Get takes name of the agent, and returns the corresponding agent object, and an error if there is any. func (c *FakeAgents) Get(name string, options v1.GetOptions) (result *v1alpha1.Agent, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(agentsResource, c.ns, name), &v1alpha1.Agent{}) - + Invokes(testing.NewRootGetAction(agentsResource, name), &v1alpha1.Agent{}) if obj == nil { return nil, err } @@ -52,8 +50,7 @@ func (c *FakeAgents) Get(name string, options v1.GetOptions) (result *v1alpha1.A // List takes label and field selectors, and returns the list of Agents that match those selectors. func (c *FakeAgents) List(opts v1.ListOptions) (result *v1alpha1.AgentList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(agentsResource, agentsKind, c.ns, opts), &v1alpha1.AgentList{}) - + Invokes(testing.NewRootListAction(agentsResource, agentsKind, opts), &v1alpha1.AgentList{}) if obj == nil { return nil, err } @@ -74,15 +71,13 @@ func (c *FakeAgents) List(opts v1.ListOptions) (result *v1alpha1.AgentList, err // Watch returns a watch.Interface that watches the requested agents. func (c *FakeAgents) Watch(opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(agentsResource, c.ns, opts)) - + InvokesWatch(testing.NewRootWatchAction(agentsResource, opts)) } // Create takes the representation of a agent and creates it. Returns the server's representation of the agent, and an error, if there is any. func (c *FakeAgents) Create(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) { obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(agentsResource, c.ns, agent), &v1alpha1.Agent{}) - + Invokes(testing.NewRootCreateAction(agentsResource, agent), &v1alpha1.Agent{}) if obj == nil { return nil, err } @@ -92,8 +87,7 @@ func (c *FakeAgents) Create(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err // Update takes the representation of a agent and updates it. Returns the server's representation of the agent, and an error, if there is any. func (c *FakeAgents) Update(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(agentsResource, c.ns, agent), &v1alpha1.Agent{}) - + Invokes(testing.NewRootUpdateAction(agentsResource, agent), &v1alpha1.Agent{}) if obj == nil { return nil, err } @@ -104,8 +98,7 @@ func (c *FakeAgents) Update(agent *v1alpha1.Agent) (result *v1alpha1.Agent, err // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *FakeAgents) UpdateStatus(agent *v1alpha1.Agent) (*v1alpha1.Agent, error) { obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(agentsResource, "status", c.ns, agent), &v1alpha1.Agent{}) - + Invokes(testing.NewRootUpdateSubresourceAction(agentsResource, "status", agent), &v1alpha1.Agent{}) if obj == nil { return nil, err } @@ -115,14 +108,13 @@ func (c *FakeAgents) UpdateStatus(agent *v1alpha1.Agent) (*v1alpha1.Agent, error // Delete takes name of the agent and deletes it. Returns an error if one occurs. func (c *FakeAgents) Delete(name string, options *v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(agentsResource, c.ns, name), &v1alpha1.Agent{}) - + Invokes(testing.NewRootDeleteAction(agentsResource, name), &v1alpha1.Agent{}) return err } // DeleteCollection deletes a collection of objects. 
func (c *FakeAgents) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(agentsResource, c.ns, listOptions) + action := testing.NewRootDeleteCollectionAction(agentsResource, listOptions) _, err := c.Fake.Invokes(action, &v1alpha1.AgentList{}) return err @@ -131,8 +123,7 @@ func (c *FakeAgents) DeleteCollection(options *v1.DeleteOptions, listOptions v1. // Patch applies the patch and returns the patched agent. func (c *FakeAgents) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Agent, err error) { obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(agentsResource, c.ns, name, pt, data, subresources...), &v1alpha1.Agent{}) - + Invokes(testing.NewRootPatchSubresourceAction(agentsResource, name, pt, data, subresources...), &v1alpha1.Agent{}) if obj == nil { return nil, err } diff --git a/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_cluster.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_cluster.go new file mode 100644 index 000000000..6691c17bd --- /dev/null +++ b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_cluster.go @@ -0,0 +1,131 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" +) + +// FakeClusters implements ClusterInterface +type FakeClusters struct { + Fake *FakeClusterV1alpha1 +} + +var clustersResource = schema.GroupVersionResource{Group: "cluster.kubesphere.io", Version: "v1alpha1", Resource: "clusters"} + +var clustersKind = schema.GroupVersionKind{Group: "cluster.kubesphere.io", Version: "v1alpha1", Kind: "Cluster"} + +// Get takes name of the cluster, and returns the corresponding cluster object, and an error if there is any. +func (c *FakeClusters) Get(name string, options v1.GetOptions) (result *v1alpha1.Cluster, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clustersResource, name), &v1alpha1.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Cluster), err +} + +// List takes label and field selectors, and returns the list of Clusters that match those selectors. +func (c *FakeClusters) List(opts v1.ListOptions) (result *v1alpha1.ClusterList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clustersResource, clustersKind, opts), &v1alpha1.ClusterList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ClusterList{ListMeta: obj.(*v1alpha1.ClusterList).ListMeta} + for _, item := range obj.(*v1alpha1.ClusterList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusters. +func (c *FakeClusters) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewRootWatchAction(clustersResource, opts)) +} + +// Create takes the representation of a cluster and creates it. Returns the server's representation of the cluster, and an error, if there is any. +func (c *FakeClusters) Create(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clustersResource, cluster), &v1alpha1.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Cluster), err +} + +// Update takes the representation of a cluster and updates it. Returns the server's representation of the cluster, and an error, if there is any. +func (c *FakeClusters) Update(cluster *v1alpha1.Cluster) (result *v1alpha1.Cluster, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clustersResource, cluster), &v1alpha1.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Cluster), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeClusters) UpdateStatus(cluster *v1alpha1.Cluster) (*v1alpha1.Cluster, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(clustersResource, "status", cluster), &v1alpha1.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Cluster), err +} + +// Delete takes name of the cluster and deletes it. Returns an error if one occurs. +func (c *FakeClusters) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(clustersResource, name), &v1alpha1.Cluster{}) + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeClusters) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clustersResource, listOptions) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterList{}) + return err +} + +// Patch applies the patch and returns the patched cluster. +func (c *FakeClusters) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Cluster, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clustersResource, name, pt, data, subresources...), &v1alpha1.Cluster{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.Cluster), err +} diff --git a/pkg/client/clientset/versioned/typed/tower/v1alpha1/fake/fake_tower_client.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_cluster_client.go similarity index 75% rename from pkg/client/clientset/versioned/typed/tower/v1alpha1/fake/fake_tower_client.go rename to pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_cluster_client.go index 5df0777d4..bea6f6387 100644 --- a/pkg/client/clientset/versioned/typed/tower/v1alpha1/fake/fake_tower_client.go +++ b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/fake/fake_cluster_client.go @@ -21,20 +21,24 @@ package fake import ( rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" - v1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/tower/v1alpha1" + v1alpha1 "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/cluster/v1alpha1" ) -type FakeTowerV1alpha1 struct { +type FakeClusterV1alpha1 struct { *testing.Fake } -func (c *FakeTowerV1alpha1) Agents(namespace string) v1alpha1.AgentInterface { - return &FakeAgents{c, namespace} +func (c *FakeClusterV1alpha1) Agents() v1alpha1.AgentInterface { + return &FakeAgents{c} +} + +func (c *FakeClusterV1alpha1) Clusters() v1alpha1.ClusterInterface { + return &FakeClusters{c} } // RESTClient returns a RESTClient that is 
used to communicate // with API server by this client implementation. -func (c *FakeTowerV1alpha1) RESTClient() rest.Interface { +func (c *FakeClusterV1alpha1) RESTClient() rest.Interface { var ret *rest.RESTClient return ret } diff --git a/pkg/client/clientset/versioned/typed/tower/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/generated_expansion.go similarity index 95% rename from pkg/client/clientset/versioned/typed/tower/v1alpha1/generated_expansion.go rename to pkg/client/clientset/versioned/typed/cluster/v1alpha1/generated_expansion.go index 4e8868cb6..d87ae7ed2 100644 --- a/pkg/client/clientset/versioned/typed/tower/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/cluster/v1alpha1/generated_expansion.go @@ -19,3 +19,5 @@ limitations under the License. package v1alpha1 type AgentExpansion interface{} + +type ClusterExpansion interface{} diff --git a/pkg/client/informers/externalversions/tower/interface.go b/pkg/client/informers/externalversions/cluster/interface.go similarity index 97% rename from pkg/client/informers/externalversions/tower/interface.go rename to pkg/client/informers/externalversions/cluster/interface.go index f02645536..fd741d36b 100644 --- a/pkg/client/informers/externalversions/tower/interface.go +++ b/pkg/client/informers/externalversions/cluster/interface.go @@ -16,11 +16,11 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package tower +package cluster import ( + v1alpha1 "kubesphere.io/kubesphere/pkg/client/informers/externalversions/cluster/v1alpha1" internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tower/v1alpha1" ) // Interface provides access to each of this group's versions. 
diff --git a/pkg/client/informers/externalversions/tower/v1alpha1/agent.go b/pkg/client/informers/externalversions/cluster/v1alpha1/agent.go similarity index 71% rename from pkg/client/informers/externalversions/tower/v1alpha1/agent.go rename to pkg/client/informers/externalversions/cluster/v1alpha1/agent.go index 98f84ce8c..5db92dcf3 100644 --- a/pkg/client/informers/externalversions/tower/v1alpha1/agent.go +++ b/pkg/client/informers/externalversions/cluster/v1alpha1/agent.go @@ -25,10 +25,10 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" - towerv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tower/v1alpha1" + clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned" internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "kubesphere.io/kubesphere/pkg/client/listers/tower/v1alpha1" + v1alpha1 "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1" ) // AgentInformer provides access to a shared informer and lister for @@ -41,47 +41,46 @@ type AgentInformer interface { type agentInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string } // NewAgentInformer constructs a new informer for Agent type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewAgentInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredAgentInformer(client, namespace, resyncPeriod, indexers, nil) +func NewAgentInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredAgentInformer(client, resyncPeriod, indexers, nil) } // NewFilteredAgentInformer constructs a new informer for Agent type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewFilteredAgentInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredAgentInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.TowerV1alpha1().Agents(namespace).List(options) + return client.ClusterV1alpha1().Agents().List(options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.TowerV1alpha1().Agents(namespace).Watch(options) + return client.ClusterV1alpha1().Agents().Watch(options) }, }, - &towerv1alpha1.Agent{}, + &clusterv1alpha1.Agent{}, resyncPeriod, indexers, ) } func (f *agentInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredAgentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: 
cache.MetaNamespaceIndexFunc}, f.tweakListOptions) + return NewFilteredAgentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } func (f *agentInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&towerv1alpha1.Agent{}, f.defaultInformer) + return f.factory.InformerFor(&clusterv1alpha1.Agent{}, f.defaultInformer) } func (f *agentInformer) Lister() v1alpha1.AgentLister { diff --git a/pkg/client/informers/externalversions/cluster/v1alpha1/cluster.go b/pkg/client/informers/externalversions/cluster/v1alpha1/cluster.go new file mode 100644 index 000000000..883314e59 --- /dev/null +++ b/pkg/client/informers/externalversions/cluster/v1alpha1/cluster.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" + versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" + v1alpha1 "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1" +) + +// ClusterInformer provides access to a shared informer and lister for +// Clusters. +type ClusterInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ClusterLister +} + +type clusterInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterInformer constructs a new informer for Cluster type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterInformer constructs a new informer for Cluster type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ClusterV1alpha1().Clusters().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ClusterV1alpha1().Clusters().Watch(options) + }, + }, + &clusterv1alpha1.Cluster{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&clusterv1alpha1.Cluster{}, f.defaultInformer) +} + +func (f *clusterInformer) Lister() v1alpha1.ClusterLister { + return v1alpha1.NewClusterLister(f.Informer().GetIndexer()) +} diff --git a/pkg/client/informers/externalversions/tower/v1alpha1/interface.go b/pkg/client/informers/externalversions/cluster/v1alpha1/interface.go similarity index 81% rename from pkg/client/informers/externalversions/tower/v1alpha1/interface.go rename to pkg/client/informers/externalversions/cluster/v1alpha1/interface.go index 0beae5610..48aa93887 100644 --- a/pkg/client/informers/externalversions/tower/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/cluster/v1alpha1/interface.go @@ -26,6 +26,8 @@ import ( type Interface interface { // Agents returns a AgentInformer. Agents() AgentInformer + // Clusters returns a ClusterInformer. 
+ Clusters() ClusterInformer } type version struct { @@ -41,5 +43,10 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList // Agents returns a AgentInformer. func (v *version) Agents() AgentInformer { - return &agentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} + return &agentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Clusters returns a ClusterInformer. +func (v *version) Clusters() ClusterInformer { + return &clusterInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go index df5bbb39f..ae9c313aa 100644 --- a/pkg/client/informers/externalversions/factory.go +++ b/pkg/client/informers/externalversions/factory.go @@ -28,13 +28,13 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" versioned "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + cluster "kubesphere.io/kubesphere/pkg/client/informers/externalversions/cluster" devops "kubesphere.io/kubesphere/pkg/client/informers/externalversions/devops" iam "kubesphere.io/kubesphere/pkg/client/informers/externalversions/iam" internalinterfaces "kubesphere.io/kubesphere/pkg/client/informers/externalversions/internalinterfaces" network "kubesphere.io/kubesphere/pkg/client/informers/externalversions/network" servicemesh "kubesphere.io/kubesphere/pkg/client/informers/externalversions/servicemesh" tenant "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tenant" - tower "kubesphere.io/kubesphere/pkg/client/informers/externalversions/tower" ) // SharedInformerOption defines the functional option type for SharedInformerFactory. 
@@ -177,12 +177,16 @@ type SharedInformerFactory interface { ForResource(resource schema.GroupVersionResource) (GenericInformer, error) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + Cluster() cluster.Interface Devops() devops.Interface Iam() iam.Interface Network() network.Interface Servicemesh() servicemesh.Interface Tenant() tenant.Interface - Tower() tower.Interface +} + +func (f *sharedInformerFactory) Cluster() cluster.Interface { + return cluster.New(f, f.namespace, f.tweakListOptions) } func (f *sharedInformerFactory) Devops() devops.Interface { @@ -204,7 +208,3 @@ func (f *sharedInformerFactory) Servicemesh() servicemesh.Interface { func (f *sharedInformerFactory) Tenant() tenant.Interface { return tenant.New(f, f.namespace, f.tweakListOptions) } - -func (f *sharedInformerFactory) Tower() tower.Interface { - return tower.New(f, f.namespace, f.tweakListOptions) -} diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 027a8d386..e873d5481 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -23,12 +23,12 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" - v1alpha1 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1" + v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" + devopsv1alpha1 "kubesphere.io/kubesphere/pkg/apis/devops/v1alpha1" v1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" networkv1alpha1 "kubesphere.io/kubesphere/pkg/apis/network/v1alpha1" servicemeshv1alpha2 "kubesphere.io/kubesphere/pkg/apis/servicemesh/v1alpha2" tenantv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" - towerv1alpha1 "kubesphere.io/kubesphere/pkg/apis/tower/v1alpha1" ) // GenericInformer is type of SharedIndexInformer which will locate and delegate to other @@ -57,14 +57,20 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to 
unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=devops.kubesphere.io, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("s2ibinaries"): + // Group=cluster.kubesphere.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("agents"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().Agents().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("clusters"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cluster().V1alpha1().Clusters().Informer()}, nil + + // Group=devops.kubesphere.io, Version=v1alpha1 + case devopsv1alpha1.SchemeGroupVersion.WithResource("s2ibinaries"): return &genericInformer{resource: resource.GroupResource(), informer: f.Devops().V1alpha1().S2iBinaries().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("s2ibuilders"): + case devopsv1alpha1.SchemeGroupVersion.WithResource("s2ibuilders"): return &genericInformer{resource: resource.GroupResource(), informer: f.Devops().V1alpha1().S2iBuilders().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("s2ibuildertemplates"): + case devopsv1alpha1.SchemeGroupVersion.WithResource("s2ibuildertemplates"): return &genericInformer{resource: resource.GroupResource(), informer: f.Devops().V1alpha1().S2iBuilderTemplates().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("s2iruns"): + case devopsv1alpha1.SchemeGroupVersion.WithResource("s2iruns"): return &genericInformer{resource: resource.GroupResource(), informer: f.Devops().V1alpha1().S2iRuns().Informer()}, nil // Group=iam.kubesphere.io, Version=v1alpha2 @@ -87,10 +93,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case tenantv1alpha1.SchemeGroupVersion.WithResource("workspaces"): return &genericInformer{resource: resource.GroupResource(), 
informer: f.Tenant().V1alpha1().Workspaces().Informer()}, nil - // Group=tower.kubesphere.io, Version=v1alpha1 - case towerv1alpha1.SchemeGroupVersion.WithResource("agents"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Tower().V1alpha1().Agents().Informer()}, nil - } return nil, fmt.Errorf("no informer found for %v", resource) diff --git a/pkg/client/listers/tower/v1alpha1/agent.go b/pkg/client/listers/cluster/v1alpha1/agent.go similarity index 54% rename from pkg/client/listers/tower/v1alpha1/agent.go rename to pkg/client/listers/cluster/v1alpha1/agent.go index 9f78cb328..55326bbee 100644 --- a/pkg/client/listers/tower/v1alpha1/agent.go +++ b/pkg/client/listers/cluster/v1alpha1/agent.go @@ -22,15 +22,15 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - v1alpha1 "kubesphere.io/kubesphere/pkg/apis/tower/v1alpha1" + v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" ) // AgentLister helps list Agents. type AgentLister interface { // List lists all Agents in the indexer. List(selector labels.Selector) (ret []*v1alpha1.Agent, err error) - // Agents returns an object that can list and get Agents. - Agents(namespace string) AgentNamespaceLister + // Get retrieves the Agent from the index for a given name. + Get(name string) (*v1alpha1.Agent, error) AgentListerExpansion } @@ -52,38 +52,9 @@ func (s *agentLister) List(selector labels.Selector) (ret []*v1alpha1.Agent, err return ret, err } -// Agents returns an object that can list and get Agents. -func (s *agentLister) Agents(namespace string) AgentNamespaceLister { - return agentNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// AgentNamespaceLister helps list and get Agents. -type AgentNamespaceLister interface { - // List lists all Agents in the indexer for a given namespace. 
- List(selector labels.Selector) (ret []*v1alpha1.Agent, err error) - // Get retrieves the Agent from the indexer for a given namespace and name. - Get(name string) (*v1alpha1.Agent, error) - AgentNamespaceListerExpansion -} - -// agentNamespaceLister implements the AgentNamespaceLister -// interface. -type agentNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Agents in the indexer for a given namespace. -func (s agentNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Agent, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Agent)) - }) - return ret, err -} - -// Get retrieves the Agent from the indexer for a given namespace and name. -func (s agentNamespaceLister) Get(name string) (*v1alpha1.Agent, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) +// Get retrieves the Agent from the index for a given name. +func (s *agentLister) Get(name string) (*v1alpha1.Agent, error) { + obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } diff --git a/pkg/client/listers/cluster/v1alpha1/cluster.go b/pkg/client/listers/cluster/v1alpha1/cluster.go new file mode 100644 index 000000000..8408a6994 --- /dev/null +++ b/pkg/client/listers/cluster/v1alpha1/cluster.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The KubeSphere authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. 
DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" +) + +// ClusterLister helps list Clusters. +type ClusterLister interface { + // List lists all Clusters in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) + // Get retrieves the Cluster from the index for a given name. + Get(name string) (*v1alpha1.Cluster, error) + ClusterListerExpansion +} + +// clusterLister implements the ClusterLister interface. +type clusterLister struct { + indexer cache.Indexer +} + +// NewClusterLister returns a new ClusterLister. +func NewClusterLister(indexer cache.Indexer) ClusterLister { + return &clusterLister{indexer: indexer} +} + +// List lists all Clusters in the indexer. +func (s *clusterLister) List(selector labels.Selector) (ret []*v1alpha1.Cluster, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Cluster)) + }) + return ret, err +} + +// Get retrieves the Cluster from the index for a given name. +func (s *clusterLister) Get(name string) (*v1alpha1.Cluster, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("cluster"), name) + } + return obj.(*v1alpha1.Cluster), nil +} diff --git a/pkg/client/listers/tower/v1alpha1/expansion_generated.go b/pkg/client/listers/cluster/v1alpha1/expansion_generated.go similarity index 84% rename from pkg/client/listers/tower/v1alpha1/expansion_generated.go rename to pkg/client/listers/cluster/v1alpha1/expansion_generated.go index d094cbfb6..0fc0286f9 100644 --- a/pkg/client/listers/tower/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/cluster/v1alpha1/expansion_generated.go @@ -22,6 +22,6 @@ package v1alpha1 // AgentLister. 
type AgentListerExpansion interface{} -// AgentNamespaceListerExpansion allows custom methods to be added to -// AgentNamespaceLister. -type AgentNamespaceListerExpansion interface{} +// ClusterListerExpansion allows custom methods to be added to +// ClusterLister. +type ClusterListerExpansion interface{} diff --git a/pkg/controller/cluster/cluster_controller.go b/pkg/controller/cluster/cluster_controller.go new file mode 100644 index 000000000..7fc3abe80 --- /dev/null +++ b/pkg/controller/cluster/cluster_controller.go @@ -0,0 +1,230 @@ +package cluster + +import ( + "fmt" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" + clusterclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned/typed/cluster/v1alpha1" + clusterinformer "kubesphere.io/kubesphere/pkg/client/informers/externalversions/cluster/v1alpha1" + clusterlister "kubesphere.io/kubesphere/pkg/client/listers/cluster/v1alpha1" + "time" +) + +const ( + // maxRetries is the number of times a service will be retried before it is dropped out of the queue. + // With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the + // sequence of delays between successive queuings of a service. 
+ // + // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s + maxRetries = 15 +) + +type ClusterController struct { + eventBroadcaster record.EventBroadcaster + eventRecorder record.EventRecorder + + agentClient clusterclient.AgentInterface + clusterClient clusterclient.ClusterInterface + + agentLister clusterlister.AgentLister + agentHasSynced cache.InformerSynced + + clusterLister clusterlister.ClusterLister + clusterHasSynced cache.InformerSynced + + queue workqueue.RateLimitingInterface + + workerLoopPeriod time.Duration +} + +func NewClusterController( + client kubernetes.Interface, + clusterInformer clusterinformer.ClusterInformer, + agentInformer clusterinformer.AgentInformer, + agentClient clusterclient.AgentInterface, + clusterClient clusterclient.ClusterInterface, +) *ClusterController { + + broadcaster := record.NewBroadcaster() + broadcaster.StartLogging(func(format string, args ...interface{}) { + klog.Info(fmt.Sprintf(format, args)) + }) + broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) + recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cluster-controller"}) + + c := &ClusterController{ + eventBroadcaster: broadcaster, + eventRecorder: recorder, + agentClient: agentClient, + clusterClient: clusterClient, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cluster"), + workerLoopPeriod: time.Second, + } + + c.agentLister = agentInformer.Lister() + c.agentHasSynced = agentInformer.Informer().HasSynced + + c.clusterLister = clusterInformer.Lister() + c.clusterHasSynced = clusterInformer.Informer().HasSynced + + clusterInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addCluster, + UpdateFunc: func(oldObj, newObj interface{}) { + c.addCluster(newObj) + }, + DeleteFunc: c.addCluster, + }) + + return c +} + +func (c *ClusterController) Start(stopCh <-chan struct{}) error { + 
return c.Run(5, stopCh) +} + +func (c *ClusterController) Run(workers int, stopCh <-chan struct{}) error { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.V(0).Info("starting cluster controller") + defer klog.Info("shutting down cluster controller") + + if !cache.WaitForCacheSync(stopCh, c.clusterHasSynced, c.agentHasSynced) { + return fmt.Errorf("failed to wait for caches to sync") + } + + for i := 0; i < workers; i++ { + go wait.Until(c.worker, c.workerLoopPeriod, stopCh) + } + + <-stopCh + return nil +} + +func (c *ClusterController) worker() { + for c.processNextItem() { + } +} + +func (c *ClusterController) processNextItem() bool { + key, quit := c.queue.Get() + if quit { + return false + } + + defer c.queue.Done(key) + + err := c.syncCluster(key.(string)) + c.handleErr(err, key) + return true +} + +func (c *ClusterController) syncCluster(key string) error { + startTime := time.Now() + + _, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + klog.Errorf("not a valid controller key %s, %#v", key, err) + return err + } + + defer func() { + klog.V(4).Info("Finished syncing cluster.", "name", name, "duration", time.Since(startTime)) + }() + + cluster, err := c.clusterLister.Get(name) + if err != nil { + // cluster not found, possibly been deleted + // need to do the cleanup + if errors.IsNotFound(err) { + _, err = c.agentLister.Get(name) + if err != nil && errors.IsNotFound(err) { + return nil + } + + if err != nil { + klog.Errorf("Failed to get cluster agent %s, %#v", name, err) + return err + } + + // do the real cleanup work + err = c.agentClient.Delete(name, &metav1.DeleteOptions{}) + return err + } + + klog.Errorf("Failed to get cluster with name %s, %#v", name, err) + return err + } + + newAgent := &clusterv1alpha1.Agent{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "app.kubernetes.io/name": "tower", + "cluster.kubesphere.io/name": name, + }, + }, + Spec: clusterv1alpha1.AgentSpec{ + Token: 
"", + KubeSphereAPIServerPort: 0, + KubernetesAPIServerPort: 0, + Proxy: "", + Paused: !cluster.Spec.Active, + }, + } + + agent, err := c.agentLister.Get(name) + if err != nil && errors.IsNotFound(err) { + agent, err = c.agentClient.Create(newAgent) + if err != nil { + klog.Errorf("Failed to create agent %s, %#v", name, err) + return err + } + + return nil + } + + if err != nil { + klog.Errorf("Failed to get agent %s, %#v", name, err) + return err + } + + if agent.Spec.Paused != newAgent.Spec.Paused { + agent.Spec.Paused = newAgent.Spec.Paused + return retry.RetryOnConflict(retry.DefaultBackoff, func() error { + _, err = c.agentClient.Update(agent) + return err + }) + } + + return nil +} + +func (c *ClusterController) addCluster(obj interface{}) { + cluster := obj.(*clusterv1alpha1.Cluster) + + key, err := cache.MetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(fmt.Errorf("get cluster key %s/%s failed", cluster.Namespace, cluster.Name)) + return + } + + c.queue.Add(key) + +} + +func (c *ClusterController) handleErr(err error, key interface{}) { + +} diff --git a/pkg/controller/cluster/cluster_controller_test.go b/pkg/controller/cluster/cluster_controller_test.go new file mode 100644 index 000000000..916b1b53b --- /dev/null +++ b/pkg/controller/cluster/cluster_controller_test.go @@ -0,0 +1 @@ +package cluster diff --git a/tools/cmd/crd-doc-gen/main.go b/tools/cmd/crd-doc-gen/main.go index 20f46094b..b218fa86d 100644 --- a/tools/cmd/crd-doc-gen/main.go +++ b/tools/cmd/crd-doc-gen/main.go @@ -4,6 +4,8 @@ import ( "flag" "io/ioutil" "k8s.io/apimachinery/pkg/api/meta" + urlruntime "k8s.io/apimachinery/pkg/util/runtime" + clusterv1alpha1 "kubesphere.io/kubesphere/pkg/apis/cluster/v1alpha1" "kubesphere.io/kubesphere/tools/lib" "log" "os" @@ -42,6 +44,9 @@ func main() { networkinstall.Install(Scheme) devopsinstall.Install(Scheme) + urlruntime.Must(clusterv1alpha1.AddToScheme(Scheme)) + 
urlruntime.Must(Scheme.SetVersionPriority(clusterv1alpha1.SchemeGroupVersion)) + mapper := meta.NewDefaultRESTMapper(nil) mapper.AddSpecific(servicemeshv1alpha2.SchemeGroupVersion.WithKind(servicemeshv1alpha2.ResourceKindServicePolicy), @@ -75,6 +80,14 @@ func main() { networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourcePluralWorkspaceNetworkPolicy), networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourceSingularWorkspaceNetworkPolicy), meta.RESTScopeRoot) + mapper.AddSpecific(clusterv1alpha1.SchemeGroupVersion.WithKind(clusterv1alpha1.ResourceKindCluster), + clusterv1alpha1.SchemeGroupVersion.WithResource(clusterv1alpha1.ResourcesPluralCluster), + clusterv1alpha1.SchemeGroupVersion.WithResource(clusterv1alpha1.ResourcesSingularCluster), meta.RESTScopeRoot) + + mapper.AddSpecific(clusterv1alpha1.SchemeGroupVersion.WithKind(clusterv1alpha1.ResourceKindAgent), + clusterv1alpha1.SchemeGroupVersion.WithResource(clusterv1alpha1.ResourcesPluralAgent), + clusterv1alpha1.SchemeGroupVersion.WithResource(clusterv1alpha1.ResourcesSingularAgent), meta.RESTScopeRoot) + spec, err := lib.RenderOpenAPISpec(lib.Config{ Scheme: Scheme, Codecs: Codecs, @@ -96,6 +109,7 @@ func main() { tenantv1alpha1.GetOpenAPIDefinitions, networkv1alpha1.GetOpenAPIDefinitions, devopsv1alpha1.GetOpenAPIDefinitions, + clusterv1alpha1.GetOpenAPIDefinitions, }, Resources: []schema.GroupVersionResource{ //TODO(runzexia) At present, the document generation requires the openapi structure of the go language, @@ -109,6 +123,8 @@ func main() { devopsv1alpha1.SchemeGroupVersion.WithResource(devopsv1alpha1.ResourcePluralS2iBuilderTemplate), devopsv1alpha1.SchemeGroupVersion.WithResource(devopsv1alpha1.ResourcePluralS2iBuilder), networkv1alpha1.SchemeGroupVersion.WithResource(networkv1alpha1.ResourcePluralWorkspaceNetworkPolicy), + clusterv1alpha1.SchemeGroupVersion.WithResource(clusterv1alpha1.ResourcesPluralAgent), + 
clusterv1alpha1.SchemeGroupVersion.WithResource(clusterv1alpha1.ResourcesPluralCluster), }, Mapper: mapper, }) From e174dcb3d6557b9d0af222c50d7f2f4a6e7a3583 Mon Sep 17 00:00:00 2001 From: zryfish Date: Wed, 8 Apr 2020 00:40:28 +0800 Subject: [PATCH 02/12] add cluster controller (#1993) --- cmd/controller-manager/app/controllers.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cmd/controller-manager/app/controllers.go b/cmd/controller-manager/app/controllers.go index 5c471ee69..df7867e7f 100644 --- a/cmd/controller-manager/app/controllers.go +++ b/cmd/controller-manager/app/controllers.go @@ -20,6 +20,7 @@ package app import ( "k8s.io/klog" "kubesphere.io/kubesphere/pkg/controller/application" + "kubesphere.io/kubesphere/pkg/controller/cluster" "kubesphere.io/kubesphere/pkg/controller/destinationrule" "kubesphere.io/kubesphere/pkg/controller/job" "kubesphere.io/kubesphere/pkg/controller/s2ibinary" @@ -93,6 +94,13 @@ func AddControllers( client.KubeSphere(), kubesphereInformer.Iam().V1alpha2().Users()) + clusterController := cluster.NewClusterController( + client.Kubernetes(), + kubesphereInformer.Cluster().V1alpha1().Clusters(), + kubesphereInformer.Cluster().V1alpha1().Agents(), + client.KubeSphere().ClusterV1alpha1().Agents(), + client.KubeSphere().ClusterV1alpha1().Clusters()) + controllers := map[string]manager.Runnable{ "virtualservice-controller": vsController, "destinationrule-controller": drController, @@ -102,6 +110,7 @@ func AddControllers( "s2irun-controller": s2iRunController, "volumeexpansion-controller": volumeExpansionController, "user-controller": userController, + "cluster-controller": clusterController, } for name, ctrl := range controllers { From 0e814bb5e44bc7beffdf4829f4e86b529206bb95 Mon Sep 17 00:00:00 2001 From: hongming Date: Sun, 5 Apr 2020 03:52:12 +0800 Subject: [PATCH 03/12] add iam crd Signed-off-by: hongming --- cmd/controller-manager/app/server.go | 8 + .../crds/iam.kubesphere.io_policyrules.yaml | 53 + 
.../crds/iam.kubesphere.io_rolebindings.yaml | 93 + config/crds/iam.kubesphere.io_roles.yaml | 81 + config/samples/iam_v1alpha2_policyrule.yaml | 9 + config/samples/iam_v1alpha2_role.yaml | 9 + config/samples/iam_v1alpha2_rolebinding.yaml | 9 + config/samples/iam_v1alpha2_user.yaml | 2 +- config/webhook/iam.yaml | 55 + go.mod | 22 +- go.sum | 38 +- pkg/api/iam/user.go | 41 - pkg/api/types.go | 4 +- pkg/apis/iam/v1alpha2/register.go | 20 +- .../iam/v1alpha2/{user_types.go => types.go} | 119 +- .../{user_types_test.go => types_test.go} | 0 .../iam/v1alpha2/zz_generated.deepcopy.go | 250 +- pkg/apiserver/apiserver.go | 3 +- .../authenticators/basic/basic.go | 2 +- pkg/apiserver/authentication/token/jwt.go | 4 +- .../authentication/token/jwt_test.go | 4 +- pkg/apiserver/query/field.go | 14 +- pkg/apiserver/query/types.go | 79 +- pkg/apiserver/query/types_test.go | 25 +- .../clusterrolebinding_controller.go | 1 - pkg/controller/user/user_webhook.go | 102 + pkg/kapis/iam/v1alpha2/register.go | 2 +- pkg/kapis/resources/v1alpha2/register.go | 2 + pkg/kapis/resources/v1alpha3/handler.go | 22 +- pkg/kapis/resources/v1alpha3/register.go | 16 +- pkg/models/iam/im/im.go | 74 +- pkg/models/iam/im/im_operator.go | 85 + pkg/models/iam/im/im_test.go | 22 + pkg/models/iam/im/ldap_operator.go | 80 + .../v1alpha3/deployment/deployments.go | 21 +- .../v1alpha3/deployment/deployments_test.go | 6 +- pkg/models/resources/v1alpha3/interface.go | 86 +- .../v1alpha3/namespace/namespaces.go | 19 +- .../resources/v1alpha3/resource/resource.go | 17 +- .../v1alpha3/resource/resource_test.go | 111 + pkg/models/tenant/workspaces.go | 5 +- pkg/simple/client/ldap/interface.go | 10 +- pkg/simple/client/ldap/ldap.go | 45 +- pkg/simple/client/ldap/simple_ldap.go | 35 +- pkg/simple/client/ldap/simple_ldap_test.go | 28 +- vendor/github.com/cenkalti/backoff/.gitignore | 22 - .../github.com/cenkalti/backoff/.travis.yml | 10 - vendor/github.com/cenkalti/backoff/LICENSE | 20 - 
vendor/github.com/cenkalti/backoff/README.md | 30 - vendor/github.com/cenkalti/backoff/backoff.go | 66 - vendor/github.com/cenkalti/backoff/context.go | 63 - .../cenkalti/backoff/exponential.go | 153 - vendor/github.com/cenkalti/backoff/retry.go | 82 - vendor/github.com/cenkalti/backoff/ticker.go | 82 - vendor/github.com/cenkalti/backoff/tries.go | 35 - vendor/github.com/cheekybits/genny/LICENSE | 22 - .../cheekybits/genny/generic/doc.go | 2 - .../cheekybits/genny/generic/generic.go | 13 - .../github.com/dustin/go-humanize/.travis.yml | 21 - vendor/github.com/dustin/go-humanize/LICENSE | 21 - .../dustin/go-humanize/README.markdown | 124 - vendor/github.com/dustin/go-humanize/big.go | 31 - .../github.com/dustin/go-humanize/bigbytes.go | 173 - vendor/github.com/dustin/go-humanize/bytes.go | 143 - vendor/github.com/dustin/go-humanize/comma.go | 116 - .../github.com/dustin/go-humanize/commaf.go | 40 - vendor/github.com/dustin/go-humanize/ftoa.go | 46 - .../github.com/dustin/go-humanize/humanize.go | 8 - .../github.com/dustin/go-humanize/number.go | 192 - .../github.com/dustin/go-humanize/ordinals.go | 25 - vendor/github.com/dustin/go-humanize/si.go | 123 - vendor/github.com/dustin/go-humanize/times.go | 117 - vendor/github.com/flynn/go-shlex/COPYING | 202 - vendor/github.com/flynn/go-shlex/Makefile | 21 - vendor/github.com/flynn/go-shlex/README.md | 2 - vendor/github.com/flynn/go-shlex/shlex.go | 457 -- .../bson/bson_corpus_spec_test_generator.go | 294 -- vendor/github.com/go-acme/lego/LICENSE | 21 - .../go-acme/lego/acme/api/account.go | 69 - .../github.com/go-acme/lego/acme/api/api.go | 166 - .../go-acme/lego/acme/api/authorization.go | 34 - .../go-acme/lego/acme/api/certificate.go | 99 - .../go-acme/lego/acme/api/challenge.go | 45 - .../acme/api/internal/nonces/nonce_manager.go | 78 - .../lego/acme/api/internal/secure/jws.go | 130 - .../lego/acme/api/internal/sender/sender.go | 146 - .../acme/api/internal/sender/useragent.go | 14 - 
.../github.com/go-acme/lego/acme/api/order.go | 65 - .../go-acme/lego/acme/api/service.go | 45 - .../github.com/go-acme/lego/acme/commons.go | 284 -- vendor/github.com/go-acme/lego/acme/errors.go | 58 - .../go-acme/lego/certcrypto/crypto.go | 256 -- .../go-acme/lego/certificate/authorization.go | 69 - .../go-acme/lego/certificate/certificates.go | 495 -- .../go-acme/lego/certificate/errors.go | 30 - .../go-acme/lego/challenge/challenges.go | 44 - .../go-acme/lego/challenge/dns01/cname.go | 16 - .../lego/challenge/dns01/dns_challenge.go | 188 - .../challenge/dns01/dns_challenge_manual.go | 52 - .../go-acme/lego/challenge/dns01/fqdn.go | 19 - .../lego/challenge/dns01/nameserver.go | 232 - .../go-acme/lego/challenge/dns01/precheck.go | 127 - .../lego/challenge/http01/http_challenge.go | 65 - .../challenge/http01/http_challenge_server.go | 96 - .../go-acme/lego/challenge/provider.go | 28 - .../go-acme/lego/challenge/resolver/errors.go | 25 - .../go-acme/lego/challenge/resolver/prober.go | 173 - .../lego/challenge/resolver/solver_manager.go | 169 - .../challenge/tlsalpn01/tls_alpn_challenge.go | 129 - .../tlsalpn01/tls_alpn_challenge_server.go | 95 - vendor/github.com/go-acme/lego/lego/client.go | 74 - .../go-acme/lego/lego/client_config.go | 104 - vendor/github.com/go-acme/lego/log/logger.go | 59 - .../go-acme/lego/platform/wait/wait.go | 33 - .../go-acme/lego/registration/registar.go | 146 - .../go-acme/lego/registration/user.go | 13 - .../github.com/hashicorp/go-syslog/.gitignore | 22 - vendor/github.com/hashicorp/go-syslog/LICENSE | 20 - .../github.com/hashicorp/go-syslog/README.md | 11 - .../github.com/hashicorp/go-syslog/builtin.go | 214 - vendor/github.com/hashicorp/go-syslog/go.mod | 1 - .../github.com/hashicorp/go-syslog/syslog.go | 27 - vendor/github.com/hashicorp/go-syslog/unix.go | 123 - .../hashicorp/go-syslog/unsupported.go | 17 - .../jimstudt/http-authentication/LICENSE | 21 - .../http-authentication/basic/README.md | 65 - 
.../http-authentication/basic/bcrypt.go | 15 - .../http-authentication/basic/htpasswd.go | 208 - .../jimstudt/http-authentication/basic/md5.go | 143 - .../http-authentication/basic/plain.go | 28 - .../jimstudt/http-authentication/basic/sha.go | 43 - .../http-authentication/basic/util.go | 18 - vendor/github.com/klauspost/cpuid/.gitignore | 24 - vendor/github.com/klauspost/cpuid/.travis.yml | 23 - .../klauspost/cpuid/CONTRIBUTING.txt | 35 - vendor/github.com/klauspost/cpuid/LICENSE | 22 - vendor/github.com/klauspost/cpuid/README.md | 147 - vendor/github.com/klauspost/cpuid/cpuid.go | 1049 ----- vendor/github.com/klauspost/cpuid/cpuid_386.s | 42 - .../github.com/klauspost/cpuid/cpuid_amd64.s | 42 - .../klauspost/cpuid/detect_intel.go | 17 - .../github.com/klauspost/cpuid/detect_ref.go | 23 - vendor/github.com/klauspost/cpuid/generate.go | 4 - .../github.com/klauspost/cpuid/private-gen.go | 476 -- .../versioned/fake/clientset_generated.go | 82 + .../client/clientset/versioned/fake/doc.go | 20 + .../clientset/versioned/fake/register.go | 56 + .../versioned/typed/app/v1beta1/fake/doc.go | 20 + .../typed/app/v1beta1/fake/fake_app_client.go | 40 + .../app/v1beta1/fake/fake_application.go | 140 + .../lucas-clemente/quic-go/.editorconfig | 5 - .../lucas-clemente/quic-go/.gitignore | 3 - .../lucas-clemente/quic-go/.golangci.yml | 25 - .../lucas-clemente/quic-go/.travis.yml | 39 - .../lucas-clemente/quic-go/Changelog.md | 52 - .../github.com/lucas-clemente/quic-go/LICENSE | 21 - .../lucas-clemente/quic-go/README.md | 68 - .../lucas-clemente/quic-go/appveyor.yml | 34 - .../lucas-clemente/quic-go/buffer_pool.go | 75 - .../lucas-clemente/quic-go/client.go | 413 -- .../lucas-clemente/quic-go/codecov.yml | 18 - .../github.com/lucas-clemente/quic-go/conn.go | 54 - .../lucas-clemente/quic-go/crypto_stream.go | 135 - .../quic-go/crypto_stream_manager.go | 60 - .../lucas-clemente/quic-go/frame_sorter.go | 159 - .../lucas-clemente/quic-go/framer.go | 109 - 
.../github.com/lucas-clemente/quic-go/go.mod | 13 - .../github.com/lucas-clemente/quic-go/go.sum | 37 - .../lucas-clemente/quic-go/h2quic/client.go | 311 -- .../quic-go/h2quic/gzipreader.go | 35 - .../lucas-clemente/quic-go/h2quic/request.go | 77 - .../quic-go/h2quic/request_body.go | 29 - .../quic-go/h2quic/request_writer.go | 203 - .../lucas-clemente/quic-go/h2quic/response.go | 95 - .../quic-go/h2quic/response_body.go | 18 - .../quic-go/h2quic/response_writer.go | 114 - .../h2quic/response_writer_closenotifier.go | 9 - .../quic-go/h2quic/roundtrip.go | 179 - .../lucas-clemente/quic-go/h2quic/server.go | 402 -- .../lucas-clemente/quic-go/interface.go | 224 - .../quic-go/internal/ackhandler/gen.go | 3 - .../quic-go/internal/ackhandler/interfaces.go | 50 - .../quic-go/internal/ackhandler/packet.go | 29 - .../internal/ackhandler/packet_linkedlist.go | 217 - .../ackhandler/packet_number_generator.go | 78 - .../ackhandler/received_packet_handler.go | 98 - .../ackhandler/received_packet_history.go | 122 - .../ackhandler/received_packet_tracker.go | 191 - .../internal/ackhandler/retransmittable.go | 34 - .../quic-go/internal/ackhandler/send_mode.go | 36 - .../ackhandler/sent_packet_handler.go | 647 --- .../ackhandler/sent_packet_history.go | 148 - .../quic-go/internal/congestion/bandwidth.go | 22 - .../quic-go/internal/congestion/clock.go | 18 - .../quic-go/internal/congestion/cubic.go | 210 - .../internal/congestion/cubic_sender.go | 314 -- .../internal/congestion/hybrid_slow_start.go | 111 - .../quic-go/internal/congestion/interface.go | 36 - .../quic-go/internal/congestion/prr_sender.go | 54 - .../quic-go/internal/congestion/rtt_stats.go | 101 - .../quic-go/internal/congestion/stats.go | 8 - .../flowcontrol/base_flow_controller.go | 122 - .../flowcontrol/connection_flow_controller.go | 92 - .../quic-go/internal/flowcontrol/interface.go | 41 - .../flowcontrol/stream_flow_controller.go | 139 - .../quic-go/internal/handshake/aead.go | 104 - 
.../internal/handshake/cookie_generator.go | 109 - .../internal/handshake/cookie_protector.go | 86 - .../internal/handshake/crypto_setup.go | 581 --- .../internal/handshake/initial_aead.go | 52 - .../quic-go/internal/handshake/interface.go | 52 - .../quic-go/internal/handshake/qtls.go | 132 - .../handshake/tls_extension_handler.go | 58 - .../handshake/transport_parameters.go | 260 -- .../quic-go/internal/handshake/unsafe.go | 38 - .../internal/protocol/connection_id.go | 69 - .../internal/protocol/encryption_level.go | 28 - .../internal/protocol/packet_number.go | 85 - .../quic-go/internal/protocol/params.go | 126 - .../quic-go/internal/protocol/perspective.go | 26 - .../quic-go/internal/protocol/protocol.go | 75 - .../quic-go/internal/protocol/stream_id.go | 67 - .../quic-go/internal/protocol/version.go | 119 - .../quic-go/internal/qerr/error_codes.go | 71 - .../quic-go/internal/qerr/quic_error.go | 73 - .../quic-go/internal/utils/atomic_bool.go | 22 - .../internal/utils/byteinterval_linkedlist.go | 217 - .../quic-go/internal/utils/byteorder.go | 17 - .../internal/utils/byteorder_big_endian.go | 74 - .../quic-go/internal/utils/gen.go | 4 - .../quic-go/internal/utils/host.go | 27 - .../quic-go/internal/utils/log.go | 131 - .../quic-go/internal/utils/minmax.go | 159 - .../quic-go/internal/utils/packet_interval.go | 9 - .../utils/packetinterval_linkedlist.go | 217 - .../internal/utils/streamframe_interval.go | 9 - .../quic-go/internal/utils/timer.go | 48 - .../quic-go/internal/utils/varint.go | 101 - .../quic-go/internal/wire/ack_frame.go | 226 - .../quic-go/internal/wire/ack_range.go | 14 - .../internal/wire/connection_close_frame.go | 81 - .../quic-go/internal/wire/crypto_frame.go | 71 - .../internal/wire/data_blocked_frame.go | 38 - .../quic-go/internal/wire/extended_header.go | 204 - .../quic-go/internal/wire/frame_parser.go | 97 - .../quic-go/internal/wire/header.go | 251 -- .../quic-go/internal/wire/interface.go | 19 - .../quic-go/internal/wire/log.go | 43 - 
.../quic-go/internal/wire/max_data_frame.go | 40 - .../internal/wire/max_stream_data_frame.go | 46 - .../internal/wire/max_streams_frame.go | 51 - .../internal/wire/new_connection_id_frame.go | 70 - .../quic-go/internal/wire/new_token_frame.go | 44 - .../internal/wire/path_challenge_frame.go | 38 - .../internal/wire/path_response_frame.go | 38 - .../quic-go/internal/wire/ping_frame.go | 27 - .../internal/wire/reset_stream_frame.go | 58 - .../wire/retire_connection_id_frame.go | 36 - .../internal/wire/stop_sending_frame.go | 47 - .../wire/stream_data_blocked_frame.go | 46 - .../quic-go/internal/wire/stream_frame.go | 168 - .../internal/wire/streams_blocked_frame.go | 52 - .../internal/wire/version_negotiation.go | 31 - .../lucas-clemente/quic-go/mockgen.go | 21 - .../lucas-clemente/quic-go/mockgen_private.sh | 28 - .../lucas-clemente/quic-go/multiplexer.go | 89 - .../quic-go/packet_handler_map.go | 274 -- .../lucas-clemente/quic-go/packet_packer.go | 491 -- .../lucas-clemente/quic-go/packet_unpacker.go | 102 - .../lucas-clemente/quic-go/receive_stream.go | 324 -- .../lucas-clemente/quic-go/send_stream.go | 332 -- .../lucas-clemente/quic-go/server.go | 558 --- .../lucas-clemente/quic-go/session.go | 1287 ------ .../lucas-clemente/quic-go/stream.go | 163 - .../lucas-clemente/quic-go/streams_map.go | 192 - .../quic-go/streams_map_generic_helper.go | 17 - .../quic-go/streams_map_incoming_bidi.go | 162 - .../quic-go/streams_map_incoming_generic.go | 160 - .../quic-go/streams_map_incoming_uni.go | 162 - .../quic-go/streams_map_outgoing_bidi.go | 147 - .../quic-go/streams_map_outgoing_generic.go | 145 - .../quic-go/streams_map_outgoing_uni.go | 147 - .../quic-go/window_update_queue.go | 71 - .../github.com/marten-seemann/qtls/alert.go | 88 - vendor/github.com/marten-seemann/qtls/auth.go | 221 - .../marten-seemann/qtls/cipher_suites.go | 487 -- .../github.com/marten-seemann/qtls/common.go | 1148 ----- vendor/github.com/marten-seemann/qtls/conn.go | 1467 ------ 
.../marten-seemann/qtls/generate_cert.go | 169 - vendor/github.com/marten-seemann/qtls/go.mod | 8 - vendor/github.com/marten-seemann/qtls/go.sum | 5 - .../marten-seemann/qtls/handshake_client.go | 1023 ----- .../qtls/handshake_client_tls13.go | 680 --- .../marten-seemann/qtls/handshake_messages.go | 1900 -------- .../marten-seemann/qtls/handshake_server.go | 822 ---- .../qtls/handshake_server_tls13.go | 872 ---- .../marten-seemann/qtls/key_agreement.go | 313 -- .../marten-seemann/qtls/key_schedule.go | 216 - vendor/github.com/marten-seemann/qtls/prf.go | 385 -- .../github.com/marten-seemann/qtls/ticket.go | 215 - vendor/github.com/marten-seemann/qtls/tls.go | 306 -- vendor/github.com/mholt/caddy/.gitattributes | 19 - vendor/github.com/mholt/caddy/.gitignore | 22 - vendor/github.com/mholt/caddy/LICENSE.txt | 201 - vendor/github.com/mholt/caddy/README.md | 225 - vendor/github.com/mholt/caddy/assets.go | 48 - .../mholt/caddy/azure-pipelines.yml | 88 - vendor/github.com/mholt/caddy/caddy.go | 1026 ----- .../mholt/caddy/caddy/caddymain/run.go | 601 --- .../mholt/caddy/caddyfile/dispenser.go | 260 -- .../github.com/mholt/caddy/caddyfile/json.go | 198 - .../github.com/mholt/caddy/caddyfile/lexer.go | 150 - .../github.com/mholt/caddy/caddyfile/parse.go | 493 -- .../caddy/caddyhttp/basicauth/basicauth.go | 209 - .../mholt/caddy/caddyhttp/basicauth/setup.go | 113 - .../mholt/caddy/caddyhttp/bind/bind.go | 38 - .../mholt/caddy/caddyhttp/browse/browse.go | 527 --- .../mholt/caddy/caddyhttp/browse/setup.go | 515 --- .../mholt/caddy/caddyhttp/caddyhttp.go | 49 - .../mholt/caddy/caddyhttp/errors/errors.go | 164 - .../mholt/caddy/caddyhttp/errors/setup.go | 138 - .../mholt/caddy/caddyhttp/expvar/expvar.go | 59 - .../mholt/caddy/caddyhttp/expvar/setup.go | 83 - .../mholt/caddy/caddyhttp/extensions/ext.go | 58 - .../mholt/caddy/caddyhttp/extensions/setup.go | 67 - .../mholt/caddy/caddyhttp/fastcgi/fastcgi.go | 496 --- .../caddy/caddyhttp/fastcgi/fcgi_test.php | 79 - 
.../caddy/caddyhttp/fastcgi/fcgiclient.go | 578 --- .../mholt/caddy/caddyhttp/fastcgi/setup.go | 220 - .../mholt/caddy/caddyhttp/gzip/gzip.go | 163 - .../caddy/caddyhttp/gzip/requestfilter.go | 105 - .../caddy/caddyhttp/gzip/responsefilter.go | 107 - .../mholt/caddy/caddyhttp/gzip/setup.go | 183 - .../mholt/caddy/caddyhttp/header/header.go | 124 - .../mholt/caddy/caddyhttp/header/setup.go | 113 - .../caddy/caddyhttp/httpserver/condition.go | 202 - .../mholt/caddy/caddyhttp/httpserver/error.go | 70 - .../mholt/caddy/caddyhttp/httpserver/https.go | 223 - .../caddy/caddyhttp/httpserver/logger.go | 196 - .../caddy/caddyhttp/httpserver/middleware.go | 228 - .../mholt/caddy/caddyhttp/httpserver/mitm.go | 780 ---- .../mholt/caddy/caddyhttp/httpserver/path.go | 67 - .../caddy/caddyhttp/httpserver/plugin.go | 730 --- .../caddy/caddyhttp/httpserver/recorder.go | 261 -- .../caddy/caddyhttp/httpserver/replacer.go | 561 --- .../httpserver/responsewriterwrapper.go | 79 - .../caddy/caddyhttp/httpserver/roller.go | 143 - .../caddy/caddyhttp/httpserver/server.go | 625 --- .../caddy/caddyhttp/httpserver/siteconfig.go | 151 - .../caddy/caddyhttp/httpserver/tplcontext.go | 473 -- .../caddy/caddyhttp/httpserver/vhosttrie.go | 180 - .../mholt/caddy/caddyhttp/index/index.go | 49 - .../caddy/caddyhttp/internalsrv/internal.go | 111 - .../caddy/caddyhttp/internalsrv/setup.go | 61 - .../mholt/caddy/caddyhttp/limits/handler.go | 104 - .../mholt/caddy/caddyhttp/limits/setup.go | 233 - .../mholt/caddy/caddyhttp/log/log.go | 117 - .../mholt/caddy/caddyhttp/log/setup.go | 172 - .../caddy/caddyhttp/markdown/markdown.go | 195 - .../caddyhttp/markdown/metadata/metadata.go | 160 - .../markdown/metadata/metadata_json.go | 70 - .../markdown/metadata/metadata_none.go | 56 - .../markdown/metadata/metadata_toml.go | 60 - .../markdown/metadata/metadata_yaml.go | 60 - .../mholt/caddy/caddyhttp/markdown/process.go | 106 - .../mholt/caddy/caddyhttp/markdown/setup.go | 179 - 
.../caddyhttp/markdown/summary/render.go | 167 - .../caddyhttp/markdown/summary/summary.go | 31 - .../caddy/caddyhttp/markdown/template.go | 162 - .../mholt/caddy/caddyhttp/mime/mime.go | 45 - .../mholt/caddy/caddyhttp/mime/setup.go | 88 - .../mholt/caddy/caddyhttp/pprof/pprof.go | 61 - .../mholt/caddy/caddyhttp/pprof/setup.go | 51 - .../mholt/caddy/caddyhttp/proxy/body.go | 54 - .../mholt/caddy/caddyhttp/proxy/policy.go | 202 - .../mholt/caddy/caddyhttp/proxy/proxy.go | 420 -- .../caddy/caddyhttp/proxy/reverseproxy.go | 776 ---- .../mholt/caddy/caddyhttp/proxy/setup.go | 45 - .../mholt/caddy/caddyhttp/proxy/upstream.go | 782 ---- .../mholt/caddy/caddyhttp/push/handler.go | 144 - .../mholt/caddy/caddyhttp/push/link_parser.go | 77 - .../mholt/caddy/caddyhttp/push/push.go | 46 - .../mholt/caddy/caddyhttp/push/setup.go | 193 - .../caddy/caddyhttp/redirect/redirect.go | 74 - .../mholt/caddy/caddyhttp/redirect/setup.go | 187 - .../caddy/caddyhttp/requestid/requestid.go | 54 - .../mholt/caddy/caddyhttp/requestid/setup.go | 46 - .../mholt/caddy/caddyhttp/rewrite/rewrite.go | 263 -- .../mholt/caddy/caddyhttp/rewrite/setup.go | 129 - .../mholt/caddy/caddyhttp/rewrite/to.go | 103 - .../mholt/caddy/caddyhttp/root/root.go | 66 - .../caddy/caddyhttp/staticfiles/fileserver.go | 294 -- .../mholt/caddy/caddyhttp/status/setup.go | 107 - .../mholt/caddy/caddyhttp/status/status.go | 73 - .../mholt/caddy/caddyhttp/templates/setup.go | 122 - .../caddy/caddyhttp/templates/templates.go | 147 - .../caddy/caddyhttp/timeouts/timeouts.go | 120 - .../mholt/caddy/caddyhttp/websocket/setup.go | 110 - .../caddy/caddyhttp/websocket/websocket.go | 293 -- .../github.com/mholt/caddy/caddytls/config.go | 567 --- .../github.com/mholt/caddy/caddytls/crypto.go | 113 - .../mholt/caddy/caddytls/handshake.go | 152 - .../mholt/caddy/caddytls/selfsigned.go | 103 - .../github.com/mholt/caddy/caddytls/setup.go | 471 -- vendor/github.com/mholt/caddy/caddytls/tls.go | 126 - 
vendor/github.com/mholt/caddy/commands.go | 133 - vendor/github.com/mholt/caddy/controller.go | 145 - vendor/github.com/mholt/caddy/go.mod | 25 - vendor/github.com/mholt/caddy/go.sum | 100 - .../mholt/caddy/onevent/hook/config.go | 20 - .../mholt/caddy/onevent/hook/hook.go | 41 - vendor/github.com/mholt/caddy/onevent/on.go | 71 - vendor/github.com/mholt/caddy/plugins.go | 470 -- .../github.com/mholt/caddy/rlimit_nonposix.go | 22 - vendor/github.com/mholt/caddy/rlimit_posix.go | 37 - vendor/github.com/mholt/caddy/sigtrap.go | 108 - .../mholt/caddy/sigtrap_nonposix.go | 19 - .../github.com/mholt/caddy/sigtrap_posix.go | 114 - .../mholt/caddy/telemetry/collection.go | 310 -- .../mholt/caddy/telemetry/telemetry.go | 428 -- vendor/github.com/mholt/caddy/upgrade.go | 232 - vendor/github.com/mholt/certmagic/.gitignore | 1 - vendor/github.com/mholt/certmagic/.travis.yml | 23 - vendor/github.com/mholt/certmagic/LICENSE.txt | 201 - vendor/github.com/mholt/certmagic/README.md | 500 --- .../github.com/mholt/certmagic/appveyor.yml | 29 - vendor/github.com/mholt/certmagic/cache.go | 271 -- .../mholt/certmagic/certificates.go | 314 -- .../github.com/mholt/certmagic/certmagic.go | 482 -- vendor/github.com/mholt/certmagic/client.go | 411 -- vendor/github.com/mholt/certmagic/config.go | 469 -- vendor/github.com/mholt/certmagic/crypto.go | 188 - .../github.com/mholt/certmagic/filestorage.go | 275 -- vendor/github.com/mholt/certmagic/go.mod | 15 - vendor/github.com/mholt/certmagic/go.sum | 29 - .../github.com/mholt/certmagic/handshake.go | 424 -- .../github.com/mholt/certmagic/httphandler.go | 117 - vendor/github.com/mholt/certmagic/maintain.go | 339 -- vendor/github.com/mholt/certmagic/ocsp.go | 209 - vendor/github.com/mholt/certmagic/solvers.go | 148 - vendor/github.com/mholt/certmagic/storage.go | 269 -- vendor/github.com/mholt/certmagic/user.go | 280 -- vendor/github.com/miekg/dns/.codecov.yml | 8 - vendor/github.com/miekg/dns/.gitignore | 4 - 
vendor/github.com/miekg/dns/.travis.yml | 19 - vendor/github.com/miekg/dns/AUTHORS | 1 - vendor/github.com/miekg/dns/CONTRIBUTORS | 10 - vendor/github.com/miekg/dns/COPYRIGHT | 9 - vendor/github.com/miekg/dns/Gopkg.lock | 57 - vendor/github.com/miekg/dns/Gopkg.toml | 38 - vendor/github.com/miekg/dns/LICENSE | 32 - vendor/github.com/miekg/dns/Makefile.fuzz | 33 - vendor/github.com/miekg/dns/Makefile.release | 52 - vendor/github.com/miekg/dns/README.md | 172 - vendor/github.com/miekg/dns/acceptfunc.go | 56 - vendor/github.com/miekg/dns/client.go | 417 -- vendor/github.com/miekg/dns/clientconfig.go | 135 - vendor/github.com/miekg/dns/dane.go | 43 - vendor/github.com/miekg/dns/defaults.go | 378 -- vendor/github.com/miekg/dns/dns.go | 134 - vendor/github.com/miekg/dns/dnssec.go | 791 ---- vendor/github.com/miekg/dns/dnssec_keygen.go | 178 - vendor/github.com/miekg/dns/dnssec_keyscan.go | 352 -- vendor/github.com/miekg/dns/dnssec_privkey.go | 94 - vendor/github.com/miekg/dns/doc.go | 269 -- vendor/github.com/miekg/dns/duplicate.go | 38 - .../miekg/dns/duplicate_generate.go | 144 - vendor/github.com/miekg/dns/edns.go | 659 --- vendor/github.com/miekg/dns/format.go | 93 - vendor/github.com/miekg/dns/fuzz.go | 23 - vendor/github.com/miekg/dns/generate.go | 242 - vendor/github.com/miekg/dns/labels.go | 188 - vendor/github.com/miekg/dns/listen_go111.go | 44 - .../github.com/miekg/dns/listen_go_not111.go | 23 - vendor/github.com/miekg/dns/msg.go | 1228 ----- vendor/github.com/miekg/dns/msg_generate.go | 328 -- vendor/github.com/miekg/dns/msg_helpers.go | 648 --- vendor/github.com/miekg/dns/msg_truncate.go | 106 - vendor/github.com/miekg/dns/nsecx.go | 95 - vendor/github.com/miekg/dns/privaterr.go | 132 - vendor/github.com/miekg/dns/reverse.go | 52 - vendor/github.com/miekg/dns/sanitize.go | 86 - vendor/github.com/miekg/dns/scan.go | 1337 ------ vendor/github.com/miekg/dns/scan_rr.go | 1945 -------- vendor/github.com/miekg/dns/serve_mux.go | 147 - 
vendor/github.com/miekg/dns/server.go | 757 ---- vendor/github.com/miekg/dns/sig0.go | 209 - vendor/github.com/miekg/dns/singleinflight.go | 61 - vendor/github.com/miekg/dns/smimea.go | 44 - vendor/github.com/miekg/dns/tlsa.go | 44 - vendor/github.com/miekg/dns/tsig.go | 389 -- vendor/github.com/miekg/dns/types.go | 1434 ------ vendor/github.com/miekg/dns/types_generate.go | 287 -- vendor/github.com/miekg/dns/udp.go | 102 - vendor/github.com/miekg/dns/udp_windows.go | 35 - vendor/github.com/miekg/dns/update.go | 110 - vendor/github.com/miekg/dns/version.go | 15 - vendor/github.com/miekg/dns/xfr.go | 260 -- vendor/github.com/miekg/dns/zduplicate.go | 1140 ----- vendor/github.com/miekg/dns/zmsg.go | 2722 ----------- vendor/github.com/miekg/dns/ztypes.go | 881 ---- .../naoina/go-stringutil/.travis.yml | 9 - .../github.com/naoina/go-stringutil/LICENSE | 19 - .../github.com/naoina/go-stringutil/README.md | 13 - vendor/github.com/naoina/go-stringutil/da.go | 253 -- .../naoina/go-stringutil/strings.go | 320 -- vendor/github.com/naoina/toml/.travis.yml | 11 - vendor/github.com/naoina/toml/LICENSE | 19 - vendor/github.com/naoina/toml/README.md | 392 -- vendor/github.com/naoina/toml/ast/ast.go | 192 - vendor/github.com/naoina/toml/config.go | 86 - vendor/github.com/naoina/toml/decode.go | 478 -- vendor/github.com/naoina/toml/encode.go | 398 -- vendor/github.com/naoina/toml/error.go | 107 - vendor/github.com/naoina/toml/parse.go | 362 -- vendor/github.com/naoina/toml/parse.peg | 147 - vendor/github.com/naoina/toml/parse.peg.go | 2579 ----------- vendor/github.com/naoina/toml/util.go | 65 - vendor/github.com/openshift/api/LICENSE | 14 +- .../openshift/api/apps/v1/consts.go | 108 + .../api/apps/v1/deprecated_consts.go | 38 + .../openshift/api/apps/v1/generated.pb.go | 3968 +++++++++++------ .../openshift/api/apps/v1/generated.proto | 18 +- .../openshift/api/apps/v1/legacy.go | 28 + .../openshift/api/apps/v1/register.go | 47 +- .../github.com/openshift/api/apps/v1/types.go | 11 
+- .../api/apps/v1/zz_generated.deepcopy.go | 330 +- ... => zz_generated.swagger_doc_generated.go} | 20 +- .../openshift/api/project/v1/generated.pb.go | 718 ++- .../openshift/api/project/v1/generated.proto | 17 +- .../openshift/api/project/v1/legacy.go | 23 + .../openshift/api/project/v1/register.go | 43 +- .../openshift/api/project/v1/types.go | 24 +- .../api/project/v1/zz_generated.deepcopy.go | 26 +- ... => zz_generated.swagger_doc_generated.go} | 20 +- .../russross/blackfriday/.gitignore | 8 - .../russross/blackfriday/.travis.yml | 17 - .../russross/blackfriday/LICENSE.txt | 29 - .../github.com/russross/blackfriday/README.md | 369 -- .../github.com/russross/blackfriday/block.go | 1474 ------ vendor/github.com/russross/blackfriday/doc.go | 32 - vendor/github.com/russross/blackfriday/go.mod | 1 - .../github.com/russross/blackfriday/html.go | 938 ---- .../github.com/russross/blackfriday/inline.go | 1154 ----- .../github.com/russross/blackfriday/latex.go | 334 -- .../russross/blackfriday/markdown.go | 941 ---- .../russross/blackfriday/smartypants.go | 430 -- vendor/golang.org/x/crypto/bcrypt/base64.go | 35 + vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 295 ++ vendor/golang.org/x/crypto/blowfish/block.go | 159 + vendor/golang.org/x/crypto/blowfish/cipher.go | 99 + vendor/golang.org/x/crypto/blowfish/const.go | 199 + .../chacha20poly1305/chacha20poly1305.go | 101 - .../chacha20poly1305_amd64.go | 86 - .../chacha20poly1305/chacha20poly1305_amd64.s | 2695 ----------- .../chacha20poly1305_generic.go | 81 - .../chacha20poly1305_noasm.go | 15 - .../chacha20poly1305/xchacha20poly1305.go | 104 - vendor/golang.org/x/crypto/hkdf/hkdf.go | 93 - vendor/golang.org/x/crypto/ocsp/ocsp.go | 784 ---- vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 - vendor/golang.org/x/net/bpf/asm.go | 41 - vendor/golang.org/x/net/bpf/constants.go | 222 - vendor/golang.org/x/net/bpf/doc.go | 82 - vendor/golang.org/x/net/bpf/instructions.go | 726 --- vendor/golang.org/x/net/bpf/setter.go | 10 - 
vendor/golang.org/x/net/bpf/vm.go | 150 - .../golang.org/x/net/bpf/vm_instructions.go | 182 - vendor/golang.org/x/net/html/atom/gen.go | 712 --- .../golang.org/x/net/internal/iana/const.go | 223 - vendor/golang.org/x/net/internal/iana/gen.go | 383 -- .../x/net/internal/socket/cmsghdr.go | 11 - .../x/net/internal/socket/cmsghdr_bsd.go | 13 - .../internal/socket/cmsghdr_linux_32bit.go | 14 - .../internal/socket/cmsghdr_linux_64bit.go | 14 - .../internal/socket/cmsghdr_solaris_64bit.go | 14 - .../x/net/internal/socket/cmsghdr_stub.go | 17 - .../x/net/internal/socket/defs_aix.go | 39 - .../x/net/internal/socket/defs_darwin.go | 36 - .../x/net/internal/socket/defs_dragonfly.go | 36 - .../x/net/internal/socket/defs_freebsd.go | 36 - .../x/net/internal/socket/defs_linux.go | 41 - .../x/net/internal/socket/defs_netbsd.go | 39 - .../x/net/internal/socket/defs_openbsd.go | 36 - .../x/net/internal/socket/defs_solaris.go | 36 - .../golang.org/x/net/internal/socket/empty.s | 7 - .../x/net/internal/socket/error_unix.go | 31 - .../x/net/internal/socket/error_windows.go | 26 - .../x/net/internal/socket/iovec_32bit.go | 19 - .../x/net/internal/socket/iovec_64bit.go | 19 - .../internal/socket/iovec_solaris_64bit.go | 19 - .../x/net/internal/socket/iovec_stub.go | 11 - .../x/net/internal/socket/mmsghdr_stub.go | 21 - .../x/net/internal/socket/mmsghdr_unix.go | 42 - .../x/net/internal/socket/msghdr_bsd.go | 39 - .../x/net/internal/socket/msghdr_bsdvar.go | 16 - .../x/net/internal/socket/msghdr_linux.go | 36 - .../net/internal/socket/msghdr_linux_32bit.go | 24 - .../net/internal/socket/msghdr_linux_64bit.go | 24 - .../x/net/internal/socket/msghdr_openbsd.go | 14 - .../internal/socket/msghdr_solaris_64bit.go | 36 - .../x/net/internal/socket/msghdr_stub.go | 14 - .../x/net/internal/socket/rawconn.go | 64 - .../x/net/internal/socket/rawconn_mmsg.go | 73 - .../x/net/internal/socket/rawconn_msg.go | 76 - .../x/net/internal/socket/rawconn_nommsg.go | 15 - 
.../x/net/internal/socket/rawconn_nomsg.go | 15 - .../x/net/internal/socket/socket.go | 288 -- .../golang.org/x/net/internal/socket/sys.go | 33 - .../x/net/internal/socket/sys_bsd.go | 15 - .../x/net/internal/socket/sys_bsdvar.go | 23 - .../x/net/internal/socket/sys_const_unix.go | 17 - .../x/net/internal/socket/sys_darwin.go | 7 - .../x/net/internal/socket/sys_dragonfly.go | 7 - .../net/internal/socket/sys_go1_11_darwin.go | 33 - .../x/net/internal/socket/sys_linkname.go | 42 - .../x/net/internal/socket/sys_linux.go | 27 - .../x/net/internal/socket/sys_linux_386.go | 55 - .../x/net/internal/socket/sys_linux_386.s | 11 - .../x/net/internal/socket/sys_linux_amd64.go | 10 - .../x/net/internal/socket/sys_linux_arm.go | 10 - .../x/net/internal/socket/sys_linux_arm64.go | 10 - .../x/net/internal/socket/sys_linux_mips.go | 10 - .../x/net/internal/socket/sys_linux_mips64.go | 10 - .../net/internal/socket/sys_linux_mips64le.go | 10 - .../x/net/internal/socket/sys_linux_mipsle.go | 10 - .../x/net/internal/socket/sys_linux_ppc64.go | 10 - .../net/internal/socket/sys_linux_ppc64le.go | 10 - .../net/internal/socket/sys_linux_riscv64.go | 12 - .../x/net/internal/socket/sys_linux_s390x.go | 55 - .../x/net/internal/socket/sys_linux_s390x.s | 11 - .../x/net/internal/socket/sys_netbsd.go | 25 - .../x/net/internal/socket/sys_posix.go | 183 - .../x/net/internal/socket/sys_solaris.go | 70 - .../x/net/internal/socket/sys_solaris_amd64.s | 11 - .../x/net/internal/socket/sys_stub.go | 63 - .../x/net/internal/socket/sys_unix.go | 33 - .../x/net/internal/socket/sys_windows.go | 71 - .../x/net/internal/socket/zsys_aix_ppc64.go | 61 - .../x/net/internal/socket/zsys_darwin_386.go | 51 - .../net/internal/socket/zsys_darwin_amd64.go | 53 - .../x/net/internal/socket/zsys_darwin_arm.go | 51 - .../net/internal/socket/zsys_darwin_arm64.go | 53 - .../internal/socket/zsys_dragonfly_amd64.go | 53 - .../x/net/internal/socket/zsys_freebsd_386.go | 51 - .../net/internal/socket/zsys_freebsd_amd64.go | 53 
- .../x/net/internal/socket/zsys_freebsd_arm.go | 51 - .../net/internal/socket/zsys_freebsd_arm64.go | 53 - .../x/net/internal/socket/zsys_linux_386.go | 55 - .../x/net/internal/socket/zsys_linux_amd64.go | 58 - .../x/net/internal/socket/zsys_linux_arm.go | 55 - .../x/net/internal/socket/zsys_linux_arm64.go | 58 - .../x/net/internal/socket/zsys_linux_mips.go | 55 - .../net/internal/socket/zsys_linux_mips64.go | 58 - .../internal/socket/zsys_linux_mips64le.go | 58 - .../net/internal/socket/zsys_linux_mipsle.go | 55 - .../x/net/internal/socket/zsys_linux_ppc64.go | 58 - .../net/internal/socket/zsys_linux_ppc64le.go | 58 - .../net/internal/socket/zsys_linux_riscv64.go | 59 - .../x/net/internal/socket/zsys_linux_s390x.go | 58 - .../x/net/internal/socket/zsys_netbsd_386.go | 57 - .../net/internal/socket/zsys_netbsd_amd64.go | 60 - .../x/net/internal/socket/zsys_netbsd_arm.go | 57 - .../net/internal/socket/zsys_netbsd_arm64.go | 60 - .../x/net/internal/socket/zsys_openbsd_386.go | 51 - .../net/internal/socket/zsys_openbsd_amd64.go | 53 - .../x/net/internal/socket/zsys_openbsd_arm.go | 51 - .../net/internal/socket/zsys_openbsd_arm64.go | 53 - .../net/internal/socket/zsys_solaris_amd64.go | 52 - vendor/golang.org/x/net/ipv4/batch.go | 194 - vendor/golang.org/x/net/ipv4/control.go | 144 - vendor/golang.org/x/net/ipv4/control_bsd.go | 40 - .../golang.org/x/net/ipv4/control_pktinfo.go | 39 - vendor/golang.org/x/net/ipv4/control_stub.go | 13 - vendor/golang.org/x/net/ipv4/control_unix.go | 73 - .../golang.org/x/net/ipv4/control_windows.go | 12 - vendor/golang.org/x/net/ipv4/defs_aix.go | 39 - vendor/golang.org/x/net/ipv4/defs_darwin.go | 77 - .../golang.org/x/net/ipv4/defs_dragonfly.go | 38 - vendor/golang.org/x/net/ipv4/defs_freebsd.go | 75 - vendor/golang.org/x/net/ipv4/defs_linux.go | 122 - vendor/golang.org/x/net/ipv4/defs_netbsd.go | 37 - vendor/golang.org/x/net/ipv4/defs_openbsd.go | 37 - vendor/golang.org/x/net/ipv4/defs_solaris.go | 84 - 
vendor/golang.org/x/net/ipv4/dgramopt.go | 264 -- vendor/golang.org/x/net/ipv4/doc.go | 244 - vendor/golang.org/x/net/ipv4/endpoint.go | 186 - vendor/golang.org/x/net/ipv4/gen.go | 199 - vendor/golang.org/x/net/ipv4/genericopt.go | 55 - vendor/golang.org/x/net/ipv4/header.go | 173 - vendor/golang.org/x/net/ipv4/helper.go | 80 - vendor/golang.org/x/net/ipv4/iana.go | 38 - vendor/golang.org/x/net/ipv4/icmp.go | 57 - vendor/golang.org/x/net/ipv4/icmp_linux.go | 25 - vendor/golang.org/x/net/ipv4/icmp_stub.go | 25 - vendor/golang.org/x/net/ipv4/packet.go | 117 - vendor/golang.org/x/net/ipv4/payload.go | 23 - vendor/golang.org/x/net/ipv4/payload_cmsg.go | 84 - .../golang.org/x/net/ipv4/payload_nocmsg.go | 39 - vendor/golang.org/x/net/ipv4/sockopt.go | 44 - vendor/golang.org/x/net/ipv4/sockopt_posix.go | 71 - vendor/golang.org/x/net/ipv4/sockopt_stub.go | 42 - vendor/golang.org/x/net/ipv4/sys_aix.go | 38 - vendor/golang.org/x/net/ipv4/sys_asmreq.go | 119 - .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 25 - vendor/golang.org/x/net/ipv4/sys_asmreqn.go | 42 - .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 21 - vendor/golang.org/x/net/ipv4/sys_bpf.go | 23 - vendor/golang.org/x/net/ipv4/sys_bpf_stub.go | 16 - vendor/golang.org/x/net/ipv4/sys_bsd.go | 37 - vendor/golang.org/x/net/ipv4/sys_darwin.go | 65 - vendor/golang.org/x/net/ipv4/sys_dragonfly.go | 35 - vendor/golang.org/x/net/ipv4/sys_freebsd.go | 76 - vendor/golang.org/x/net/ipv4/sys_linux.go | 59 - vendor/golang.org/x/net/ipv4/sys_solaris.go | 57 - vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 52 - .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 21 - vendor/golang.org/x/net/ipv4/sys_stub.go | 13 - vendor/golang.org/x/net/ipv4/sys_windows.go | 67 - .../golang.org/x/net/ipv4/zsys_aix_ppc64.go | 33 - vendor/golang.org/x/net/ipv4/zsys_darwin.go | 99 - .../golang.org/x/net/ipv4/zsys_dragonfly.go | 31 - .../golang.org/x/net/ipv4/zsys_freebsd_386.go | 93 - .../x/net/ipv4/zsys_freebsd_amd64.go | 95 - 
.../golang.org/x/net/ipv4/zsys_freebsd_arm.go | 95 - .../golang.org/x/net/ipv4/zsys_linux_386.go | 148 - .../golang.org/x/net/ipv4/zsys_linux_amd64.go | 150 - .../golang.org/x/net/ipv4/zsys_linux_arm.go | 148 - .../golang.org/x/net/ipv4/zsys_linux_arm64.go | 150 - .../golang.org/x/net/ipv4/zsys_linux_mips.go | 148 - .../x/net/ipv4/zsys_linux_mips64.go | 150 - .../x/net/ipv4/zsys_linux_mips64le.go | 150 - .../x/net/ipv4/zsys_linux_mipsle.go | 148 - .../golang.org/x/net/ipv4/zsys_linux_ppc.go | 148 - .../golang.org/x/net/ipv4/zsys_linux_ppc64.go | 150 - .../x/net/ipv4/zsys_linux_ppc64le.go | 150 - .../x/net/ipv4/zsys_linux_riscv64.go | 151 - .../golang.org/x/net/ipv4/zsys_linux_s390x.go | 150 - vendor/golang.org/x/net/ipv4/zsys_netbsd.go | 30 - vendor/golang.org/x/net/ipv4/zsys_openbsd.go | 30 - vendor/golang.org/x/net/ipv4/zsys_solaris.go | 100 - vendor/golang.org/x/net/ipv6/batch.go | 116 - vendor/golang.org/x/net/ipv6/control.go | 187 - .../x/net/ipv6/control_rfc2292_unix.go | 48 - .../x/net/ipv6/control_rfc3542_unix.go | 94 - vendor/golang.org/x/net/ipv6/control_stub.go | 13 - vendor/golang.org/x/net/ipv6/control_unix.go | 55 - .../golang.org/x/net/ipv6/control_windows.go | 12 - vendor/golang.org/x/net/ipv6/defs_aix.go | 82 - vendor/golang.org/x/net/ipv6/defs_darwin.go | 112 - .../golang.org/x/net/ipv6/defs_dragonfly.go | 84 - vendor/golang.org/x/net/ipv6/defs_freebsd.go | 105 - vendor/golang.org/x/net/ipv6/defs_linux.go | 147 - vendor/golang.org/x/net/ipv6/defs_netbsd.go | 80 - vendor/golang.org/x/net/ipv6/defs_openbsd.go | 89 - vendor/golang.org/x/net/ipv6/defs_solaris.go | 114 - vendor/golang.org/x/net/ipv6/dgramopt.go | 301 -- vendor/golang.org/x/net/ipv6/doc.go | 243 - vendor/golang.org/x/net/ipv6/endpoint.go | 127 - vendor/golang.org/x/net/ipv6/gen.go | 199 - vendor/golang.org/x/net/ipv6/genericopt.go | 56 - vendor/golang.org/x/net/ipv6/header.go | 55 - vendor/golang.org/x/net/ipv6/helper.go | 59 - vendor/golang.org/x/net/ipv6/iana.go | 86 - 
vendor/golang.org/x/net/ipv6/icmp.go | 60 - vendor/golang.org/x/net/ipv6/icmp_bsd.go | 29 - vendor/golang.org/x/net/ipv6/icmp_linux.go | 27 - vendor/golang.org/x/net/ipv6/icmp_solaris.go | 27 - vendor/golang.org/x/net/ipv6/icmp_stub.go | 23 - vendor/golang.org/x/net/ipv6/icmp_windows.go | 22 - vendor/golang.org/x/net/ipv6/payload.go | 23 - vendor/golang.org/x/net/ipv6/payload_cmsg.go | 70 - .../golang.org/x/net/ipv6/payload_nocmsg.go | 38 - vendor/golang.org/x/net/ipv6/sockopt.go | 43 - vendor/golang.org/x/net/ipv6/sockopt_posix.go | 89 - vendor/golang.org/x/net/ipv6/sockopt_stub.go | 46 - vendor/golang.org/x/net/ipv6/sys_aix.go | 77 - vendor/golang.org/x/net/ipv6/sys_asmreq.go | 24 - .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 17 - vendor/golang.org/x/net/ipv6/sys_bpf.go | 23 - vendor/golang.org/x/net/ipv6/sys_bpf_stub.go | 16 - vendor/golang.org/x/net/ipv6/sys_bsd.go | 57 - vendor/golang.org/x/net/ipv6/sys_darwin.go | 78 - vendor/golang.org/x/net/ipv6/sys_freebsd.go | 92 - vendor/golang.org/x/net/ipv6/sys_linux.go | 74 - vendor/golang.org/x/net/ipv6/sys_solaris.go | 74 - vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 54 - .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 21 - vendor/golang.org/x/net/ipv6/sys_stub.go | 13 - vendor/golang.org/x/net/ipv6/sys_windows.go | 75 - .../golang.org/x/net/ipv6/zsys_aix_ppc64.go | 103 - vendor/golang.org/x/net/ipv6/zsys_darwin.go | 131 - .../golang.org/x/net/ipv6/zsys_dragonfly.go | 88 - .../golang.org/x/net/ipv6/zsys_freebsd_386.go | 122 - .../x/net/ipv6/zsys_freebsd_amd64.go | 124 - .../golang.org/x/net/ipv6/zsys_freebsd_arm.go | 124 - .../golang.org/x/net/ipv6/zsys_linux_386.go | 170 - .../golang.org/x/net/ipv6/zsys_linux_amd64.go | 172 - .../golang.org/x/net/ipv6/zsys_linux_arm.go | 170 - .../golang.org/x/net/ipv6/zsys_linux_arm64.go | 172 - .../golang.org/x/net/ipv6/zsys_linux_mips.go | 170 - .../x/net/ipv6/zsys_linux_mips64.go | 172 - .../x/net/ipv6/zsys_linux_mips64le.go | 172 - .../x/net/ipv6/zsys_linux_mipsle.go | 170 
- .../golang.org/x/net/ipv6/zsys_linux_ppc.go | 170 - .../golang.org/x/net/ipv6/zsys_linux_ppc64.go | 172 - .../x/net/ipv6/zsys_linux_ppc64le.go | 172 - .../x/net/ipv6/zsys_linux_riscv64.go | 173 - .../golang.org/x/net/ipv6/zsys_linux_s390x.go | 172 - vendor/golang.org/x/net/ipv6/zsys_netbsd.go | 84 - vendor/golang.org/x/net/ipv6/zsys_openbsd.go | 93 - vendor/golang.org/x/net/ipv6/zsys_solaris.go | 131 - vendor/golang.org/x/sys/unix/mkasm_darwin.go | 61 - vendor/golang.org/x/sys/unix/mkpost.go | 106 - vendor/golang.org/x/sys/unix/mksyscall.go | 402 -- .../x/sys/unix/mksyscall_aix_ppc.go | 404 -- .../x/sys/unix/mksyscall_aix_ppc64.go | 602 --- .../x/sys/unix/mksyscall_solaris.go | 335 -- vendor/golang.org/x/sys/unix/mksysnum.go | 190 - vendor/golang.org/x/sys/unix/types_aix.go | 236 - vendor/golang.org/x/sys/unix/types_darwin.go | 277 -- .../golang.org/x/sys/unix/types_dragonfly.go | 263 -- vendor/golang.org/x/sys/unix/types_freebsd.go | 356 -- vendor/golang.org/x/sys/unix/types_netbsd.go | 289 -- vendor/golang.org/x/sys/unix/types_openbsd.go | 276 -- vendor/golang.org/x/sys/unix/types_solaris.go | 266 -- .../x/text/encoding/charmap/maketables.go | 556 --- .../x/text/encoding/htmlindex/gen.go | 173 - .../text/encoding/internal/identifier/gen.go | 137 - .../x/text/encoding/japanese/maketables.go | 161 - .../x/text/encoding/korean/maketables.go | 143 - .../encoding/simplifiedchinese/maketables.go | 161 - .../encoding/traditionalchinese/maketables.go | 140 - vendor/golang.org/x/text/language/gen.go | 1712 ------- .../golang.org/x/text/language/gen_common.go | 20 - .../golang.org/x/text/language/gen_index.go | 162 - vendor/golang.org/x/text/unicode/bidi/gen.go | 133 - .../x/text/unicode/bidi/gen_ranges.go | 57 - .../x/text/unicode/bidi/gen_trieval.go | 64 - .../x/text/unicode/norm/maketables.go | 976 ---- .../golang.org/x/text/unicode/norm/triegen.go | 117 - vendor/golang.org/x/text/width/gen.go | 115 - vendor/golang.org/x/text/width/gen_common.go | 96 - 
vendor/golang.org/x/text/width/gen_trieval.go | 34 - .../x/tools/go/gcexportdata/main.go | 99 - .../x/tools/internal/imports/mkindex.go | 173 - .../x/tools/internal/imports/mkstdlib.go | 132 - .../square/go-jose.v2/.gitcookies.sh.enc | 1 - vendor/gopkg.in/square/go-jose.v2/.gitignore | 7 - vendor/gopkg.in/square/go-jose.v2/.travis.yml | 46 - .../gopkg.in/square/go-jose.v2/BUG-BOUNTY.md | 10 - .../square/go-jose.v2/CONTRIBUTING.md | 14 - vendor/gopkg.in/square/go-jose.v2/LICENSE | 202 - vendor/gopkg.in/square/go-jose.v2/README.md | 118 - .../gopkg.in/square/go-jose.v2/asymmetric.go | 592 --- .../square/go-jose.v2/cipher/cbc_hmac.go | 196 - .../square/go-jose.v2/cipher/concat_kdf.go | 75 - .../square/go-jose.v2/cipher/ecdh_es.go | 62 - .../square/go-jose.v2/cipher/key_wrap.go | 109 - vendor/gopkg.in/square/go-jose.v2/crypter.go | 535 --- vendor/gopkg.in/square/go-jose.v2/doc.go | 27 - vendor/gopkg.in/square/go-jose.v2/encoding.go | 179 - .../gopkg.in/square/go-jose.v2/json/LICENSE | 27 - .../gopkg.in/square/go-jose.v2/json/README.md | 13 - .../gopkg.in/square/go-jose.v2/json/decode.go | 1183 ----- .../gopkg.in/square/go-jose.v2/json/encode.go | 1197 ----- .../gopkg.in/square/go-jose.v2/json/indent.go | 141 - .../square/go-jose.v2/json/scanner.go | 623 --- .../gopkg.in/square/go-jose.v2/json/stream.go | 480 -- .../gopkg.in/square/go-jose.v2/json/tags.go | 44 - vendor/gopkg.in/square/go-jose.v2/jwe.go | 294 -- vendor/gopkg.in/square/go-jose.v2/jwk.go | 608 --- vendor/gopkg.in/square/go-jose.v2/jws.go | 321 -- vendor/gopkg.in/square/go-jose.v2/opaque.go | 83 - vendor/gopkg.in/square/go-jose.v2/shared.go | 499 --- vendor/gopkg.in/square/go-jose.v2/signing.go | 389 -- .../gopkg.in/square/go-jose.v2/symmetric.go | 482 -- vendor/modules.txt | 125 +- .../openpitrix/pkg/version/gen_helper.go | 74 - 879 files changed, 5869 insertions(+), 139213 deletions(-) create mode 100644 config/crds/iam.kubesphere.io_policyrules.yaml create mode 100644 
config/crds/iam.kubesphere.io_rolebindings.yaml create mode 100644 config/crds/iam.kubesphere.io_roles.yaml create mode 100644 config/samples/iam_v1alpha2_policyrule.yaml create mode 100644 config/samples/iam_v1alpha2_role.yaml create mode 100644 config/samples/iam_v1alpha2_rolebinding.yaml create mode 100644 config/webhook/iam.yaml delete mode 100644 pkg/api/iam/user.go rename pkg/apis/iam/v1alpha2/{user_types.go => types.go} (53%) rename pkg/apis/iam/v1alpha2/{user_types_test.go => types_test.go} (100%) create mode 100644 pkg/controller/user/user_webhook.go create mode 100644 pkg/models/iam/im/im_operator.go create mode 100644 pkg/models/iam/im/ldap_operator.go create mode 100644 pkg/models/resources/v1alpha3/resource/resource_test.go delete mode 100644 vendor/github.com/cenkalti/backoff/.gitignore delete mode 100644 vendor/github.com/cenkalti/backoff/.travis.yml delete mode 100644 vendor/github.com/cenkalti/backoff/LICENSE delete mode 100644 vendor/github.com/cenkalti/backoff/README.md delete mode 100644 vendor/github.com/cenkalti/backoff/backoff.go delete mode 100644 vendor/github.com/cenkalti/backoff/context.go delete mode 100644 vendor/github.com/cenkalti/backoff/exponential.go delete mode 100644 vendor/github.com/cenkalti/backoff/retry.go delete mode 100644 vendor/github.com/cenkalti/backoff/ticker.go delete mode 100644 vendor/github.com/cenkalti/backoff/tries.go delete mode 100644 vendor/github.com/cheekybits/genny/LICENSE delete mode 100644 vendor/github.com/cheekybits/genny/generic/doc.go delete mode 100644 vendor/github.com/cheekybits/genny/generic/generic.go delete mode 100644 vendor/github.com/dustin/go-humanize/.travis.yml delete mode 100644 vendor/github.com/dustin/go-humanize/LICENSE delete mode 100644 vendor/github.com/dustin/go-humanize/README.markdown delete mode 100644 vendor/github.com/dustin/go-humanize/big.go delete mode 100644 vendor/github.com/dustin/go-humanize/bigbytes.go delete mode 100644 vendor/github.com/dustin/go-humanize/bytes.go 
delete mode 100644 vendor/github.com/dustin/go-humanize/comma.go delete mode 100644 vendor/github.com/dustin/go-humanize/commaf.go delete mode 100644 vendor/github.com/dustin/go-humanize/ftoa.go delete mode 100644 vendor/github.com/dustin/go-humanize/humanize.go delete mode 100644 vendor/github.com/dustin/go-humanize/number.go delete mode 100644 vendor/github.com/dustin/go-humanize/ordinals.go delete mode 100644 vendor/github.com/dustin/go-humanize/si.go delete mode 100644 vendor/github.com/dustin/go-humanize/times.go delete mode 100644 vendor/github.com/flynn/go-shlex/COPYING delete mode 100644 vendor/github.com/flynn/go-shlex/Makefile delete mode 100644 vendor/github.com/flynn/go-shlex/README.md delete mode 100644 vendor/github.com/flynn/go-shlex/shlex.go delete mode 100644 vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go delete mode 100644 vendor/github.com/go-acme/lego/LICENSE delete mode 100644 vendor/github.com/go-acme/lego/acme/api/account.go delete mode 100644 vendor/github.com/go-acme/lego/acme/api/api.go delete mode 100644 vendor/github.com/go-acme/lego/acme/api/authorization.go delete mode 100644 vendor/github.com/go-acme/lego/acme/api/certificate.go delete mode 100644 vendor/github.com/go-acme/lego/acme/api/challenge.go delete mode 100644 vendor/github.com/go-acme/lego/acme/api/internal/nonces/nonce_manager.go delete mode 100644 vendor/github.com/go-acme/lego/acme/api/internal/secure/jws.go delete mode 100644 vendor/github.com/go-acme/lego/acme/api/internal/sender/sender.go delete mode 100644 vendor/github.com/go-acme/lego/acme/api/internal/sender/useragent.go delete mode 100644 vendor/github.com/go-acme/lego/acme/api/order.go delete mode 100644 vendor/github.com/go-acme/lego/acme/api/service.go delete mode 100644 vendor/github.com/go-acme/lego/acme/commons.go delete mode 100644 vendor/github.com/go-acme/lego/acme/errors.go delete mode 100644 vendor/github.com/go-acme/lego/certcrypto/crypto.go delete mode 100644 
vendor/github.com/go-acme/lego/certificate/authorization.go delete mode 100644 vendor/github.com/go-acme/lego/certificate/certificates.go delete mode 100644 vendor/github.com/go-acme/lego/certificate/errors.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/challenges.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/dns01/cname.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/dns01/dns_challenge.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/dns01/dns_challenge_manual.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/dns01/fqdn.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/dns01/nameserver.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/dns01/precheck.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/http01/http_challenge.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/http01/http_challenge_server.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/provider.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/resolver/errors.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/resolver/prober.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/resolver/solver_manager.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/tlsalpn01/tls_alpn_challenge.go delete mode 100644 vendor/github.com/go-acme/lego/challenge/tlsalpn01/tls_alpn_challenge_server.go delete mode 100644 vendor/github.com/go-acme/lego/lego/client.go delete mode 100644 vendor/github.com/go-acme/lego/lego/client_config.go delete mode 100644 vendor/github.com/go-acme/lego/log/logger.go delete mode 100644 vendor/github.com/go-acme/lego/platform/wait/wait.go delete mode 100644 vendor/github.com/go-acme/lego/registration/registar.go delete mode 100644 vendor/github.com/go-acme/lego/registration/user.go delete mode 100644 vendor/github.com/hashicorp/go-syslog/.gitignore delete mode 100644 
vendor/github.com/hashicorp/go-syslog/LICENSE delete mode 100644 vendor/github.com/hashicorp/go-syslog/README.md delete mode 100644 vendor/github.com/hashicorp/go-syslog/builtin.go delete mode 100644 vendor/github.com/hashicorp/go-syslog/go.mod delete mode 100644 vendor/github.com/hashicorp/go-syslog/syslog.go delete mode 100644 vendor/github.com/hashicorp/go-syslog/unix.go delete mode 100644 vendor/github.com/hashicorp/go-syslog/unsupported.go delete mode 100644 vendor/github.com/jimstudt/http-authentication/LICENSE delete mode 100644 vendor/github.com/jimstudt/http-authentication/basic/README.md delete mode 100644 vendor/github.com/jimstudt/http-authentication/basic/bcrypt.go delete mode 100644 vendor/github.com/jimstudt/http-authentication/basic/htpasswd.go delete mode 100644 vendor/github.com/jimstudt/http-authentication/basic/md5.go delete mode 100644 vendor/github.com/jimstudt/http-authentication/basic/plain.go delete mode 100644 vendor/github.com/jimstudt/http-authentication/basic/sha.go delete mode 100644 vendor/github.com/jimstudt/http-authentication/basic/util.go delete mode 100644 vendor/github.com/klauspost/cpuid/.gitignore delete mode 100644 vendor/github.com/klauspost/cpuid/.travis.yml delete mode 100644 vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt delete mode 100644 vendor/github.com/klauspost/cpuid/LICENSE delete mode 100644 vendor/github.com/klauspost/cpuid/README.md delete mode 100644 vendor/github.com/klauspost/cpuid/cpuid.go delete mode 100644 vendor/github.com/klauspost/cpuid/cpuid_386.s delete mode 100644 vendor/github.com/klauspost/cpuid/cpuid_amd64.s delete mode 100644 vendor/github.com/klauspost/cpuid/detect_intel.go delete mode 100644 vendor/github.com/klauspost/cpuid/detect_ref.go delete mode 100644 vendor/github.com/klauspost/cpuid/generate.go delete mode 100644 vendor/github.com/klauspost/cpuid/private-gen.go create mode 100644 vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake/clientset_generated.go 
create mode 100644 vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake/doc.go create mode 100644 vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake/register.go create mode 100644 vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake/doc.go create mode 100644 vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake/fake_app_client.go create mode 100644 vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake/fake_application.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/.editorconfig delete mode 100644 vendor/github.com/lucas-clemente/quic-go/.gitignore delete mode 100644 vendor/github.com/lucas-clemente/quic-go/.golangci.yml delete mode 100644 vendor/github.com/lucas-clemente/quic-go/.travis.yml delete mode 100644 vendor/github.com/lucas-clemente/quic-go/Changelog.md delete mode 100644 vendor/github.com/lucas-clemente/quic-go/LICENSE delete mode 100644 vendor/github.com/lucas-clemente/quic-go/README.md delete mode 100644 vendor/github.com/lucas-clemente/quic-go/appveyor.yml delete mode 100644 vendor/github.com/lucas-clemente/quic-go/buffer_pool.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/client.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/codecov.yml delete mode 100644 vendor/github.com/lucas-clemente/quic-go/conn.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/crypto_stream.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/crypto_stream_manager.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/frame_sorter.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/framer.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/go.mod delete mode 100644 vendor/github.com/lucas-clemente/quic-go/go.sum delete mode 100644 
vendor/github.com/lucas-clemente/quic-go/h2quic/client.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/h2quic/gzipreader.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/h2quic/request.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/h2quic/request_body.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/h2quic/request_writer.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/h2quic/response.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/h2quic/response_body.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/h2quic/response_writer.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/h2quic/response_writer_closenotifier.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/h2quic/roundtrip.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/h2quic/server.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/interface.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/gen.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/interfaces.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_linkedlist.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_number_generator.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_handler.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_history.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_tracker.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/retransmittable.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/send_mode.go delete mode 100644 
vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_handler.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/congestion/bandwidth.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/congestion/clock.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic_sender.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/congestion/hybrid_slow_start.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/congestion/interface.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/congestion/prr_sender.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/congestion/rtt_stats.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/congestion/stats.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/base_flow_controller.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/connection_flow_controller.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/interface.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/stream_flow_controller.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/handshake/aead.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/handshake/cookie_generator.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/handshake/cookie_protector.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/handshake/crypto_setup.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/handshake/initial_aead.go delete mode 100644 
vendor/github.com/lucas-clemente/quic-go/internal/handshake/interface.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/handshake/qtls.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/handshake/tls_extension_handler.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/handshake/transport_parameters.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/handshake/unsafe.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/protocol/connection_id.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/protocol/encryption_level.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/protocol/packet_number.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/protocol/params.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/protocol/perspective.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/protocol/protocol.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/protocol/stream_id.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/protocol/version.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/qerr/error_codes.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/qerr/quic_error.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/atomic_bool.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/byteinterval_linkedlist.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder_big_endian.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/gen.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/host.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/log.go 
delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/minmax.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/packet_interval.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/packetinterval_linkedlist.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/streamframe_interval.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/timer.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/utils/varint.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_range.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/connection_close_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/crypto_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/data_blocked_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/extended_header.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/frame_parser.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/header.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/interface.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/log.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/max_data_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/max_stream_data_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/max_streams_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/new_connection_id_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/new_token_frame.go delete mode 100644 
vendor/github.com/lucas-clemente/quic-go/internal/wire/path_challenge_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/path_response_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/ping_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/reset_stream_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/retire_connection_id_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/stop_sending_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_data_blocked_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/streams_blocked_frame.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/internal/wire/version_negotiation.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/mockgen.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh delete mode 100644 vendor/github.com/lucas-clemente/quic-go/multiplexer.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/packet_handler_map.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/packet_packer.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/packet_unpacker.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/receive_stream.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/send_stream.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/server.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/session.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/stream.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/streams_map.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/streams_map_generic_helper.go delete mode 100644 
vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_bidi.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_generic.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_uni.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_bidi.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_generic.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_uni.go delete mode 100644 vendor/github.com/lucas-clemente/quic-go/window_update_queue.go delete mode 100644 vendor/github.com/marten-seemann/qtls/alert.go delete mode 100644 vendor/github.com/marten-seemann/qtls/auth.go delete mode 100644 vendor/github.com/marten-seemann/qtls/cipher_suites.go delete mode 100644 vendor/github.com/marten-seemann/qtls/common.go delete mode 100644 vendor/github.com/marten-seemann/qtls/conn.go delete mode 100644 vendor/github.com/marten-seemann/qtls/generate_cert.go delete mode 100644 vendor/github.com/marten-seemann/qtls/go.mod delete mode 100644 vendor/github.com/marten-seemann/qtls/go.sum delete mode 100644 vendor/github.com/marten-seemann/qtls/handshake_client.go delete mode 100644 vendor/github.com/marten-seemann/qtls/handshake_client_tls13.go delete mode 100644 vendor/github.com/marten-seemann/qtls/handshake_messages.go delete mode 100644 vendor/github.com/marten-seemann/qtls/handshake_server.go delete mode 100644 vendor/github.com/marten-seemann/qtls/handshake_server_tls13.go delete mode 100644 vendor/github.com/marten-seemann/qtls/key_agreement.go delete mode 100644 vendor/github.com/marten-seemann/qtls/key_schedule.go delete mode 100644 vendor/github.com/marten-seemann/qtls/prf.go delete mode 100644 vendor/github.com/marten-seemann/qtls/ticket.go delete mode 100644 vendor/github.com/marten-seemann/qtls/tls.go delete mode 100644 vendor/github.com/mholt/caddy/.gitattributes delete mode 100644 
vendor/github.com/mholt/caddy/.gitignore delete mode 100644 vendor/github.com/mholt/caddy/LICENSE.txt delete mode 100644 vendor/github.com/mholt/caddy/README.md delete mode 100644 vendor/github.com/mholt/caddy/assets.go delete mode 100644 vendor/github.com/mholt/caddy/azure-pipelines.yml delete mode 100644 vendor/github.com/mholt/caddy/caddy.go delete mode 100644 vendor/github.com/mholt/caddy/caddy/caddymain/run.go delete mode 100644 vendor/github.com/mholt/caddy/caddyfile/dispenser.go delete mode 100644 vendor/github.com/mholt/caddy/caddyfile/json.go delete mode 100644 vendor/github.com/mholt/caddy/caddyfile/lexer.go delete mode 100644 vendor/github.com/mholt/caddy/caddyfile/parse.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/basicauth/basicauth.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/basicauth/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/bind/bind.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/browse/browse.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/browse/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/caddyhttp.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/errors/errors.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/errors/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/expvar/expvar.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/expvar/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/extensions/ext.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/extensions/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fastcgi.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fcgi_test.php delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fcgiclient.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/fastcgi/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/gzip/gzip.go 
delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/gzip/requestfilter.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/gzip/responsefilter.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/gzip/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/header/header.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/header/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/condition.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/error.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/https.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/logger.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/middleware.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/mitm.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/path.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/plugin.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/recorder.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/replacer.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/responsewriterwrapper.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/roller.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/server.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/siteconfig.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/tplcontext.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/httpserver/vhosttrie.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/index/index.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/internalsrv/internal.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/internalsrv/setup.go delete mode 100644 
vendor/github.com/mholt/caddy/caddyhttp/limits/handler.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/limits/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/log/log.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/log/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/markdown/markdown.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_json.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_none.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_toml.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_yaml.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/markdown/process.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/markdown/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/markdown/summary/render.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/markdown/summary/summary.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/markdown/template.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/mime/mime.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/mime/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/pprof/pprof.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/pprof/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/proxy/body.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/proxy/policy.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/proxy/proxy.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/proxy/reverseproxy.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/proxy/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/proxy/upstream.go delete mode 100644 
vendor/github.com/mholt/caddy/caddyhttp/push/handler.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/push/link_parser.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/push/push.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/push/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/redirect/redirect.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/redirect/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/requestid/requestid.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/requestid/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/rewrite/rewrite.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/rewrite/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/rewrite/to.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/root/root.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/staticfiles/fileserver.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/status/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/status/status.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/templates/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/templates/templates.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/timeouts/timeouts.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/websocket/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddyhttp/websocket/websocket.go delete mode 100644 vendor/github.com/mholt/caddy/caddytls/config.go delete mode 100644 vendor/github.com/mholt/caddy/caddytls/crypto.go delete mode 100644 vendor/github.com/mholt/caddy/caddytls/handshake.go delete mode 100644 vendor/github.com/mholt/caddy/caddytls/selfsigned.go delete mode 100644 vendor/github.com/mholt/caddy/caddytls/setup.go delete mode 100644 vendor/github.com/mholt/caddy/caddytls/tls.go delete mode 100644 
vendor/github.com/mholt/caddy/commands.go delete mode 100644 vendor/github.com/mholt/caddy/controller.go delete mode 100644 vendor/github.com/mholt/caddy/go.mod delete mode 100644 vendor/github.com/mholt/caddy/go.sum delete mode 100644 vendor/github.com/mholt/caddy/onevent/hook/config.go delete mode 100644 vendor/github.com/mholt/caddy/onevent/hook/hook.go delete mode 100644 vendor/github.com/mholt/caddy/onevent/on.go delete mode 100644 vendor/github.com/mholt/caddy/plugins.go delete mode 100644 vendor/github.com/mholt/caddy/rlimit_nonposix.go delete mode 100644 vendor/github.com/mholt/caddy/rlimit_posix.go delete mode 100644 vendor/github.com/mholt/caddy/sigtrap.go delete mode 100644 vendor/github.com/mholt/caddy/sigtrap_nonposix.go delete mode 100644 vendor/github.com/mholt/caddy/sigtrap_posix.go delete mode 100644 vendor/github.com/mholt/caddy/telemetry/collection.go delete mode 100644 vendor/github.com/mholt/caddy/telemetry/telemetry.go delete mode 100644 vendor/github.com/mholt/caddy/upgrade.go delete mode 100644 vendor/github.com/mholt/certmagic/.gitignore delete mode 100644 vendor/github.com/mholt/certmagic/.travis.yml delete mode 100644 vendor/github.com/mholt/certmagic/LICENSE.txt delete mode 100644 vendor/github.com/mholt/certmagic/README.md delete mode 100644 vendor/github.com/mholt/certmagic/appveyor.yml delete mode 100644 vendor/github.com/mholt/certmagic/cache.go delete mode 100644 vendor/github.com/mholt/certmagic/certificates.go delete mode 100644 vendor/github.com/mholt/certmagic/certmagic.go delete mode 100644 vendor/github.com/mholt/certmagic/client.go delete mode 100644 vendor/github.com/mholt/certmagic/config.go delete mode 100644 vendor/github.com/mholt/certmagic/crypto.go delete mode 100644 vendor/github.com/mholt/certmagic/filestorage.go delete mode 100644 vendor/github.com/mholt/certmagic/go.mod delete mode 100644 vendor/github.com/mholt/certmagic/go.sum delete mode 100644 vendor/github.com/mholt/certmagic/handshake.go delete mode 100644 
vendor/github.com/mholt/certmagic/httphandler.go delete mode 100644 vendor/github.com/mholt/certmagic/maintain.go delete mode 100644 vendor/github.com/mholt/certmagic/ocsp.go delete mode 100644 vendor/github.com/mholt/certmagic/solvers.go delete mode 100644 vendor/github.com/mholt/certmagic/storage.go delete mode 100644 vendor/github.com/mholt/certmagic/user.go delete mode 100644 vendor/github.com/miekg/dns/.codecov.yml delete mode 100644 vendor/github.com/miekg/dns/.gitignore delete mode 100644 vendor/github.com/miekg/dns/.travis.yml delete mode 100644 vendor/github.com/miekg/dns/AUTHORS delete mode 100644 vendor/github.com/miekg/dns/CONTRIBUTORS delete mode 100644 vendor/github.com/miekg/dns/COPYRIGHT delete mode 100644 vendor/github.com/miekg/dns/Gopkg.lock delete mode 100644 vendor/github.com/miekg/dns/Gopkg.toml delete mode 100644 vendor/github.com/miekg/dns/LICENSE delete mode 100644 vendor/github.com/miekg/dns/Makefile.fuzz delete mode 100644 vendor/github.com/miekg/dns/Makefile.release delete mode 100644 vendor/github.com/miekg/dns/README.md delete mode 100644 vendor/github.com/miekg/dns/acceptfunc.go delete mode 100644 vendor/github.com/miekg/dns/client.go delete mode 100644 vendor/github.com/miekg/dns/clientconfig.go delete mode 100644 vendor/github.com/miekg/dns/dane.go delete mode 100644 vendor/github.com/miekg/dns/defaults.go delete mode 100644 vendor/github.com/miekg/dns/dns.go delete mode 100644 vendor/github.com/miekg/dns/dnssec.go delete mode 100644 vendor/github.com/miekg/dns/dnssec_keygen.go delete mode 100644 vendor/github.com/miekg/dns/dnssec_keyscan.go delete mode 100644 vendor/github.com/miekg/dns/dnssec_privkey.go delete mode 100644 vendor/github.com/miekg/dns/doc.go delete mode 100644 vendor/github.com/miekg/dns/duplicate.go delete mode 100644 vendor/github.com/miekg/dns/duplicate_generate.go delete mode 100644 vendor/github.com/miekg/dns/edns.go delete mode 100644 vendor/github.com/miekg/dns/format.go delete mode 100644 
vendor/github.com/miekg/dns/fuzz.go delete mode 100644 vendor/github.com/miekg/dns/generate.go delete mode 100644 vendor/github.com/miekg/dns/labels.go delete mode 100644 vendor/github.com/miekg/dns/listen_go111.go delete mode 100644 vendor/github.com/miekg/dns/listen_go_not111.go delete mode 100644 vendor/github.com/miekg/dns/msg.go delete mode 100644 vendor/github.com/miekg/dns/msg_generate.go delete mode 100644 vendor/github.com/miekg/dns/msg_helpers.go delete mode 100644 vendor/github.com/miekg/dns/msg_truncate.go delete mode 100644 vendor/github.com/miekg/dns/nsecx.go delete mode 100644 vendor/github.com/miekg/dns/privaterr.go delete mode 100644 vendor/github.com/miekg/dns/reverse.go delete mode 100644 vendor/github.com/miekg/dns/sanitize.go delete mode 100644 vendor/github.com/miekg/dns/scan.go delete mode 100644 vendor/github.com/miekg/dns/scan_rr.go delete mode 100644 vendor/github.com/miekg/dns/serve_mux.go delete mode 100644 vendor/github.com/miekg/dns/server.go delete mode 100644 vendor/github.com/miekg/dns/sig0.go delete mode 100644 vendor/github.com/miekg/dns/singleinflight.go delete mode 100644 vendor/github.com/miekg/dns/smimea.go delete mode 100644 vendor/github.com/miekg/dns/tlsa.go delete mode 100644 vendor/github.com/miekg/dns/tsig.go delete mode 100644 vendor/github.com/miekg/dns/types.go delete mode 100644 vendor/github.com/miekg/dns/types_generate.go delete mode 100644 vendor/github.com/miekg/dns/udp.go delete mode 100644 vendor/github.com/miekg/dns/udp_windows.go delete mode 100644 vendor/github.com/miekg/dns/update.go delete mode 100644 vendor/github.com/miekg/dns/version.go delete mode 100644 vendor/github.com/miekg/dns/xfr.go delete mode 100644 vendor/github.com/miekg/dns/zduplicate.go delete mode 100644 vendor/github.com/miekg/dns/zmsg.go delete mode 100644 vendor/github.com/miekg/dns/ztypes.go delete mode 100644 vendor/github.com/naoina/go-stringutil/.travis.yml delete mode 100644 vendor/github.com/naoina/go-stringutil/LICENSE delete 
mode 100644 vendor/github.com/naoina/go-stringutil/README.md delete mode 100644 vendor/github.com/naoina/go-stringutil/da.go delete mode 100644 vendor/github.com/naoina/go-stringutil/strings.go delete mode 100644 vendor/github.com/naoina/toml/.travis.yml delete mode 100644 vendor/github.com/naoina/toml/LICENSE delete mode 100644 vendor/github.com/naoina/toml/README.md delete mode 100644 vendor/github.com/naoina/toml/ast/ast.go delete mode 100644 vendor/github.com/naoina/toml/config.go delete mode 100644 vendor/github.com/naoina/toml/decode.go delete mode 100644 vendor/github.com/naoina/toml/encode.go delete mode 100644 vendor/github.com/naoina/toml/error.go delete mode 100644 vendor/github.com/naoina/toml/parse.go delete mode 100644 vendor/github.com/naoina/toml/parse.peg delete mode 100644 vendor/github.com/naoina/toml/parse.peg.go delete mode 100644 vendor/github.com/naoina/toml/util.go create mode 100644 vendor/github.com/openshift/api/apps/v1/consts.go create mode 100644 vendor/github.com/openshift/api/apps/v1/deprecated_consts.go create mode 100644 vendor/github.com/openshift/api/apps/v1/legacy.go rename vendor/github.com/openshift/api/apps/v1/{types_swagger_doc_generated.go => zz_generated.swagger_doc_generated.go} (92%) create mode 100644 vendor/github.com/openshift/api/project/v1/legacy.go rename vendor/github.com/openshift/api/project/v1/{types_swagger_doc_generated.go => zz_generated.swagger_doc_generated.go} (51%) delete mode 100644 vendor/github.com/russross/blackfriday/.gitignore delete mode 100644 vendor/github.com/russross/blackfriday/.travis.yml delete mode 100644 vendor/github.com/russross/blackfriday/LICENSE.txt delete mode 100644 vendor/github.com/russross/blackfriday/README.md delete mode 100644 vendor/github.com/russross/blackfriday/block.go delete mode 100644 vendor/github.com/russross/blackfriday/doc.go delete mode 100644 vendor/github.com/russross/blackfriday/go.mod delete mode 100644 vendor/github.com/russross/blackfriday/html.go delete 
mode 100644 vendor/github.com/russross/blackfriday/inline.go delete mode 100644 vendor/github.com/russross/blackfriday/latex.go delete mode 100644 vendor/github.com/russross/blackfriday/markdown.go delete mode 100644 vendor/github.com/russross/blackfriday/smartypants.go create mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go create mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go create mode 100644 vendor/golang.org/x/crypto/blowfish/block.go create mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go create mode 100644 vendor/golang.org/x/crypto/blowfish/const.go delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go delete mode 100644 vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go delete mode 100644 vendor/golang.org/x/crypto/hkdf/hkdf.go delete mode 100644 vendor/golang.org/x/crypto/ocsp/ocsp.go delete mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go delete mode 100644 vendor/golang.org/x/net/bpf/asm.go delete mode 100644 vendor/golang.org/x/net/bpf/constants.go delete mode 100644 vendor/golang.org/x/net/bpf/doc.go delete mode 100644 vendor/golang.org/x/net/bpf/instructions.go delete mode 100644 vendor/golang.org/x/net/bpf/setter.go delete mode 100644 vendor/golang.org/x/net/bpf/vm.go delete mode 100644 vendor/golang.org/x/net/bpf/vm_instructions.go delete mode 100644 vendor/golang.org/x/net/html/atom/gen.go delete mode 100644 vendor/golang.org/x/net/internal/iana/const.go delete mode 100644 vendor/golang.org/x/net/internal/iana/gen.go delete mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr.go delete mode 100644 
vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_aix.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_linux.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_solaris.go delete mode 100644 vendor/golang.org/x/net/internal/socket/empty.s delete mode 100644 vendor/golang.org/x/net/internal/socket/error_unix.go delete mode 100644 vendor/golang.org/x/net/internal/socket/error_windows.go delete mode 100644 vendor/golang.org/x/net/internal/socket/iovec_32bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/iovec_64bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/iovec_stub.go delete mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go delete mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go delete mode 100644 
vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_stub.go delete mode 100644 vendor/golang.org/x/net/internal/socket/rawconn.go delete mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go delete mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_msg.go delete mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go delete mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go delete mode 100644 vendor/golang.org/x/net/internal/socket/socket.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsdvar.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_const_unix.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_darwin.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_dragonfly.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linkname.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.s delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go delete mode 100644 
vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_riscv64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_netbsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_posix.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_stub.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_unix.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_windows.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_aix_ppc64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_386.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go 
delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_riscv64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go delete mode 100644 vendor/golang.org/x/net/ipv4/batch.go delete mode 100644 vendor/golang.org/x/net/ipv4/control.go delete mode 100644 vendor/golang.org/x/net/ipv4/control_bsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/control_pktinfo.go delete mode 100644 vendor/golang.org/x/net/ipv4/control_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/control_unix.go delete mode 100644 vendor/golang.org/x/net/ipv4/control_windows.go delete mode 100644 
vendor/golang.org/x/net/ipv4/defs_aix.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_linux.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv4/dgramopt.go delete mode 100644 vendor/golang.org/x/net/ipv4/doc.go delete mode 100644 vendor/golang.org/x/net/ipv4/endpoint.go delete mode 100644 vendor/golang.org/x/net/ipv4/gen.go delete mode 100644 vendor/golang.org/x/net/ipv4/genericopt.go delete mode 100644 vendor/golang.org/x/net/ipv4/header.go delete mode 100644 vendor/golang.org/x/net/ipv4/helper.go delete mode 100644 vendor/golang.org/x/net/ipv4/iana.go delete mode 100644 vendor/golang.org/x/net/ipv4/icmp.go delete mode 100644 vendor/golang.org/x/net/ipv4/icmp_linux.go delete mode 100644 vendor/golang.org/x/net/ipv4/icmp_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/packet.go delete mode 100644 vendor/golang.org/x/net/ipv4/payload.go delete mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg.go delete mode 100644 vendor/golang.org/x/net/ipv4/payload_nocmsg.go delete mode 100644 vendor/golang.org/x/net/ipv4/sockopt.go delete mode 100644 vendor/golang.org/x/net/ipv4/sockopt_posix.go delete mode 100644 vendor/golang.org/x/net/ipv4/sockopt_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_aix.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf.go delete mode 100644 
vendor/golang.org/x/net/ipv4/sys_bpf_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_bsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_freebsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_linux.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_windows.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_aix_ppc64.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_386.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_riscv64.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go delete mode 100644 
vendor/golang.org/x/net/ipv4/zsys_netbsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_openbsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv6/batch.go delete mode 100644 vendor/golang.org/x/net/ipv6/control.go delete mode 100644 vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go delete mode 100644 vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go delete mode 100644 vendor/golang.org/x/net/ipv6/control_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/control_unix.go delete mode 100644 vendor/golang.org/x/net/ipv6/control_windows.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_aix.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_linux.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv6/dgramopt.go delete mode 100644 vendor/golang.org/x/net/ipv6/doc.go delete mode 100644 vendor/golang.org/x/net/ipv6/endpoint.go delete mode 100644 vendor/golang.org/x/net/ipv6/gen.go delete mode 100644 vendor/golang.org/x/net/ipv6/genericopt.go delete mode 100644 vendor/golang.org/x/net/ipv6/header.go delete mode 100644 vendor/golang.org/x/net/ipv6/helper.go delete mode 100644 vendor/golang.org/x/net/ipv6/iana.go delete mode 100644 vendor/golang.org/x/net/ipv6/icmp.go delete mode 100644 vendor/golang.org/x/net/ipv6/icmp_bsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/icmp_linux.go delete mode 100644 vendor/golang.org/x/net/ipv6/icmp_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv6/icmp_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/icmp_windows.go delete mode 100644 
vendor/golang.org/x/net/ipv6/payload.go delete mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg.go delete mode 100644 vendor/golang.org/x/net/ipv6/payload_nocmsg.go delete mode 100644 vendor/golang.org/x/net/ipv6/sockopt.go delete mode 100644 vendor/golang.org/x/net/ipv6/sockopt_posix.go delete mode 100644 vendor/golang.org/x/net/ipv6/sockopt_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_aix.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_bsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_freebsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_linux.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_windows.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_aix_ppc64.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_386.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips.go delete mode 100644 
vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_riscv64.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_netbsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_openbsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/mkpost.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go delete mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go delete mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go delete mode 100644 vendor/golang.org/x/sys/unix/types_aix.go delete mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go delete mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go delete mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go delete mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go delete mode 100644 vendor/golang.org/x/text/encoding/charmap/maketables.go delete mode 100644 vendor/golang.org/x/text/encoding/htmlindex/gen.go delete mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/gen.go delete mode 100644 vendor/golang.org/x/text/encoding/japanese/maketables.go delete mode 100644 
vendor/golang.org/x/text/encoding/korean/maketables.go delete mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go delete mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go delete mode 100644 vendor/golang.org/x/text/language/gen.go delete mode 100644 vendor/golang.org/x/text/language/gen_common.go delete mode 100644 vendor/golang.org/x/text/language/gen_index.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go delete mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go delete mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go delete mode 100644 vendor/golang.org/x/text/width/gen.go delete mode 100644 vendor/golang.org/x/text/width/gen_common.go delete mode 100644 vendor/golang.org/x/text/width/gen_trieval.go delete mode 100644 vendor/golang.org/x/tools/go/gcexportdata/main.go delete mode 100644 vendor/golang.org/x/tools/internal/imports/mkindex.go delete mode 100644 vendor/golang.org/x/tools/internal/imports/mkstdlib.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc delete mode 100644 vendor/gopkg.in/square/go-jose.v2/.gitignore delete mode 100644 vendor/gopkg.in/square/go-jose.v2/.travis.yml delete mode 100644 vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md delete mode 100644 vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md delete mode 100644 vendor/gopkg.in/square/go-jose.v2/LICENSE delete mode 100644 vendor/gopkg.in/square/go-jose.v2/README.md delete mode 100644 vendor/gopkg.in/square/go-jose.v2/asymmetric.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go delete mode 
100644 vendor/gopkg.in/square/go-jose.v2/crypter.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/doc.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/encoding.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/LICENSE delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/README.md delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/decode.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/encode.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/indent.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/scanner.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/stream.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/json/tags.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/jwe.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/jwk.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/jws.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/opaque.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/shared.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/signing.go delete mode 100644 vendor/gopkg.in/square/go-jose.v2/symmetric.go delete mode 100644 vendor/openpitrix.io/openpitrix/pkg/version/gen_helper.go diff --git a/cmd/controller-manager/app/server.go b/cmd/controller-manager/app/server.go index 647fb9c98..c1eb2e079 100644 --- a/cmd/controller-manager/app/server.go +++ b/cmd/controller-manager/app/server.go @@ -151,6 +151,14 @@ func Run(s *options.KubeSphereControllerManagerOptions, stopCh <-chan struct{}) // Start cache data after all informer is registered informerFactory.Start(stopCh) + // Setup webhooks TODO enable webhook + //klog.Info("setting up webhook server") + //hookServer := mgr.GetWebhookServer() + // + //klog.Info("registering webhooks to the webhook server") + //hookServer.Register("//mutating-encrypt-password-iam-kubesphere-io-v1alpha2-user", &webhook.Admission{Handler: &user.PasswordCipher{Client: mgr.GetClient()}}) + 
//hookServer.Register("/validate-email-iam-kubesphere-io-v1alpha2-user", &webhook.Admission{Handler: &user.EmailValidator{Client: mgr.GetClient()}}) + klog.V(0).Info("Starting the controllers.") if err = mgr.Start(stopCh); err != nil { klog.Fatalf("unable to run the manager: %v", err) diff --git a/config/crds/iam.kubesphere.io_policyrules.yaml b/config/crds/iam.kubesphere.io_policyrules.yaml new file mode 100644 index 000000000..674c97416 --- /dev/null +++ b/config/crds/iam.kubesphere.io_policyrules.yaml @@ -0,0 +1,53 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: policyrules.iam.kubesphere.io +spec: + group: iam.kubesphere.io + names: + categories: + - iam + kind: PolicyRule + listKind: PolicyRuleList + plural: policyrules + singular: policyrule + scope: Cluster + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + level: + type: string + metadata: + type: object + rego: + type: string + required: + - level + - rego + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crds/iam.kubesphere.io_rolebindings.yaml b/config/crds/iam.kubesphere.io_rolebindings.yaml new file mode 100644 index 000000000..a5c835f40 --- /dev/null +++ b/config/crds/iam.kubesphere.io_rolebindings.yaml @@ -0,0 +1,93 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: rolebindings.iam.kubesphere.io +spec: + group: iam.kubesphere.io + names: + categories: + - iam + kind: RoleBinding + listKind: RoleBindingList + plural: rolebindings + singular: rolebinding + scope: Cluster + validation: + openAPIV3Schema: + description: RoleBinding is the Schema for the rolebindings API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + level: + type: string + metadata: + type: object + roleRef: + description: RoleRef contains information that points to the role being + used + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - apiGroup + - kind + - name + type: object + subjects: + description: Subjects holds references to the users the role applies to. + items: + description: or a value for non-objects such as user and group names. + properties: + apiGroup: + description: APIGroup holds the API group of the referenced subject. + type: string + kind: + description: Kind of object being referenced. Values defined by this + API group are "User", "Group", and "ServiceAccount". If the Authorizer + does not recognized the kind value, the Authorizer should report + an error. + type: string + name: + description: Name of the object being referenced. 
+ type: string + required: + - apiGroup + - kind + - name + type: object + type: array + required: + - level + - roleRef + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crds/iam.kubesphere.io_roles.yaml b/config/crds/iam.kubesphere.io_roles.yaml new file mode 100644 index 000000000..144333b4a --- /dev/null +++ b/config/crds/iam.kubesphere.io_roles.yaml @@ -0,0 +1,81 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: roles.iam.kubesphere.io +spec: + group: iam.kubesphere.io + names: + categories: + - iam + kind: Role + listKind: RoleList + plural: roles + singular: role + scope: Cluster + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + rules: + items: + description: RuleRef contains information that points to the role being + used + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - apiGroup + - kind + - name + type: object + type: array + scope: + properties: + level: + type: string + scopes: + items: + type: string + type: array + required: + - level + - scopes + type: object + required: + - rules + - scope + type: object + version: v1alpha2 + versions: + - name: v1alpha2 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/samples/iam_v1alpha2_policyrule.yaml b/config/samples/iam_v1alpha2_policyrule.yaml new file mode 100644 index 000000000..6f2cd420b --- /dev/null +++ b/config/samples/iam_v1alpha2_policyrule.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.kubesphere.io/v1alpha2 +kind: PolicyRule +metadata: + labels: + controller-tools.k8s.io: "1.0" + name: policyrule-sample +spec: + # Add fields here + foo: bar diff --git a/config/samples/iam_v1alpha2_role.yaml b/config/samples/iam_v1alpha2_role.yaml new file mode 100644 index 000000000..8c81faa95 --- /dev/null +++ b/config/samples/iam_v1alpha2_role.yaml @@ -0,0 +1,9 @@ +apiVersion: iam.kubesphere.io/v1alpha2 +kind: Role +metadata: + labels: + controller-tools.k8s.io: "1.0" + name: role-sample +spec: + # Add fields here + foo: bar diff --git a/config/samples/iam_v1alpha2_rolebinding.yaml b/config/samples/iam_v1alpha2_rolebinding.yaml new file mode 100644 index 000000000..d0c5b34e7 --- /dev/null +++ b/config/samples/iam_v1alpha2_rolebinding.yaml @@ -0,0 +1,9 @@ +apiVersion: 
iam.kubesphere.io/v1alpha2 +kind: RoleBinding +metadata: + labels: + controller-tools.k8s.io: "1.0" + name: rolebinding-sample +spec: + # Add fields here + foo: bar diff --git a/config/samples/iam_v1alpha2_user.yaml b/config/samples/iam_v1alpha2_user.yaml index 8bd165623..e8661efb2 100644 --- a/config/samples/iam_v1alpha2_user.yaml +++ b/config/samples/iam_v1alpha2_user.yaml @@ -6,4 +6,4 @@ metadata: name: admin spec: email: admin@kubesphere.io - password: d41d8cd98f00b204e9800998ecf8427e + password: $2a$04$wr/XmTQ99uQpgi335xPyoOM08h34ZQk265pdqHMv5Yw6Xo2vfiO/6 diff --git a/config/webhook/iam.yaml b/config/webhook/iam.yaml new file mode 100644 index 000000000..5f2cf6522 --- /dev/null +++ b/config/webhook/iam.yaml @@ -0,0 +1,55 @@ + +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: kubesphere-iam-validator +webhooks: + - admissionReviewVersions: + - v1beta1 + clientConfig: + caBundle: + service: + name: webhook-service + namespace: kubesphere-system + path: /validate-email-iam-kubesphere-io-v1alpha2-user + failurePolicy: Fail + name: vemail.iam.kubesphere.io + rules: + - apiGroups: + - iam.kubesphere.io + apiVersions: + - v1alpha2 + operations: + - CREATE + - UPDATE + resources: + - users + +--- +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: MutatingWebhookConfiguration +metadata: + name: kubesphere-iam-injector +webhooks: + - admissionReviewVersions: + - v1beta1 + clientConfig: + caBundle: + service: + name: webhook-service + namespace: kubesphere-system + path: /mutating-encrypt-password-iam-kubesphere-io-v1alpha2-user + failurePolicy: Fail + name: mpassword.iam.kubesphere.io + reinvocationPolicy: Never + rules: + - apiGroups: + - iam.kubesphere.io + apiVersions: + - v1alpha2 + operations: + - CREATE + - UPDATE + resources: + - users diff --git a/go.mod b/go.mod index 8a17b7d5d..8def09c3c 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,6 @@ require ( github.com/asaskevich/govalidator 
v0.0.0-20190424111038-f61b66f89f4a github.com/aws/aws-sdk-go v1.22.2 github.com/beevik/etree v1.1.0 - github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible github.com/docker/distribution v2.7.1+incompatible github.com/docker/docker v1.4.2-0.20190822205725-ed20165a37b4 @@ -54,24 +53,19 @@ require ( github.com/json-iterator/go v1.1.8 github.com/kelseyhightower/envconfig v1.4.0 // indirect github.com/kiali/kiali v0.15.1-0.20191210080139-edbbad1ef779 - github.com/klauspost/cpuid v1.2.1 // indirect github.com/kubernetes-sigs/application v0.0.0-20191210100950-18cc93526ab4 github.com/kubesphere/sonargo v0.0.2 github.com/leodido/go-urn v1.1.0 // indirect github.com/lib/pq v1.2.0 // indirect - github.com/lucas-clemente/quic-go v0.11.1 // indirect github.com/mailru/easyjson v0.7.0 // indirect github.com/mattn/go-sqlite3 v1.11.0 // indirect - github.com/mholt/caddy v1.0.0 - github.com/mholt/certmagic v0.5.1 // indirect - github.com/miekg/dns v1.1.9 // indirect github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect github.com/onsi/ginkgo v1.8.0 github.com/onsi/gomega v1.5.0 github.com/open-policy-agent/opa v0.18.0 github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/image-spec v1.0.1 // indirect - github.com/openshift/api v3.9.0+incompatible // indirect + github.com/openshift/api v0.0.0-20180801171038-322a19404e37 // indirect github.com/pkg/errors v0.8.1 github.com/projectcalico/libcalico-go v1.7.2-0.20191104213956-8f81e1e344ce github.com/prometheus/common v0.4.0 @@ -82,23 +76,22 @@ require ( github.com/spf13/viper v1.4.0 github.com/stretchr/testify v1.4.0 github.com/xanzy/ssh-agent v0.2.1 // indirect - golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 - golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 + golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 google.golang.org/grpc v1.23.1 gopkg.in/asn1-ber.v1 
v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/go-playground/validator.v9 v9.29.1 // indirect - gopkg.in/square/go-jose.v2 v2.3.1 // indirect gopkg.in/src-d/go-billy.v4 v4.3.0 // indirect gopkg.in/src-d/go-git.v4 v4.11.0 gopkg.in/yaml.v2 v2.2.4 istio.io/api v0.0.0-20191111210003-35e06ef8d838 istio.io/client-go v0.0.0-20191113122552-9bd0ba57c3d2 - k8s.io/api v0.0.0-20191114100352-16d7abae0d2a + k8s.io/api v0.18.0 k8s.io/apiextensions-apiserver v0.0.0-20191114105449-027877536833 - k8s.io/apimachinery v0.0.0-20191028221656-72ed19daf4bb + k8s.io/apimachinery v0.18.0 k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682 k8s.io/client-go v0.0.0-20191114101535-6c5935290e33 - k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 + k8s.io/code-generator v0.18.0 k8s.io/component-base v0.0.0-20191114102325-35a9586014f7 k8s.io/gengo v0.0.0-20191120174120-e74f70b9b27e // indirect k8s.io/klog v1.0.0 @@ -301,7 +294,8 @@ replace ( github.com/open-policy-agent/opa => github.com/open-policy-agent/opa v0.18.0 github.com/opencontainers/go-digest => github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/image-spec => github.com/opencontainers/image-spec v1.0.1 - github.com/openshift/api => github.com/openshift/api v3.9.0+incompatible + github.com/openshift/api => github.com/openshift/api v0.0.0-20180801171038-322a19404e37 + github.com/openshift/build-machinery-go => github.com/openshift/build-machinery-go v0.0.0-20200211121458-5e3d6e570160 github.com/pborman/uuid => github.com/pborman/uuid v1.2.0 github.com/pelletier/go-buffruneio => github.com/pelletier/go-buffruneio v0.2.0 github.com/pelletier/go-toml => github.com/pelletier/go-toml v1.2.0 diff --git a/go.sum b/go.sum index e9ac5831d..6d86b60cd 100644 --- a/go.sum +++ b/go.sum @@ -58,12 +58,8 @@ github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert 
v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= -github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -96,8 +92,6 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c h1:ZfSZ3P3BedhKGUhzj7BQlPSU4OvT6tfOKe3DVHzOA7s= github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elastic/go-elasticsearch/v5 v5.6.1 h1:RnL2wcXepOT5SdoKMMO1j1OBX0vxHYbBtkQNL2E3xs4= github.com/elastic/go-elasticsearch/v5 v5.6.1/go.mod h1:r7uV7HidpfkYh7D8SB4lkS13TNlNy3oa5GNmTZvuVqY= github.com/elastic/go-elasticsearch/v6 v6.8.2 h1:rp5DGrd63V5c6nHLjF6QEXUpZSvs0+QM3ld7m9VhV2g= @@ -133,8 +127,6 @@ github.com/gliderlabs/ssh v0.1.1 
h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-acme/lego v2.5.0+incompatible h1:5fNN9yRQfv8ymH3DSsxla+4aYeQt2IgfZqHKVnK8f0s= -github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-ldap/ldap v3.0.3+incompatible h1:HTeSZO8hWMS1Rgb2Ziku6b8a7qRIZZMHjsvuZyatzwk= @@ -225,8 +217,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.6 h1:8p0pcgLlw2iuZVsdHdPaMUXFOA+6gDixcXbHEMzSyW8= github.com/grpc-ecosystem/grpc-gateway v1.9.6/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= @@ -242,8 +232,6 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= 
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a h1:BcF8coBl0QFVhe8vAMMlD+CV8EISiu9MGKLoj6ZEyJA= -github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= github.com/jinzhu/gorm v1.9.2/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo= github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.0.0/go.mod h1:oHTiXerJ20+SfYcrdlBO7rzZRJWGwSTQ0iUY2jI6Gfc= @@ -263,8 +251,6 @@ github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT github.com/keybase/go-ps v0.0.0-20161005175911-668c8856d999/go.mod h1:hY+WOq6m2FpbvyrI93sMaypsttvaIL5nhVR92dTMUcQ= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= -github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7 h1:SWlt7BoQNASbhTUD0Oy5yysI2seJ7vWuGUp///OM4TM= github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7/go.mod h1:Y2SaZf2Rzd0pXkLVhLlCiAXFCLSXAIbTKDivVgff/AM= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= @@ -282,20 +268,14 @@ github.com/kubesphere/kiali v0.15.1-0.20191210080139-edbbad1ef779 h1:52StEbBn6dR github.com/kubesphere/kiali v0.15.1-0.20191210080139-edbbad1ef779/go.mod h1:Y1EqeixoXkKkU8I+yvOfhdh21+8+etFE6wYOVT2XFdI= github.com/kubesphere/sonargo v0.0.2 h1:hsSRE3sv3mkPcUAeSABdp7rtfcNW2zzeHXzFa01CTkU= github.com/kubesphere/sonargo v0.0.2/go.mod h1:ww8n9ANlDXhX5PBZ18iaRnCgEkXN0GMml3/KZXOZ11w= -github.com/kylelemons/godebug 
v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8= github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lucas-clemente/quic-go v0.11.1 h1:zasajC848Dqq/+WqfqBCkmPw+YHNe1MBts/z7y7nXf4= -github.com/lucas-clemente/quic-go v0.11.1/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/marten-seemann/qtls v0.2.3 h1:0yWJ43C62LsZt08vuQJDK1uC1czUc3FJeCLPoNAI4vA= -github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= @@ -305,12 +285,6 @@ github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mholt/caddy v1.0.0 h1:KI6RPGih2GFzWRPG8s9clKK28Ns4ZlVMKR/v7mxq6+c= 
-github.com/mholt/caddy v1.0.0/go.mod h1:PzUpQ3yGCTuEuy0KSxEeB4TZOi3zBZ8BR/zY0RBP414= -github.com/mholt/certmagic v0.5.1 h1:8Pf6Hwwlh5sbT3nwn3ovXyXWxHCEM54wvfLzTrQ+UiM= -github.com/mholt/certmagic v0.5.1/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= -github.com/miekg/dns v1.1.9 h1:OIdC9wT96RzuZMf2PfKRhFgsStHUUBZLM/lo1LqiM9E= -github.com/miekg/dns v1.1.9/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= @@ -327,10 +301,6 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= -github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/naoina/toml v0.1.1 h1:PT/lllxVVN0gzzSqSlHEmP8MJB4MY2U7STGxiouV4X8= -github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= @@ -343,8 +313,10 @@ github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2i github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 
h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/openshift/api v3.9.0+incompatible h1:fJ/KsefYuZAjmrr3+5U9yZIZbTOpVkDDLDLFresAeYs= -github.com/openshift/api v3.9.0+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= +github.com/openshift/api v0.0.0-20180801171038-322a19404e37/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= +github.com/openshift/api v0.0.0-20200331152225-585af27e34fd h1:f4iPC9iCf1an7qEWpEFvp/swsM79vvBRsJ2twU4D30s= +github.com/openshift/api v0.0.0-20200331152225-585af27e34fd/go.mod h1:RKMJ5CBnljLfnej+BJ/xnOWc3kZDvJUaIAEq2oKSPtE= +github.com/openshift/build-machinery-go v0.0.0-20200211121458-5e3d6e570160/go.mod h1:1CkcsT3aVebzRBzVTSbiKSkJMsC/CASqxesfqEMfJEc= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA= @@ -492,8 +464,6 @@ gopkg.in/go-playground/validator.v9 v9.29.1 h1:SvGtYmN60a5CVKTOzMSyfzWDeZRxRuGvR gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/mcuadros/go-syslog.v2 v2.2.1 h1:60g8zx1BijSVSgLTzLCW9UC4/+i1Ih9jJ1DR5Tgp9vE= -gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= diff --git a/pkg/api/iam/user.go b/pkg/api/iam/user.go deleted file mode 100644 index 526cf346f..000000000 --- a/pkg/api/iam/user.go +++ 
/dev/null @@ -1,41 +0,0 @@ -package iam - -import ( - "kubesphere.io/kubesphere/pkg/server/errors" - "time" -) - -type User struct { - Name string `json:"username"` - UID string `json:"uid"` - Email string `json:"email"` - Lang string `json:"lang,omitempty"` - Description string `json:"description"` - CreateTime time.Time `json:"createTime"` - Groups []string `json:"groups,omitempty"` - Password string `json:"password,omitempty"` -} - -func (u *User) GetName() string { - return u.Name -} - -func (u *User) GetUID() string { - return u.UID -} - -func (u *User) GetEmail() string { - return u.Email -} - -func (u *User) Validate() error { - if u.Name == "" { - return errors.New("username can not be empty") - } - - if u.Password == "" { - return errors.New("password can not be empty") - } - - return nil -} diff --git a/pkg/api/types.go b/pkg/api/types.go index 8b1266f32..42863886d 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -6,8 +6,8 @@ import ( ) type ListResult struct { - Items []interface{} `json:"items,omitempty"` - TotalItems int `json:"totalItems,omitempty"` + Items []interface{} `json:"items"` + TotalItems int `json:"totalItems"` } type ResourceQuota struct { diff --git a/pkg/apis/iam/v1alpha2/register.go b/pkg/apis/iam/v1alpha2/register.go index 373dcf121..a2abceb52 100644 --- a/pkg/apis/iam/v1alpha2/register.go +++ b/pkg/apis/iam/v1alpha2/register.go @@ -25,8 +25,9 @@ limitations under the License. 
package v1alpha2 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" ) var ( @@ -34,7 +35,7 @@ var ( SchemeGroupVersion = schema.GroupVersion{Group: "iam.kubesphere.io", Version: "v1alpha2"} // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) // AddToScheme is required by pkg/client/... AddToScheme = SchemeBuilder.AddToScheme @@ -44,3 +45,18 @@ var ( func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &User{}, + &Role{}, + &RoleList{}, + &RoleBinding{}, + &RoleBindingList{}, + &PolicyRule{}, + &PolicyRuleList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/pkg/apis/iam/v1alpha2/user_types.go b/pkg/apis/iam/v1alpha2/types.go similarity index 53% rename from pkg/apis/iam/v1alpha2/user_types.go rename to pkg/apis/iam/v1alpha2/types.go index ee9034af7..aeb6a404e 100644 --- a/pkg/apis/iam/v1alpha2/user_types.go +++ b/pkg/apis/iam/v1alpha2/types.go @@ -20,14 +20,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
- -// User is the Schema for the users API -// +k8s:openapi-gen=true // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +genclient:nonNamespaced +// +k8s:openapi-gen=true + +// User is the Schema for the users API // +kubebuilder:printcolumn:name="Email",type="string",JSONPath=".spec.email" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.state" // +kubebuilder:resource:categories="iam",scope="Cluster" @@ -119,7 +117,6 @@ const ( ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient:nonNamespaced // UserList contains a list of User type UserList struct { @@ -128,6 +125,112 @@ type UserList struct { Items []User `json:"items"` } -func init() { - SchemeBuilder.Register(&User{}, &UserList{}) +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// +kubebuilder:resource:categories="iam",scope="Cluster" +type Role struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Scope Scope `json:"scope"` + Rules []RuleRef `json:"rules"` +} + +type Scope struct { + Level Level `json:"level"` + Scopes []string `json:"scopes"` +} + +type Level string + +const ( + LevelGlobal Level = "Global" + LevelCluster Level = "Cluster" + LevelWorkspace Level = "Workspace" + LevelNamespace Level = "Namespace" + ScopeALL = "*" +) + +// RuleRef contains information that points to the role being used +type RuleRef struct { + // APIGroup is the group for the resource being referenced + APIGroup string `json:"apiGroup"` + // Kind is the type of resource being referenced + Kind string `json:"kind"` + // Name is the name of resource being referenced + Name string `json:"name"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleList contains a list of Role +type RoleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Role `json:"items"` +} + +// 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// +kubebuilder:resource:categories="iam",scope="Cluster" +type PolicyRule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Level Level `json:"level"` + Rego string `json:"rego"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PolicyRuleList contains a list of PolicyRule +type PolicyRuleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PolicyRule `json:"items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBinding is the Schema for the rolebindings API +// +kubebuilder:resource:categories="iam",scope="Cluster" +type RoleBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Level Level `json:"level"` + RoleRef RoleRef `json:"roleRef"` + // Subjects holds references to the users the role applies to. + // +optional + Subjects []Subject `json:"subjects,omitempty"` +} + +// RoleRef contains information that points to the role being used +type RoleRef struct { + // APIGroup is the group for the resource being referenced + APIGroup string `json:"apiGroup"` + // Kind is the type of resource being referenced + Kind string `json:"kind"` + // Name is the name of resource being referenced + Name string `json:"name"` +} + +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + Kind string `json:"kind"` + // APIGroup holds the API group of the referenced subject. + APIGroup string `json:"apiGroup"` + // Name of the object being referenced. 
+ Name string `json:"name"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBindingList contains a list of RoleBinding +type RoleBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []RoleBinding `json:"items"` } diff --git a/pkg/apis/iam/v1alpha2/user_types_test.go b/pkg/apis/iam/v1alpha2/types_test.go similarity index 100% rename from pkg/apis/iam/v1alpha2/user_types_test.go rename to pkg/apis/iam/v1alpha2/types_test.go diff --git a/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go index a70563680..26394de1d 100644 --- a/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/iam/v1alpha2/zz_generated.deepcopy.go @@ -21,9 +21,257 @@ limitations under the License. package v1alpha2 import ( - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyRuleList) DeepCopyInto(out *PolicyRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRuleList. +func (in *PolicyRuleList) DeepCopy() *PolicyRuleList { + if in == nil { + return nil + } + out := new(PolicyRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Role) DeepCopyInto(out *Role) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Scope.DeepCopyInto(&out.Scope) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleRef, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. +func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Role) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.RoleRef = in.RoleRef + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. +func (in *RoleBinding) DeepCopy() *RoleBinding { + if in == nil { + return nil + } + out := new(RoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. +func (in *RoleBindingList) DeepCopy() *RoleBindingList { + if in == nil { + return nil + } + out := new(RoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoleList) DeepCopyInto(out *RoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. +func (in *RoleList) DeepCopy() *RoleList { + if in == nil { + return nil + } + out := new(RoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleRef) DeepCopyInto(out *RoleRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef. +func (in *RoleRef) DeepCopy() *RoleRef { + if in == nil { + return nil + } + out := new(RoleRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleRef) DeepCopyInto(out *RuleRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleRef. +func (in *RuleRef) DeepCopy() *RuleRef { + if in == nil { + return nil + } + out := new(RuleRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Scope) DeepCopyInto(out *Scope) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scope. +func (in *Scope) DeepCopy() *Scope { + if in == nil { + return nil + } + out := new(Scope) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *User) DeepCopyInto(out *User) { *out = *in diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 323ec1de1..91553bc32 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -193,7 +193,7 @@ func (s *APIServer) buildHandlerChain() { // authenticators are unordered authn := unionauth.New(anonymous.NewAuthenticator(), - basictoken.New(basic.NewBasicAuthenticator(im.NewFakeOperator())), + basictoken.New(basic.NewBasicAuthenticator(im.NewOperator(s.KubernetesClient.KubeSphere(), s.InformerFactory.KubeSphereSharedInformerFactory()))), bearertoken.New(jwttoken.NewTokenAuthenticator(token.NewJwtTokenIssuer(token.DefaultIssuerName, s.Config.AuthenticationOptions, s.CacheClient)))) handler = filters.WithAuthentication(handler, authn) handler = filters.WithRequestInfo(handler, requestInfoResolver) @@ -273,6 +273,7 @@ func (s *APIServer) waitForResourceSync(stopCh <-chan struct{}) error { ksGVRs := []schema.GroupVersionResource{ {Group: "tenant.kubesphere.io", Version: "v1alpha1", Resource: 
"workspaces"}, + {Group: "iam.kubesphere.io", Version: "v1alpha2", Resource: "users"}, {Group: "tower.kubesphere.io", Version: "v1alpha1", Resource: "agents"}, } diff --git a/pkg/apiserver/authentication/authenticators/basic/basic.go b/pkg/apiserver/authentication/authenticators/basic/basic.go index b97073a7e..434de152c 100644 --- a/pkg/apiserver/authentication/authenticators/basic/basic.go +++ b/pkg/apiserver/authentication/authenticators/basic/basic.go @@ -51,7 +51,7 @@ func (t *basicAuthenticator) AuthenticatePassword(ctx context.Context, username, return &authenticator.Response{ User: &user.DefaultInfo{ Name: providedUser.GetName(), - UID: providedUser.GetUID(), + UID: string(providedUser.GetUID()), Groups: []string{user.AllAuthenticated}, }, }, true, nil diff --git a/pkg/apiserver/authentication/token/jwt.go b/pkg/apiserver/authentication/token/jwt.go index ff2b18caf..e505f4765 100644 --- a/pkg/apiserver/authentication/token/jwt.go +++ b/pkg/apiserver/authentication/token/jwt.go @@ -21,7 +21,7 @@ package token import ( "fmt" "github.com/dgrijalva/jwt-go" - "kubesphere.io/kubesphere/pkg/api/iam" + "k8s.io/apiserver/pkg/authentication/user" authoptions "kubesphere.io/kubesphere/pkg/apiserver/authentication/options" "kubesphere.io/kubesphere/pkg/server/errors" "kubesphere.io/kubesphere/pkg/simple/client/cache" @@ -69,7 +69,7 @@ func (s *jwtTokenIssuer) Verify(tokenString string) (User, error) { return nil, err } - return &iam.User{Name: clm.Username, UID: clm.UID}, nil + return &user.DefaultInfo{Name: clm.Username, UID: clm.UID}, nil } func (s *jwtTokenIssuer) IssueTo(user User, expiresIn time.Duration) (string, error) { diff --git a/pkg/apiserver/authentication/token/jwt_test.go b/pkg/apiserver/authentication/token/jwt_test.go index dac84abb5..a1bd026c3 100644 --- a/pkg/apiserver/authentication/token/jwt_test.go +++ b/pkg/apiserver/authentication/token/jwt_test.go @@ -20,7 +20,7 @@ package token import ( "github.com/google/go-cmp/cmp" - 
"kubesphere.io/kubesphere/pkg/api/iam" + "k8s.io/apiserver/pkg/authentication/user" authoptions "kubesphere.io/kubesphere/pkg/apiserver/authentication/options" "kubesphere.io/kubesphere/pkg/simple/client/cache" "testing" @@ -48,7 +48,7 @@ func TestJwtTokenIssuer(t *testing.T) { } for _, testCase := range testCases { - user := &iam.User{ + user := &user.DefaultInfo{ Name: testCase.name, UID: testCase.uid, } diff --git a/pkg/apiserver/query/field.go b/pkg/apiserver/query/field.go index 996d4c2f5..e37ec7866 100644 --- a/pkg/apiserver/query/field.go +++ b/pkg/apiserver/query/field.go @@ -1,16 +1,19 @@ package query type Field string +type Value string const ( FieldName = "name" + FieldUID = "uid" FieldCreationTimeStamp = "creationTimestamp" FieldLastUpdateTimestamp = "lastUpdateTimestamp" FieldLabel = "label" + FieldAnnotation = "annotation" + FieldClusterName = "clusterName" FieldNamespace = "namespace" FieldStatus = "status" - FieldApplication = "application" - FieldOwner = "owner" + FieldOwnerReference = "ownerReference" FieldOwnerKind = "ownerKind" ) @@ -23,9 +26,12 @@ var SortableFields = []Field{ // Field contains all the query field that can be compared var ComparableFields = []Field{ FieldName, + FieldUID, + FieldLabel, + FieldAnnotation, + FieldClusterName, FieldNamespace, FieldStatus, - FieldApplication, - FieldOwner, + FieldOwnerReference, FieldOwnerKind, } diff --git a/pkg/apiserver/query/types.go b/pkg/apiserver/query/types.go index 6bc4054e6..7e101326c 100644 --- a/pkg/apiserver/query/types.go +++ b/pkg/apiserver/query/types.go @@ -3,7 +3,6 @@ package query import ( "github.com/emicklei/go-restful" "strconv" - "strings" ) const ( @@ -16,24 +15,6 @@ const ( ParameterAscending = "ascending" ) -type Comparable interface { - Compare(Comparable) int - - Contains(Comparable) bool -} - -type ComparableString string - -func (c ComparableString) Compare(comparable Comparable) int { - other := comparable.(ComparableString) - return strings.Compare(string(c), 
string(other)) -} - -func (c ComparableString) Contains(comparable Comparable) bool { - other := comparable.(ComparableString) - return strings.Contains(string(c), string(other)) -} - // Query represents api search terms type Query struct { Pagination *Pagination @@ -52,29 +33,30 @@ type Pagination struct { // items per page Limit int - // page number - Page int + // offset + Offset int } -var NoPagination = newPagination(-1, -1) +var NoPagination = newPagination(-1, 0) -func newPagination(limit int, page int) *Pagination { +// make sure that pagination is valid +func newPagination(limit int, offset int) *Pagination { return &Pagination{ - Limit: limit, - Page: page, + Limit: limit, + Offset: offset, } } -func (p *Pagination) IsValidPagintaion() bool { - return p.Limit >= 0 && p.Page >= 0 -} +func (p *Pagination) GetValidPagination(total int) (startIndex, endIndex int) { + if p.Limit == NoPagination.Limit { + return 0, total + } -func (p *Pagination) IsPageAvailable(total, startIndex int) bool { - return total > startIndex && p.Limit > 0 -} + if p.Limit < 0 || p.Offset < 0 { + return 0, 0 + } -func (p *Pagination) GetPaginationSettings(total int) (startIndex, endIndex int) { - startIndex = p.Limit * p.Page + startIndex = p.Limit * p.Offset endIndex = startIndex + p.Limit if endIndex > total { @@ -86,33 +68,33 @@ func (p *Pagination) GetPaginationSettings(total int) (startIndex, endIndex int) func New() *Query { return &Query{ - Pagination: &Pagination{ - Limit: -1, - Page: -1, - }, - SortBy: "", - Ascending: false, - Filters: []Filter{}, + Pagination: NoPagination, + SortBy: "", + Ascending: false, + Filters: []Filter{}, } } type Filter struct { Field Field - Value Comparable + Value Value } func ParseQueryParameter(request *restful.Request) *Query { query := New() - limit, err := strconv.ParseInt(request.QueryParameter("limit"), 10, 0) + limit, err := strconv.Atoi(request.QueryParameter("limit")) + // equivalent to undefined, use the default value if err != nil { 
- query.Pagination = NoPagination + limit = -1 + } + page, err := strconv.Atoi(request.QueryParameter("page")) + // equivalent to undefined, use the default value + if err != nil { + page = 1 } - page, err := strconv.ParseInt(request.QueryParameter("page"), 10, 0) - if err == nil { - query.Pagination = newPagination(int(limit), int(page-1)) - } + query.Pagination = newPagination(limit, page-1) query.SortBy = Field(defaultString(request.QueryParameter("sortBy"), FieldCreationTimeStamp)) @@ -128,13 +110,12 @@ func ParseQueryParameter(request *restful.Request) *Query { if len(f) != 0 { query.Filters = append(query.Filters, Filter{ Field: field, - Value: ComparableString(f), + Value: Value(f), }) } } return query - } func defaultString(value, defaultValue string) string { diff --git a/pkg/apiserver/query/types_test.go b/pkg/apiserver/query/types_test.go index a62fb0b61..e473da2fd 100644 --- a/pkg/apiserver/query/types_test.go +++ b/pkg/apiserver/query/types_test.go @@ -16,7 +16,7 @@ func TestParseQueryParameter(t *testing.T) { }{ { "test normal case", - "name=foo&status=Running&application=book&page=1&limit=10&ascending=true", + "label=app.kubernetes.io/name:book&name=foo&status=Running&page=1&limit=10&ascending=true", &Query{ Pagination: newPagination(10, 0), SortBy: FieldCreationTimeStamp, @@ -24,15 +24,15 @@ func TestParseQueryParameter(t *testing.T) { Filters: []Filter{ { FieldName, - ComparableString("foo"), + Value("foo"), + }, + { + FieldLabel, + Value("app.kubernetes.io/name:book"), }, { FieldStatus, - ComparableString("Running"), - }, - { - FieldApplication, - ComparableString("book"), + Value("Running"), }, }, }, @@ -41,13 +41,10 @@ func TestParseQueryParameter(t *testing.T) { "test bad case", "xxxx=xxxx&dsfsw=xxxx&page=abc&limit=add&ascending=ssss", &Query{ - Pagination: &Pagination{ - Limit: -1, - Page: -1, - }, - SortBy: FieldCreationTimeStamp, - Ascending: false, - Filters: []Filter{}, + Pagination: NoPagination, + SortBy: FieldCreationTimeStamp, + 
Ascending: false, + Filters: []Filter{}, }, }, } diff --git a/pkg/controller/clusterrolebinding/clusterrolebinding_controller.go b/pkg/controller/clusterrolebinding/clusterrolebinding_controller.go index 6058a738b..28cc58b61 100644 --- a/pkg/controller/clusterrolebinding/clusterrolebinding_controller.go +++ b/pkg/controller/clusterrolebinding/clusterrolebinding_controller.go @@ -169,7 +169,6 @@ func (r *ReconcileClusterRoleBinding) updateRoleBindings(clusterRoleBinding *rba if clusterRoleBinding.Name == getWorkspaceViewerRoleBindingName(workspaceName) { found := &rbac.RoleBinding{} - viewerBinding := &rbac.RoleBinding{} viewerBinding.Name = "viewer" viewerBinding.Namespace = namespace.Name diff --git a/pkg/controller/user/user_webhook.go b/pkg/controller/user/user_webhook.go new file mode 100644 index 000000000..036516be5 --- /dev/null +++ b/pkg/controller/user/user_webhook.go @@ -0,0 +1,102 @@ +/* + * + * Copyright 2020 The KubeSphere Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * / + */ + +package user + +import ( + "context" + "encoding/json" + "fmt" + "golang.org/x/crypto/bcrypt" + "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + "net/http" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "strconv" +) + +type EmailValidator struct { + Client client.Client + decoder *admission.Decoder +} + +type PasswordCipher struct { + Client client.Client + decoder *admission.Decoder +} + +func (a *EmailValidator) Handle(ctx context.Context, req admission.Request) admission.Response { + user := &v1alpha2.User{} + err := a.decoder.Decode(req, user) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + email := user.Spec.Email + + allUsers := v1alpha2.UserList{} + + a.Client.List(ctx, &v1alpha2.UserList{}, &client.ListOptions{}) + + found := emailAlreadyExist(allUsers, email) + + if !found { + return admission.Denied(fmt.Sprintf("email %s must be unique", email)) + } + + return admission.Allowed("") +} + +func emailAlreadyExist(users v1alpha2.UserList, email string) bool { + for _, user := range users.Items { + if user.Spec.Email == email { + return true + } + } + return false +} + +func (a *PasswordCipher) Handle(ctx context.Context, req admission.Request) admission.Response { + user := &v1alpha2.User{} + err := a.decoder.Decode(req, user) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + encrypted, err := strconv.ParseBool(user.Annotations["iam.kubesphere.io/password-encrypted"]) + + if err != nil || !encrypted { + password, err := hashPassword(user.Spec.EncryptedPassword) + if err != nil { + return admission.Errored(http.StatusInternalServerError, err) + } + user.Spec.EncryptedPassword = password + } + + marshaledUser, err := json.Marshal(user) + if err != nil { + return admission.Errored(http.StatusInternalServerError, err) + } + + return admission.PatchResponseFromRaw(req.Object.Raw, marshaledUser) +} + +func hashPassword(password 
string) (string, error) { + bytes, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.MinCost) + return string(bytes), err +} diff --git a/pkg/kapis/iam/v1alpha2/register.go b/pkg/kapis/iam/v1alpha2/register.go index 2a082d418..9ea21a33c 100644 --- a/pkg/kapis/iam/v1alpha2/register.go +++ b/pkg/kapis/iam/v1alpha2/register.go @@ -44,7 +44,7 @@ func AddToContainer(c *restful.Container, k8sClient k8s.Client, factory informer handler := newIAMHandler(k8sClient, factory, ldapClient, cacheClient, options) // implemented by create CRD object. - //ws.Route(ws.POST("/users")) + //ws.Route(ws.POST("/users").To(handler.CreateUser)) //ws.Route(ws.DELETE("/users/{user}")) //ws.Route(ws.PUT("/users/{user}")) //ws.Route(ws.GET("/users/{user}")) diff --git a/pkg/kapis/resources/v1alpha2/register.go b/pkg/kapis/resources/v1alpha2/register.go index ab91af3f0..a3e22a835 100644 --- a/pkg/kapis/resources/v1alpha2/register.go +++ b/pkg/kapis/resources/v1alpha2/register.go @@ -49,6 +49,7 @@ func AddToContainer(c *restful.Container, client kubernetes.Interface, factory i webservice.Route(webservice.GET("/namespaces/{namespace}/{resources}"). To(handler.handleListNamespaceResources). + Deprecate(). Metadata(restfulspec.KeyOpenAPITags, []string{constants.NamespaceResourcesTag}). Doc("Namespace level resource query"). Param(webservice.PathParameter("namespace", "the name of the project")). @@ -66,6 +67,7 @@ func AddToContainer(c *restful.Container, client kubernetes.Interface, factory i webservice.Route(webservice.GET("/{resources}"). To(handler.handleListNamespaceResources). + Deprecate(). Returns(http.StatusOK, api.StatusOK, models.PageableResponse{}). Metadata(restfulspec.KeyOpenAPITags, []string{constants.ClusterResourcesTag}). Doc("Cluster level resource query"). 
diff --git a/pkg/kapis/resources/v1alpha3/handler.go b/pkg/kapis/resources/v1alpha3/handler.go index 71883a012..5b59c5a1f 100644 --- a/pkg/kapis/resources/v1alpha3/handler.go +++ b/pkg/kapis/resources/v1alpha3/handler.go @@ -11,7 +11,7 @@ import ( ) type Handler struct { - namespacedResourceGetter *resource.NamespacedResourceGetter + namespacedResourceGetter *resource.ResourceGetter componentsGetter components.ComponentsGetter } @@ -22,22 +22,8 @@ func New(factory informers.InformerFactory) *Handler { } } -func (h Handler) handleGetNamespacedResource(request *restful.Request, response *restful.Response) { - resource := request.PathParameter("resources") - namespace := request.PathParameter("namespace") - name := request.PathParameter("name") - - result, err := h.namespacedResourceGetter.Get(resource, namespace, name) - if err != nil { - api.HandleInternalError(response, nil, err) - return - } - - response.WriteHeaderAndEntity(http.StatusOK, result) -} - -// handleListNamedResource retrieves namespaced scope resources -func (h Handler) handleListNamespacedResource(request *restful.Request, response *restful.Response) { +// handleListResources retrieves resources +func (h Handler) handleListResources(request *restful.Request, response *restful.Response) { query := query.ParseQueryParameter(request) resource := request.PathParameter("resources") namespace := request.PathParameter("namespace") @@ -48,7 +34,7 @@ func (h Handler) handleListNamespacedResource(request *restful.Request, response return } - response.WriteHeaderAndEntity(http.StatusOK, result) + response.WriteEntity(result) } func (h Handler) handleGetComponentStatus(request *restful.Request, response *restful.Response) { diff --git a/pkg/kapis/resources/v1alpha3/register.go b/pkg/kapis/resources/v1alpha3/register.go index 7d80e8792..f78945064 100644 --- a/pkg/kapis/resources/v1alpha3/register.go +++ b/pkg/kapis/resources/v1alpha3/register.go @@ -45,14 +45,26 @@ func AddToContainer(c *restful.Container, 
informerFactory informers.InformerFact webservice := runtime.NewWebService(GroupVersion) handler := New(informerFactory) + webservice.Route(webservice.GET("/{resources}"). + To(handler.handleListResources). + Metadata(restfulspec.KeyOpenAPITags, []string{tagNamespacedResource}). + Doc("Cluster level resource query"). + Param(webservice.PathParameter("resources", "namespace level resource type, e.g. pods,jobs,configmaps,services.")). + Param(webservice.QueryParameter(query.ParameterName, "name used to do filtering").Required(false)). + Param(webservice.QueryParameter(query.ParameterPage, "page").Required(false).DataFormat("page=%d").DefaultValue("page=1")). + Param(webservice.QueryParameter(query.ParameterLimit, "limit").Required(false)). + Param(webservice.QueryParameter(query.ParameterAscending, "sort parameters, e.g. reverse=true").Required(false).DefaultValue("ascending=false")). + Param(webservice.QueryParameter(query.ParameterOrderBy, "sort parameters, e.g. orderBy=createTime")). + Returns(http.StatusOK, ok, api.ListResult{})) + webservice.Route(webservice.GET("/namespaces/{namespace}/{resources}"). - To(handler.handleGetNamespacedResource). + To(handler.handleListResources). Metadata(restfulspec.KeyOpenAPITags, []string{tagNamespacedResource}). Doc("Namespace level resource query"). Param(webservice.PathParameter("namespace", "the name of the project")). Param(webservice.PathParameter("resources", "namespace level resource type, e.g. pods,jobs,configmaps,services.")). Param(webservice.QueryParameter(query.ParameterName, "name used to do filtering").Required(false)). - Param(webservice.QueryParameter(query.ParameterPage, "page").Required(false).DataFormat("page=%d").DefaultValue("page=0")). + Param(webservice.QueryParameter(query.ParameterPage, "page").Required(false).DataFormat("page=%d").DefaultValue("page=1")). Param(webservice.QueryParameter(query.ParameterLimit, "limit").Required(false)). 
Param(webservice.QueryParameter(query.ParameterAscending, "sort parameters, e.g. reverse=true").Required(false).DefaultValue("ascending=false")). Param(webservice.QueryParameter(query.ParameterOrderBy, "sort parameters, e.g. orderBy=createTime")). diff --git a/pkg/models/iam/im/im.go b/pkg/models/iam/im/im.go index e9061c67e..66f3b6228 100644 --- a/pkg/models/iam/im/im.go +++ b/pkg/models/iam/im/im.go @@ -19,76 +19,20 @@ package im import ( "github.com/pkg/errors" - "kubesphere.io/kubesphere/pkg/api/iam" - "kubesphere.io/kubesphere/pkg/simple/client/ldap" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" ) type IdentityManagementInterface interface { - CreateUser(user *iam.User) (*iam.User, error) + CreateUser(user *iamv1alpha2.User) (*iamv1alpha2.User, error) DeleteUser(username string) error - ModifyUser(user *iam.User) (*iam.User, error) - DescribeUser(username string) (*iam.User, error) - Authenticate(username, password string) (*iam.User, error) -} - -type imOperator struct { - ldapClient ldap.Interface + ModifyUser(user *iamv1alpha2.User) (*iamv1alpha2.User, error) + DescribeUser(username string) (*iamv1alpha2.User, error) + Authenticate(username, password string) (*iamv1alpha2.User, error) } var ( - AuthRateLimitExceeded = errors.New("user auth rate limit exceeded") - UserAlreadyExists = errors.New("user already exists") - UserNotExists = errors.New("user not exists") + AuthRateLimitExceeded = errors.New("user auth rate limit exceeded") + AuthFailedIncorrectPassword = errors.New("incorrect password") + UserAlreadyExists = errors.New("user already exists") + UserNotExists = errors.New("user not exists") ) - -func NewLDAPOperator(ldapClient ldap.Interface) IdentityManagementInterface { - return &imOperator{ - ldapClient: ldapClient, - } - -} - -func (im *imOperator) ModifyUser(user *iam.User) (*iam.User, error) { - - err := im.ldapClient.Update(user) - - if err != nil { - return nil, err - } - - return im.ldapClient.Get(user.Name) -} - -func (im 
*imOperator) Authenticate(username, password string) (*iam.User, error) { - - user, err := im.ldapClient.Get(username) - - if err != nil { - return nil, err - } - - err = im.ldapClient.Authenticate(user.Name, password) - if err != nil { - return nil, err - } - - return user, nil -} - -func (im *imOperator) DescribeUser(username string) (*iam.User, error) { - return im.ldapClient.Get(username) -} - -func (im *imOperator) DeleteUser(username string) error { - return im.ldapClient.Delete(username) -} - -func (im *imOperator) CreateUser(user *iam.User) (*iam.User, error) { - err := im.ldapClient.Create(user) - - if err != nil { - return nil, err - } - - return user, nil -} diff --git a/pkg/models/iam/im/im_operator.go b/pkg/models/iam/im/im_operator.go new file mode 100644 index 000000000..8bf7f28ec --- /dev/null +++ b/pkg/models/iam/im/im_operator.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2020 The KubeSphere Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * / + */ + +package im + +import ( + "golang.org/x/crypto/bcrypt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + kubesphereclient "kubesphere.io/kubesphere/pkg/client/clientset/versioned" + informers "kubesphere.io/kubesphere/pkg/client/informers/externalversions" +) + +func NewOperator(ksClient kubesphereclient.Interface, informer informers.SharedInformerFactory) IdentityManagementInterface { + + return &defaultIMOperator{ + ksClient: ksClient, + informer: informer, + } + +} + +type defaultIMOperator struct { + ksClient kubesphereclient.Interface + informer informers.SharedInformerFactory +} + +func (im *defaultIMOperator) ModifyUser(user *iamv1alpha2.User) (*iamv1alpha2.User, error) { + return im.ksClient.IamV1alpha2().Users().Update(user) +} + +func (im *defaultIMOperator) Authenticate(username, password string) (*iamv1alpha2.User, error) { + + user, err := im.ksClient.IamV1alpha2().Users().Get(username, metav1.GetOptions{}) + + if err != nil { + return nil, err + } + if checkPasswordHash(password, user.Spec.EncryptedPassword) { + return user, nil + } + return nil, AuthFailedIncorrectPassword +} + +func checkPasswordHash(password, hash string) bool { + err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) + return err == nil +} + +func (im *defaultIMOperator) DescribeUser(username string) (*iamv1alpha2.User, error) { + user, err := im.ksClient.IamV1alpha2().Users().Get(username, metav1.GetOptions{}) + + if err != nil { + return nil, err + } + return user, nil +} + +func (im *defaultIMOperator) DeleteUser(username string) error { + return im.ksClient.IamV1alpha2().Users().Delete(username, metav1.NewDeleteOptions(0)) +} + +func (im *defaultIMOperator) CreateUser(user *iamv1alpha2.User) (*iamv1alpha2.User, error) { + user, err := im.ksClient.IamV1alpha2().Users().Create(user) + if err != nil { + return nil, err + } + + return user, nil +} diff --git a/pkg/models/iam/im/im_test.go 
b/pkg/models/iam/im/im_test.go index 2e3a3eccc..a23dd9370 100644 --- a/pkg/models/iam/im/im_test.go +++ b/pkg/models/iam/im/im_test.go @@ -17,3 +17,25 @@ */ package im + +import ( + "golang.org/x/crypto/bcrypt" + "testing" +) + +func TestEncryptPassword(t *testing.T) { + password := "P@88w0rd" + encryptedPassword, err := hashPassword(password) + if err != nil { + t.Fatal(err) + } + if !checkPasswordHash(password, encryptedPassword) { + t.Fatal(err) + } + t.Log(encryptedPassword) +} + +func hashPassword(password string) (string, error) { + bytes, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.MinCost) + return string(bytes), err +} diff --git a/pkg/models/iam/im/ldap_operator.go b/pkg/models/iam/im/ldap_operator.go new file mode 100644 index 000000000..10e40250e --- /dev/null +++ b/pkg/models/iam/im/ldap_operator.go @@ -0,0 +1,80 @@ +/* + * + * Copyright 2020 The KubeSphere Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * / + */ + +package im + +import ( + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" + "kubesphere.io/kubesphere/pkg/simple/client/ldap" +) + +type ldapOperator struct { + ldapClient ldap.Interface +} + +func NewLDAPOperator(ldapClient ldap.Interface) IdentityManagementInterface { + return &ldapOperator{ + ldapClient: ldapClient, + } + +} + +func (im *ldapOperator) ModifyUser(user *iamv1alpha2.User) (*iamv1alpha2.User, error) { + + err := im.ldapClient.Update(user) + + if err != nil { + return nil, err + } + + return im.ldapClient.Get(user.Name) +} + +func (im *ldapOperator) Authenticate(username, password string) (*iamv1alpha2.User, error) { + + user, err := im.ldapClient.Get(username) + + if err != nil { + return nil, err + } + + err = im.ldapClient.Authenticate(user.Name, password) + if err != nil { + return nil, err + } + + return user, nil +} + +func (im *ldapOperator) DescribeUser(username string) (*iamv1alpha2.User, error) { + return im.ldapClient.Get(username) +} + +func (im *ldapOperator) DeleteUser(username string) error { + return im.ldapClient.Delete(username) +} + +func (im *ldapOperator) CreateUser(user *iamv1alpha2.User) (*iamv1alpha2.User, error) { + err := im.ldapClient.Create(user) + + if err != nil { + return nil, err + } + + return user, nil +} diff --git a/pkg/models/resources/v1alpha3/deployment/deployments.go b/pkg/models/resources/v1alpha3/deployment/deployments.go index 6068b8664..e23bb4c4a 100644 --- a/pkg/models/resources/v1alpha3/deployment/deployments.go +++ b/pkg/models/resources/v1alpha3/deployment/deployments.go @@ -32,9 +32,6 @@ import ( ) const ( - applicationLabel = "app.kubernetes.io/name" - ReleaseLabel = "relase" - statusStopped = "stopped" statusRunning = "running" statusUpdating = "updating" @@ -80,14 +77,10 @@ func (d *deploymentsGetter) compare(left runtime.Object, right runtime.Object, f } switch field { - case query.FieldCreationTimeStamp: - return 
leftDeployment.CreationTimestamp.After(rightDeployment.CreationTimestamp.Time) case query.FieldLastUpdateTimestamp: return lastUpdateTime(leftDeployment).After(lastUpdateTime(rightDeployment)) default: - fallthrough - case query.FieldName: - return strings.Compare(leftDeployment.Name, rightDeployment.Name) > 0 + return v1alpha3.DefaultObjectMetaCompare(leftDeployment.ObjectMeta, rightDeployment.ObjectMeta, field) } } @@ -98,18 +91,12 @@ func (d *deploymentsGetter) filter(object runtime.Object, filter query.Filter) b } switch filter.Field { - case query.FieldName: - return query.ComparableString(deployment.Name).Contains(filter.Value) - case query.FieldApplication: - if app, ok := deployment.Labels[applicationLabel]; ok { - return query.ComparableString(app).Contains(filter.Value) - } + case query.FieldStatus: - return filter.Value.Compare(query.ComparableString(deploymentStatus(deployment.Status))) == 0 + return strings.Compare(deploymentStatus(deployment.Status), string(filter.Value)) == 0 default: - return false + return v1alpha3.DefaultObjectMetaFilter(deployment.ObjectMeta, filter) } - return false } func deploymentStatus(status v1.DeploymentStatus) string { diff --git a/pkg/models/resources/v1alpha3/deployment/deployments_test.go b/pkg/models/resources/v1alpha3/deployment/deployments_test.go index 2091d75cd..db99b3776 100644 --- a/pkg/models/resources/v1alpha3/deployment/deployments_test.go +++ b/pkg/models/resources/v1alpha3/deployment/deployments_test.go @@ -94,15 +94,15 @@ func TestListDeployments(t *testing.T) { }, &query.Query{ Pagination: &query.Pagination{ - Limit: 1, - Page: 1, + Limit: 1, + Offset: 1, }, SortBy: query.FieldName, Ascending: false, Filters: []query.Filter{ { Field: query.FieldName, - Value: query.ComparableString("foo"), + Value: query.Value("foo"), }, }, }, diff --git a/pkg/models/resources/v1alpha3/interface.go b/pkg/models/resources/v1alpha3/interface.go index 871a50cd2..e5fd4c4aa 100644 --- 
a/pkg/models/resources/v1alpha3/interface.go +++ b/pkg/models/resources/v1alpha3/interface.go @@ -1,10 +1,12 @@ package v1alpha3 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "kubesphere.io/kubesphere/pkg/api" "kubesphere.io/kubesphere/pkg/apiserver/query" "sort" + "strings" ) type Interface interface { @@ -36,14 +38,6 @@ func DefaultList(objects []runtime.Object, query *query.Query, compareFunc Compa } } - start, end := query.Pagination.GetPaginationSettings(len(filtered)) - if !query.Pagination.IsPageAvailable(len(filtered), start) { - return &api.ListResult{ - Items: nil, - TotalItems: 0, - } - } - // sort by sortBy field sort.Slice(filtered, func(i, j int) bool { if !query.Ascending { @@ -52,14 +46,88 @@ func DefaultList(objects []runtime.Object, query *query.Query, compareFunc Compa return compareFunc(filtered[i], filtered[j], query.SortBy) }) + start, end := query.Pagination.GetValidPagination(len(filtered)) + return &api.ListResult{ Items: objectsToInterfaces(filtered[start:end]), TotalItems: len(filtered), } } +func DefaultObjectMetaCompare(left, right metav1.ObjectMeta, sortBy query.Field) bool { + switch sortBy { + // ?sortBy=name + case query.FieldName: + return strings.Compare(left.Name, right.Name) > 0 + // ?sortBy=creationTimestamp + case query.FieldCreationTimeStamp: + return left.CreationTimestamp.After(right.CreationTimestamp.Time) + default: + return false + } +} + +// Default metadata filter +func DefaultObjectMetaFilter(item metav1.ObjectMeta, filter query.Filter) bool { + switch filter.Field { + // /namespaces?page=1&limit=10&name=default + case query.FieldName: + return strings.Contains(item.Name, string(filter.Value)) + // /namespaces?page=1&limit=10&uid=a8a8d6cf-f6a5-4fea-9c1b-e57610115706 + case query.FieldUID: + return strings.Compare(string(item.UID), string(filter.Value)) == 0 + // /deployments?page=1&limit=10&namespace=kubesphere-system + case query.FieldNamespace: + return 
strings.Compare(item.Namespace, string(filter.Value)) == 0 + // /namespaces?page=1&limit=10&ownerReference=a8a8d6cf-f6a5-4fea-9c1b-e57610115706 + case query.FieldOwnerReference: + for _, ownerReference := range item.OwnerReferences { + if strings.Compare(string(ownerReference.UID), string(filter.Value)) == 0 { + return true + } + } + return false + // /namespaces?page=1&limit=10&ownerKind=Workspace + case query.FieldOwnerKind: + for _, ownerReference := range item.OwnerReferences { + if strings.Compare(ownerReference.Kind, string(filter.Value)) == 0 { + return true + } + } + return false + // /namespaces?page=1&limit=10&annotation=openpitrix_runtime + case query.FieldAnnotation: + return containsAnyValue(item.Annotations, string(filter.Value)) + // /namespaces?page=1&limit=10&label=kubesphere.io/workspace:system-workspace + case query.FieldLabel: + return containsAnyValue(item.Labels, string(filter.Value)) + case query.FieldClusterName: + return strings.Compare(item.ClusterName, string(filter.Value)) == 0 + default: + return false + } +} + +// Filter format ,if the key is defined, the key must match exactly, value match according to strings.Contains. 
+func containsAnyValue(keyValues map[string]string, filter string) bool { + fields := strings.SplitN(filter, ":", 2) + var keyFilter, valueFilter string + if len(fields) == 2 { + keyFilter = fields[0] + valueFilter = fields[1] + } else { + valueFilter = fields[0] + } + for key, value := range keyValues { + if (key == keyFilter || keyFilter == "") && strings.Contains(value, valueFilter) { + return true + } + } + return false +} + func objectsToInterfaces(objs []runtime.Object) []interface{} { - var res []interface{} + res := make([]interface{}, 0) for _, obj := range objs { res = append(res, obj) } diff --git a/pkg/models/resources/v1alpha3/namespace/namespaces.go b/pkg/models/resources/v1alpha3/namespace/namespaces.go index d3b4821ee..e70b11b10 100644 --- a/pkg/models/resources/v1alpha3/namespace/namespaces.go +++ b/pkg/models/resources/v1alpha3/namespace/namespaces.go @@ -15,7 +15,7 @@ type namespaceGetter struct { informers informers.SharedInformerFactory } -func NewNamespaceGetter(informers informers.SharedInformerFactory) v1alpha3.Interface { +func New(informers informers.SharedInformerFactory) v1alpha3.Interface { return &namespaceGetter{informers: informers} } @@ -42,14 +42,11 @@ func (n namespaceGetter) filter(item runtime.Object, filter query.Filter) bool { if !ok { return false } - switch filter.Field { - case query.FieldName: - return query.ComparableString(namespace.Name).Contains(filter.Value) case query.FieldStatus: - return query.ComparableString(namespace.Status.Phase).Compare(filter.Value) == 0 + return strings.Compare(string(namespace.Status.Phase), string(filter.Value)) == 0 default: - return false + return v1alpha3.DefaultObjectMetaFilter(namespace.ObjectMeta, filter) } } @@ -63,13 +60,5 @@ func (n namespaceGetter) compare(left runtime.Object, right runtime.Object, fiel if !ok { return true } - - switch field { - case query.FieldName: - return strings.Compare(leftNs.Name, rightNs.Name) > 0 - case query.FieldCreationTimeStamp: - return 
leftNs.CreationTimestamp.After(rightNs.CreationTimestamp.Time) - default: - return false - } + return v1alpha3.DefaultObjectMetaCompare(leftNs.ObjectMeta, rightNs.ObjectMeta, field) } diff --git a/pkg/models/resources/v1alpha3/resource/resource.go b/pkg/models/resources/v1alpha3/resource/resource.go index 110fa1d6c..d846cb8e4 100644 --- a/pkg/models/resources/v1alpha3/resource/resource.go +++ b/pkg/models/resources/v1alpha3/resource/resource.go @@ -8,27 +8,29 @@ import ( "kubesphere.io/kubesphere/pkg/informers" "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3" "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/deployment" + "kubesphere.io/kubesphere/pkg/models/resources/v1alpha3/namespace" ) var ErrResourceNotSupported = errors.New("resource is not supported") -type NamespacedResourceGetter struct { +type ResourceGetter struct { getters map[schema.GroupVersionResource]v1alpha3.Interface } -func New(factory informers.InformerFactory) *NamespacedResourceGetter { +func New(factory informers.InformerFactory) *ResourceGetter { getters := make(map[schema.GroupVersionResource]v1alpha3.Interface) getters[schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}] = deployment.New(factory.KubernetesSharedInformerFactory()) + getters[schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}] = namespace.New(factory.KubernetesSharedInformerFactory()) - return &NamespacedResourceGetter{ + return &ResourceGetter{ getters: getters, } } // tryResource will retrieve a getter with resource name, it doesn't guarantee find resource with correct group version // need to refactor this use schema.GroupVersionResource -func (r *NamespacedResourceGetter) tryResource(resource string) v1alpha3.Interface { +func (r *ResourceGetter) tryResource(resource string) v1alpha3.Interface { for k, v := range r.getters { if k.Resource == resource { return v @@ -38,21 +40,18 @@ func (r *NamespacedResourceGetter) tryResource(resource string) 
v1alpha3.Interfa return nil } -func (r *NamespacedResourceGetter) Get(resource, namespace, name string) (interface{}, error) { +func (r *ResourceGetter) Get(resource, namespace, name string) (interface{}, error) { getter := r.tryResource(resource) if getter == nil { return nil, ErrResourceNotSupported } - return getter.Get(namespace, name) } -func (r *NamespacedResourceGetter) List(resource, namespace string, query *query.Query) (*api.ListResult, error) { +func (r *ResourceGetter) List(resource, namespace string, query *query.Query) (*api.ListResult, error) { getter := r.tryResource(resource) if getter == nil { return nil, ErrResourceNotSupported } - return getter.List(namespace, query) - } diff --git a/pkg/models/resources/v1alpha3/resource/resource_test.go b/pkg/models/resources/v1alpha3/resource/resource_test.go new file mode 100644 index 000000000..7526e4e0f --- /dev/null +++ b/pkg/models/resources/v1alpha3/resource/resource_test.go @@ -0,0 +1,111 @@ +/* + * + * Copyright 2020 The KubeSphere Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * / + */ + +package resource + +import ( + "github.com/google/go-cmp/cmp" + fakeapp "github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake" + fakeistio "istio.io/client-go/pkg/clientset/versioned/fake" + v1 "k8s.io/api/core/v1" + corev1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakek8s "k8s.io/client-go/kubernetes/fake" + "kubesphere.io/kubesphere/pkg/api" + "kubesphere.io/kubesphere/pkg/apiserver/query" + fakeks "kubesphere.io/kubesphere/pkg/client/clientset/versioned/fake" + "kubesphere.io/kubesphere/pkg/informers" + "testing" +) + +func TestResourceGetter(t *testing.T) { + + namespaces := make([]interface{}, 0) + defaultNamespace := &v1.Namespace{ + ObjectMeta: corev1.ObjectMeta{ + Name: "default", + Labels: map[string]string{"kubesphere.io/workspace": "system-workspace"}, + }, + } + kubesphereNamespace := &v1.Namespace{ + ObjectMeta: corev1.ObjectMeta{ + Name: "kubesphere-system", + Labels: map[string]string{"kubesphere.io/workspace": "system-workspace"}, + }, + } + + namespaces = append(namespaces, defaultNamespace, kubesphereNamespace) + + ksClient := fakeks.NewSimpleClientset() + k8sClient := fakek8s.NewSimpleClientset(defaultNamespace, kubesphereNamespace) + istioClient := fakeistio.NewSimpleClientset() + appClient := fakeapp.NewSimpleClientset() + fakeInformerFactory := informers.NewInformerFactories(k8sClient, ksClient, istioClient, appClient) + + k8sInformerFactory := fakeInformerFactory.KubernetesSharedInformerFactory() + for _, namespace := range namespaces { + err := k8sInformerFactory.Core().V1().Namespaces().Informer().GetIndexer().Add(namespace) + if err != nil { + t.Fatal(err) + } + } + + resource := New(fakeInformerFactory) + + tests := []struct { + Name string + Resource string + Namespace string + Query *query.Query + ExpectError error + ExpectResponse *api.ListResult + }{ + { + Name: "normal case", + Resource: "namespaces", + Namespace: "", + Query: &query.Query{ + Pagination: &query.Pagination{ + Limit: 10, + Offset: 0, 
+ }, + SortBy: query.FieldName, + Ascending: false, + Filters: []query.Filter{}, + }, + ExpectError: nil, + ExpectResponse: &api.ListResult{ + Items: namespaces, + TotalItems: 2, + }, + }, + } + + for _, test := range tests { + + result, err := resource.List(test.Resource, test.Namespace, test.Query) + + t.Logf("%+v", result) + if err != test.ExpectError { + t.Errorf("expected error: %s, got: %s", test.ExpectError, err) + } + if diff := cmp.Diff(test.ExpectResponse, result); diff != "" { + t.Errorf(diff) + } + } + +} diff --git a/pkg/models/tenant/workspaces.go b/pkg/models/tenant/workspaces.go index f56103177..346c5ec00 100644 --- a/pkg/models/tenant/workspaces.go +++ b/pkg/models/tenant/workspaces.go @@ -22,6 +22,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" "kubesphere.io/kubesphere/pkg/apis/tenant/v1alpha1" "kubesphere.io/kubesphere/pkg/client/informers/externalversions" "kubesphere.io/kubesphere/pkg/constants" @@ -37,12 +38,10 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - - iamapi "kubesphere.io/kubesphere/pkg/api/iam" ) type InWorkspaceUser struct { - *iamapi.User + *iamv1alpha2.User WorkspaceRole string `json:"workspaceRole"` } diff --git a/pkg/simple/client/ldap/interface.go b/pkg/simple/client/ldap/interface.go index 1f780d012..8406263d1 100644 --- a/pkg/simple/client/ldap/interface.go +++ b/pkg/simple/client/ldap/interface.go @@ -1,20 +1,22 @@ package ldap -import "kubesphere.io/kubesphere/pkg/api/iam" +import ( + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" +) // Interface defines CRUD behaviors of manipulating users type Interface interface { // Create create a new user in ldap - Create(user *iam.User) error + Create(user *iamv1alpha2.User) error // Update updates a user information, return error if user not exists - 
Update(user *iam.User) error + Update(user *iamv1alpha2.User) error // Delete deletes a user from ldap, return nil if user not exists Delete(name string) error // Get gets a user by its username from ldap, return ErrUserNotExists if user not exists - Get(name string) (*iam.User, error) + Get(name string) (*iamv1alpha2.User, error) // Authenticate checks if (name, password) is valid, return ErrInvalidCredentials if not Authenticate(name string, password string) error diff --git a/pkg/simple/client/ldap/ldap.go b/pkg/simple/client/ldap/ldap.go index bb88aced7..4611592ef 100644 --- a/pkg/simple/client/ldap/ldap.go +++ b/pkg/simple/client/ldap/ldap.go @@ -21,7 +21,8 @@ import ( "fmt" "github.com/go-ldap/ldap" "github.com/google/uuid" - "kubesphere.io/kubesphere/pkg/api/iam" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" "kubesphere.io/kubesphere/pkg/server/errors" "sync" "time" @@ -181,7 +182,7 @@ func (l *ldapInterfaceImpl) filterForUsername(username string) string { return fmt.Sprintf("(&(objectClass=inetOrgPerson)(|(uid=%s)(mail=%s)))", username, username) } -func (l *ldapInterfaceImpl) Get(name string) (*iam.User, error) { +func (l *ldapInterfaceImpl) Get(name string) (*iamv1alpha2.User, error) { conn, err := l.newConn() if err != nil { return nil, err @@ -215,20 +216,24 @@ func (l *ldapInterfaceImpl) Get(name string) (*iam.User, error) { userEntry := searchResults.Entries[0] - user := &iam.User{ - Name: userEntry.GetAttributeValue(ldapAttributeUserID), - Email: userEntry.GetAttributeValue(ldapAttributeMail), - Lang: userEntry.GetAttributeValue(ldapAttributePreferredLanguage), - Description: userEntry.GetAttributeValue(ldapAttributeDescription), + user := &iamv1alpha2.User{ + ObjectMeta: metav1.ObjectMeta{ + Name: userEntry.GetAttributeValue(ldapAttributeUserID), + }, + Spec: iamv1alpha2.UserSpec{ + Email: userEntry.GetAttributeValue(ldapAttributeMail), + Lang: 
userEntry.GetAttributeValue(ldapAttributePreferredLanguage), + Description: userEntry.GetAttributeValue(ldapAttributeDescription), + }, } createTimestamp, _ := time.Parse(ldapAttributeCreateTimestampLayout, userEntry.GetAttributeValue(ldapAttributeCreateTimestamp)) - user.CreateTime = createTimestamp + user.ObjectMeta.CreationTimestamp.Time = createTimestamp return user, nil } -func (l *ldapInterfaceImpl) Create(user *iam.User) error { +func (l *ldapInterfaceImpl) Create(user *iamv1alpha2.User) error { if _, err := l.Get(user.Name); err != nil { return ErrUserAlreadyExisted } @@ -266,19 +271,19 @@ func (l *ldapInterfaceImpl) Create(user *iam.User) error { }, { Type: ldapAttributeMail, - Vals: []string{user.Email}, + Vals: []string{user.Spec.Email}, }, { Type: ldapAttributeUserPassword, - Vals: []string{user.Password}, + Vals: []string{user.Spec.EncryptedPassword}, }, { Type: ldapAttributePreferredLanguage, - Vals: []string{user.Lang}, + Vals: []string{user.Spec.Lang}, }, { Type: ldapAttributeDescription, - Vals: []string{user.Description}, + Vals: []string{user.Spec.Description}, }, }, } @@ -314,7 +319,7 @@ func (l *ldapInterfaceImpl) Delete(name string) error { return conn.Del(deleteRequest) } -func (l *ldapInterfaceImpl) Update(newUser *iam.User) error { +func (l *ldapInterfaceImpl) Update(newUser *iamv1alpha2.User) error { conn, err := l.newConn() if err != nil { return err @@ -331,16 +336,16 @@ func (l *ldapInterfaceImpl) Update(newUser *iam.User) error { DN: l.dnForUsername(newUser.Name), } - if newUser.Description != "" { - modifyRequest.Replace(ldapAttributeDescription, []string{newUser.Description}) + if newUser.Spec.Description != "" { + modifyRequest.Replace(ldapAttributeDescription, []string{newUser.Spec.Description}) } - if newUser.Lang != "" { - modifyRequest.Replace(ldapAttributePreferredLanguage, []string{newUser.Lang}) + if newUser.Spec.Lang != "" { + modifyRequest.Replace(ldapAttributePreferredLanguage, []string{newUser.Spec.Lang}) } - if 
newUser.Password != "" { - modifyRequest.Replace(ldapAttributeUserPassword, []string{newUser.Password}) + if newUser.Spec.EncryptedPassword != "" { + modifyRequest.Replace(ldapAttributeUserPassword, []string{newUser.Spec.EncryptedPassword}) } return conn.Modify(modifyRequest) diff --git a/pkg/simple/client/ldap/simple_ldap.go b/pkg/simple/client/ldap/simple_ldap.go index 2dc8fe8bf..c2550fe21 100644 --- a/pkg/simple/client/ldap/simple_ldap.go +++ b/pkg/simple/client/ldap/simple_ldap.go @@ -1,40 +1,43 @@ package ldap import ( - "kubesphere.io/kubesphere/pkg/api/iam" - "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" ) // simpleLdap is a implementation of ldap.Interface, you should never use this in production env! type simpleLdap struct { - store map[string]*iam.User + store map[string]*iamv1alpha2.User } func NewSimpleLdap() Interface { sl := &simpleLdap{ - store: map[string]*iam.User{}, + store: map[string]*iamv1alpha2.User{}, } // initialize with a admin user - admin := &iam.User{ - Name: "admin", - Email: "admin@kubesphere.io", - Lang: "eng", - Description: "administrator", - CreateTime: time.Now(), - Groups: nil, - Password: "P@88w0rd", + admin := &iamv1alpha2.User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "admin", + }, + Spec: iamv1alpha2.UserSpec{ + Email: "admin@kubesphere.io", + Lang: "eng", + Description: "administrator", + Groups: nil, + EncryptedPassword: "P@88w0rd", + }, } sl.store[admin.Name] = admin return sl } -func (s simpleLdap) Create(user *iam.User) error { +func (s simpleLdap) Create(user *iamv1alpha2.User) error { s.store[user.Name] = user return nil } -func (s simpleLdap) Update(user *iam.User) error { +func (s simpleLdap) Update(user *iamv1alpha2.User) error { _, err := s.Get(user.Name) if err != nil { return err @@ -52,7 +55,7 @@ func (s simpleLdap) Delete(name string) error { return nil } -func (s simpleLdap) Get(name string) (*iam.User, error) { +func (s simpleLdap) 
Get(name string) (*iamv1alpha2.User, error) { if user, ok := s.store[name]; !ok { return nil, ErrUserNotExists } else { @@ -64,7 +67,7 @@ func (s simpleLdap) Authenticate(name string, password string) error { if user, err := s.Get(name); err != nil { return err } else { - if user.Password != password { + if user.Spec.EncryptedPassword != password { return ErrInvalidCredentials } } diff --git a/pkg/simple/client/ldap/simple_ldap_test.go b/pkg/simple/client/ldap/simple_ldap_test.go index 34dd9fb31..62dff8764 100644 --- a/pkg/simple/client/ldap/simple_ldap_test.go +++ b/pkg/simple/client/ldap/simple_ldap_test.go @@ -2,22 +2,26 @@ package ldap import ( "github.com/google/go-cmp/cmp" - "kubesphere.io/kubesphere/pkg/api/iam" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + iamv1alpha2 "kubesphere.io/kubesphere/pkg/apis/iam/v1alpha2" "testing" - "time" ) func TestSimpleLdap(t *testing.T) { ldapClient := NewSimpleLdap() - foo := &iam.User{ - Name: "jerry", - Email: "jerry@kubesphere.io", - Lang: "en", - Description: "Jerry is kind and gentle.", - CreateTime: time.Now(), - Groups: []string{}, - Password: "P@88w0rd", + foo := &iamv1alpha2.User{ + TypeMeta: metav1.TypeMeta{APIVersion: iamv1alpha2.SchemeGroupVersion.String()}, + ObjectMeta: metav1.ObjectMeta{ + Name: "jerry", + }, + Spec: iamv1alpha2.UserSpec{ + Email: "jerry@kubesphere.io", + Lang: "en", + Description: "Jerry is kind and gentle.", + Groups: []string{}, + EncryptedPassword: "P@88w0rd", + }, } t.Run("should create user", func(t *testing.T) { @@ -44,7 +48,7 @@ func TestSimpleLdap(t *testing.T) { t.Fatal(err) } - foo.Description = "Jerry needs some drinks." + foo.Spec.Description = "Jerry needs some drinks." 
err = ldapClient.Update(foo) if err != nil { t.Fatal(err) @@ -85,7 +89,7 @@ func TestSimpleLdap(t *testing.T) { t.Fatal(err) } - err = ldapClient.Authenticate(foo.Name, foo.Password) + err = ldapClient.Authenticate(foo.Name, foo.Spec.EncryptedPassword) if err != nil { t.Fatalf("should pass but got an error %v", err) } diff --git a/vendor/github.com/cenkalti/backoff/.gitignore b/vendor/github.com/cenkalti/backoff/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/cenkalti/backoff/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/cenkalti/backoff/.travis.yml b/vendor/github.com/cenkalti/backoff/.travis.yml deleted file mode 100644 index 47a6a46ec..000000000 --- a/vendor/github.com/cenkalti/backoff/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.7 - - 1.x - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/LICENSE b/vendor/github.com/cenkalti/backoff/LICENSE deleted file mode 100644 index 89b817996..000000000 --- a/vendor/github.com/cenkalti/backoff/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following 
conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/README.md b/vendor/github.com/cenkalti/backoff/README.md deleted file mode 100644 index 55ebc98fc..000000000 --- a/vendor/github.com/cenkalti/backoff/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] - -This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. - -[Exponential backoff][exponential backoff wiki] -is an algorithm that uses feedback to multiplicatively decrease the rate of some process, -in order to gradually find an acceptable rate. -The retries exponentially increase and stop increasing when a certain threshold is met. - -## Usage - -See https://godoc.org/github.com/cenkalti/backoff#pkg-examples - -## Contributing - -* I would like to keep this library as small as possible. -* Please don't send a PR without opening an issue and discussing it first. -* If proposed change is not a common use case, I will probably not accept it. 
- -[godoc]: https://godoc.org/github.com/cenkalti/backoff -[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master -[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master -[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master - -[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java -[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff - -[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/vendor/github.com/cenkalti/backoff/backoff.go b/vendor/github.com/cenkalti/backoff/backoff.go deleted file mode 100644 index 3676ee405..000000000 --- a/vendor/github.com/cenkalti/backoff/backoff.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package backoff implements backoff algorithms for retrying operations. -// -// Use Retry function for retrying operations that may fail. -// If Retry does not meet your needs, -// copy/paste the function into your project and modify as you wish. -// -// There is also Ticker type similar to time.Ticker. -// You can use it if you need to work with channels. -// -// See Examples section below for usage examples. -package backoff - -import "time" - -// BackOff is a backoff policy for retrying an operation. -type BackOff interface { - // NextBackOff returns the duration to wait before retrying the operation, - // or backoff. Stop to indicate that no more retries should be made. - // - // Example usage: - // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. - // } - // - NextBackOff() time.Duration - - // Reset to initial state. 
- Reset() -} - -// Stop indicates that no more retries should be made for use in NextBackOff(). -const Stop time.Duration = -1 - -// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, -// meaning that the operation is retried immediately without waiting, indefinitely. -type ZeroBackOff struct{} - -func (b *ZeroBackOff) Reset() {} - -func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } - -// StopBackOff is a fixed backoff policy that always returns backoff.Stop for -// NextBackOff(), meaning that the operation should never be retried. -type StopBackOff struct{} - -func (b *StopBackOff) Reset() {} - -func (b *StopBackOff) NextBackOff() time.Duration { return Stop } - -// ConstantBackOff is a backoff policy that always returns the same backoff delay. -// This is in contrast to an exponential backoff policy, -// which returns a delay that grows longer as you call NextBackOff() over and over again. -type ConstantBackOff struct { - Interval time.Duration -} - -func (b *ConstantBackOff) Reset() {} -func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } - -func NewConstantBackOff(d time.Duration) *ConstantBackOff { - return &ConstantBackOff{Interval: d} -} diff --git a/vendor/github.com/cenkalti/backoff/context.go b/vendor/github.com/cenkalti/backoff/context.go deleted file mode 100644 index 7706faa2b..000000000 --- a/vendor/github.com/cenkalti/backoff/context.go +++ /dev/null @@ -1,63 +0,0 @@ -package backoff - -import ( - "context" - "time" -) - -// BackOffContext is a backoff policy that stops retrying after the context -// is canceled. 
-type BackOffContext interface { - BackOff - Context() context.Context -} - -type backOffContext struct { - BackOff - ctx context.Context -} - -// WithContext returns a BackOffContext with context ctx -// -// ctx must not be nil -func WithContext(b BackOff, ctx context.Context) BackOffContext { - if ctx == nil { - panic("nil context") - } - - if b, ok := b.(*backOffContext); ok { - return &backOffContext{ - BackOff: b.BackOff, - ctx: ctx, - } - } - - return &backOffContext{ - BackOff: b, - ctx: ctx, - } -} - -func ensureContext(b BackOff) BackOffContext { - if cb, ok := b.(BackOffContext); ok { - return cb - } - return WithContext(b, context.Background()) -} - -func (b *backOffContext) Context() context.Context { - return b.ctx -} - -func (b *backOffContext) NextBackOff() time.Duration { - select { - case <-b.ctx.Done(): - return Stop - default: - } - next := b.BackOff.NextBackOff() - if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { - return Stop - } - return next -} diff --git a/vendor/github.com/cenkalti/backoff/exponential.go b/vendor/github.com/cenkalti/backoff/exponential.go deleted file mode 100644 index a031a6597..000000000 --- a/vendor/github.com/cenkalti/backoff/exponential.go +++ /dev/null @@ -1,153 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. 
- -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. -*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff stops. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. 
-func NewExponentialBackOff() *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Clock: SystemClock, - } - b.Reset() - return b -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. -func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { - return Stop - } - defer b.incrementCurrentInterval() - return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). It is -// safe to call even while the backoff policy is used by a running -// ticker. -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. 
- if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. - return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/vendor/github.com/cenkalti/backoff/retry.go b/vendor/github.com/cenkalti/backoff/retry.go deleted file mode 100644 index e936a506f..000000000 --- a/vendor/github.com/cenkalti/backoff/retry.go +++ /dev/null @@ -1,82 +0,0 @@ -package backoff - -import "time" - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the operation o until it does not return error or BackOff stops. -// o is guaranteed to be run at least once. -// -// If o returns a *PermanentError, the operation is not retried, and the -// wrapped error is returned. 
-// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. -func RetryNotify(operation Operation, b BackOff, notify Notify) error { - var err error - var next time.Duration - var t *time.Timer - - cb := ensureContext(b) - - b.Reset() - for { - if err = operation(); err == nil { - return nil - } - - if permanent, ok := err.(*PermanentError); ok { - return permanent.Err - } - - if next = cb.NextBackOff(); next == Stop { - return err - } - - if notify != nil { - notify(err, next) - } - - if t == nil { - t = time.NewTimer(next) - defer t.Stop() - } else { - t.Reset(next) - } - - select { - case <-cb.Context().Done(): - return err - case <-t.C: - } - } -} - -// PermanentError signals that the operation should not be retried. -type PermanentError struct { - Err error -} - -func (e *PermanentError) Error() string { - return e.Err.Error() -} - -// Permanent wraps the given err in a *PermanentError. -func Permanent(err error) *PermanentError { - return &PermanentError{ - Err: err, - } -} diff --git a/vendor/github.com/cenkalti/backoff/ticker.go b/vendor/github.com/cenkalti/backoff/ticker.go deleted file mode 100644 index e41084b0e..000000000 --- a/vendor/github.com/cenkalti/backoff/ticker.go +++ /dev/null @@ -1,82 +0,0 @@ -package backoff - -import ( - "sync" - "time" -) - -// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. -// -// Ticks will continue to arrive when the previous operation is still running, -// so operations that take a while to fail could run in quick succession. 
-type Ticker struct { - C <-chan time.Time - c chan time.Time - b BackOffContext - stop chan struct{} - stopOnce sync.Once -} - -// NewTicker returns a new Ticker containing a channel that will send -// the time at times specified by the BackOff argument. Ticker is -// guaranteed to tick at least once. The channel is closed when Stop -// method is called or BackOff stops. It is not safe to manipulate the -// provided backoff policy (notably calling NextBackOff or Reset) -// while the ticker is running. -func NewTicker(b BackOff) *Ticker { - c := make(chan time.Time) - t := &Ticker{ - C: c, - c: c, - b: ensureContext(b), - stop: make(chan struct{}), - } - t.b.Reset() - go t.run() - return t -} - -// Stop turns off a ticker. After Stop, no more ticks will be sent. -func (t *Ticker) Stop() { - t.stopOnce.Do(func() { close(t.stop) }) -} - -func (t *Ticker) run() { - c := t.c - defer close(c) - - // Ticker is guaranteed to tick at least once. - afterC := t.send(time.Now()) - - for { - if afterC == nil { - return - } - - select { - case tick := <-afterC: - afterC = t.send(tick) - case <-t.stop: - t.c = nil // Prevent future ticks from being sent to the channel. - return - case <-t.b.Context().Done(): - return - } - } -} - -func (t *Ticker) send(tick time.Time) <-chan time.Time { - select { - case t.c <- tick: - case <-t.stop: - return nil - } - - next := t.b.NextBackOff() - if next == Stop { - t.Stop() - return nil - } - - return time.After(next) -} diff --git a/vendor/github.com/cenkalti/backoff/tries.go b/vendor/github.com/cenkalti/backoff/tries.go deleted file mode 100644 index cfeefd9b7..000000000 --- a/vendor/github.com/cenkalti/backoff/tries.go +++ /dev/null @@ -1,35 +0,0 @@ -package backoff - -import "time" - -/* -WithMaxRetries creates a wrapper around another BackOff, which will -return Stop if NextBackOff() has been called too many times since -the last time Reset() was called - -Note: Implementation is not thread-safe. 
-*/ -func WithMaxRetries(b BackOff, max uint64) BackOff { - return &backOffTries{delegate: b, maxTries: max} -} - -type backOffTries struct { - delegate BackOff - maxTries uint64 - numTries uint64 -} - -func (b *backOffTries) NextBackOff() time.Duration { - if b.maxTries > 0 { - if b.maxTries <= b.numTries { - return Stop - } - b.numTries++ - } - return b.delegate.NextBackOff() -} - -func (b *backOffTries) Reset() { - b.numTries = 0 - b.delegate.Reset() -} diff --git a/vendor/github.com/cheekybits/genny/LICENSE b/vendor/github.com/cheekybits/genny/LICENSE deleted file mode 100644 index 519d7f227..000000000 --- a/vendor/github.com/cheekybits/genny/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 cheekybits - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/vendor/github.com/cheekybits/genny/generic/doc.go b/vendor/github.com/cheekybits/genny/generic/doc.go deleted file mode 100644 index 3bd6c869c..000000000 --- a/vendor/github.com/cheekybits/genny/generic/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package generic contains the generic marker types. -package generic diff --git a/vendor/github.com/cheekybits/genny/generic/generic.go b/vendor/github.com/cheekybits/genny/generic/generic.go deleted file mode 100644 index 04a2306cb..000000000 --- a/vendor/github.com/cheekybits/genny/generic/generic.go +++ /dev/null @@ -1,13 +0,0 @@ -package generic - -// Type is the placeholder type that indicates a generic value. -// When genny is executed, variables of this type will be replaced with -// references to the specific types. -// var GenericType generic.Type -type Type interface{} - -// Number is the placehoder type that indiccates a generic numerical value. -// When genny is executed, variables of this type will be replaced with -// references to the specific types. -// var GenericType generic.Number -type Number float64 diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml deleted file mode 100644 index ba95cdd15..000000000 --- a/vendor/github.com/dustin/go-humanize/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -sudo: false -language: go -go: - - 1.3.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... 
diff --git a/vendor/github.com/dustin/go-humanize/LICENSE b/vendor/github.com/dustin/go-humanize/LICENSE deleted file mode 100644 index 8d9a94a90..000000000 --- a/vendor/github.com/dustin/go-humanize/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2005-2008 Dustin Sallings - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown deleted file mode 100644 index 91b4ae564..000000000 --- a/vendor/github.com/dustin/go-humanize/README.markdown +++ /dev/null @@ -1,124 +0,0 @@ -# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize) - -Just a few functions for helping humanize times and sizes. 
- -`go get` it as `github.com/dustin/go-humanize`, import it as -`"github.com/dustin/go-humanize"`, use it as `humanize`. - -See [godoc](https://godoc.org/github.com/dustin/go-humanize) for -complete documentation. - -## Sizes - -This lets you take numbers like `82854982` and convert them to useful -strings like, `83 MB` or `79 MiB` (whichever you prefer). - -Example: - -```go -fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB. -``` - -## Times - -This lets you take a `time.Time` and spit it out in relative terms. -For example, `12 seconds ago` or `3 days from now`. - -Example: - -```go -fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago. -``` - -Thanks to Kyle Lemons for the time implementation from an IRC -conversation one day. It's pretty neat. - -## Ordinals - -From a [mailing list discussion][odisc] where a user wanted to be able -to label ordinals. - - 0 -> 0th - 1 -> 1st - 2 -> 2nd - 3 -> 3rd - 4 -> 4th - [...] - -Example: - -```go -fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend. -``` - -## Commas - -Want to shove commas into numbers? Be my guest. - - 0 -> 0 - 100 -> 100 - 1000 -> 1,000 - 1000000000 -> 1,000,000,000 - -100000 -> -100,000 - -Example: - -```go -fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491. -``` - -## Ftoa - -Nicer float64 formatter that removes trailing zeros. - -```go -fmt.Printf("%f", 2.24) // 2.240000 -fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24 -fmt.Printf("%f", 2.0) // 2.000000 -fmt.Printf("%s", humanize.Ftoa(2.0)) // 2 -``` - -## SI notation - -Format numbers with [SI notation][sinotation]. - -Example: - -```go -humanize.SI(0.00000000223, "M") // 2.23 nM -``` - -## English-specific functions - -The following functions are in the `humanize/english` subpackage. 
- -### Plurals - -Simple English pluralization - -```go -english.PluralWord(1, "object", "") // object -english.PluralWord(42, "object", "") // objects -english.PluralWord(2, "bus", "") // buses -english.PluralWord(99, "locus", "loci") // loci - -english.Plural(1, "object", "") // 1 object -english.Plural(42, "object", "") // 42 objects -english.Plural(2, "bus", "") // 2 buses -english.Plural(99, "locus", "loci") // 99 loci -``` - -### Word series - -Format comma-separated words lists with conjuctions: - -```go -english.WordSeries([]string{"foo"}, "and") // foo -english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar -english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz - -english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz -``` - -[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion -[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix diff --git a/vendor/github.com/dustin/go-humanize/big.go b/vendor/github.com/dustin/go-humanize/big.go deleted file mode 100644 index f49dc337d..000000000 --- a/vendor/github.com/dustin/go-humanize/big.go +++ /dev/null @@ -1,31 +0,0 @@ -package humanize - -import ( - "math/big" -) - -// order of magnitude (to a max order) -func oomm(n, b *big.Int, maxmag int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - if mag == maxmag && maxmag >= 0 { - break - } - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} - -// total order of magnitude -// (same as above, but with no upper limit) -func oom(n, b *big.Int) (float64, int) { - mag := 0 - m := &big.Int{} - for n.Cmp(b) >= 0 { - n.DivMod(n, b, m) - mag++ - } - return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag -} diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go deleted file mode 100644 index 1a2bf6172..000000000 --- 
a/vendor/github.com/dustin/go-humanize/bigbytes.go +++ /dev/null @@ -1,173 +0,0 @@ -package humanize - -import ( - "fmt" - "math/big" - "strings" - "unicode" -) - -var ( - bigIECExp = big.NewInt(1024) - - // BigByte is one byte in bit.Ints - BigByte = big.NewInt(1) - // BigKiByte is 1,024 bytes in bit.Ints - BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp) - // BigMiByte is 1,024 k bytes in bit.Ints - BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp) - // BigGiByte is 1,024 m bytes in bit.Ints - BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp) - // BigTiByte is 1,024 g bytes in bit.Ints - BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp) - // BigPiByte is 1,024 t bytes in bit.Ints - BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp) - // BigEiByte is 1,024 p bytes in bit.Ints - BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp) - // BigZiByte is 1,024 e bytes in bit.Ints - BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp) - // BigYiByte is 1,024 z bytes in bit.Ints - BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp) -) - -var ( - bigSIExp = big.NewInt(1000) - - // BigSIByte is one SI byte in big.Ints - BigSIByte = big.NewInt(1) - // BigKByte is 1,000 SI bytes in big.Ints - BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp) - // BigMByte is 1,000 SI k bytes in big.Ints - BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp) - // BigGByte is 1,000 SI m bytes in big.Ints - BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp) - // BigTByte is 1,000 SI g bytes in big.Ints - BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp) - // BigPByte is 1,000 SI t bytes in big.Ints - BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp) - // BigEByte is 1,000 SI p bytes in big.Ints - BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp) - // BigZByte is 1,000 SI e bytes in big.Ints - BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp) - // BigYByte is 1,000 SI z bytes in big.Ints - BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp) -) - -var bigBytesSizeTable = map[string]*big.Int{ - "b": BigByte, - "kib": 
BigKiByte, - "kb": BigKByte, - "mib": BigMiByte, - "mb": BigMByte, - "gib": BigGiByte, - "gb": BigGByte, - "tib": BigTiByte, - "tb": BigTByte, - "pib": BigPiByte, - "pb": BigPByte, - "eib": BigEiByte, - "eb": BigEByte, - "zib": BigZiByte, - "zb": BigZByte, - "yib": BigYiByte, - "yb": BigYByte, - // Without suffix - "": BigByte, - "ki": BigKiByte, - "k": BigKByte, - "mi": BigMiByte, - "m": BigMByte, - "gi": BigGiByte, - "g": BigGByte, - "ti": BigTiByte, - "t": BigTByte, - "pi": BigPiByte, - "p": BigPByte, - "ei": BigEiByte, - "e": BigEByte, - "z": BigZByte, - "zi": BigZiByte, - "y": BigYByte, - "yi": BigYiByte, -} - -var ten = big.NewInt(10) - -func humanateBigBytes(s, base *big.Int, sizes []string) string { - if s.Cmp(ten) < 0 { - return fmt.Sprintf("%d B", s) - } - c := (&big.Int{}).Set(s) - val, mag := oomm(c, base, len(sizes)-1) - suffix := sizes[mag] - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) - -} - -// BigBytes produces a human readable representation of an SI size. -// -// See also: ParseBigBytes. -// -// BigBytes(82854982) -> 83 MB -func BigBytes(s *big.Int) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} - return humanateBigBytes(s, bigSIExp, sizes) -} - -// BigIBytes produces a human readable representation of an IEC size. -// -// See also: ParseBigBytes. -// -// BigIBytes(82854982) -> 79 MiB -func BigIBytes(s *big.Int) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - return humanateBigBytes(s, bigIECExp, sizes) -} - -// ParseBigBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See also: BigBytes, BigIBytes. -// -// ParseBigBytes("42 MB") -> 42000000, nil -// ParseBigBytes("42 mib") -> 44040192, nil -func ParseBigBytes(s string) (*big.Int, error) { - lastDigit := 0 - hasComma := false - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { - break - } - if r == ',' { - hasComma = true - } - lastDigit++ - } - - num := s[:lastDigit] - if hasComma { - num = strings.Replace(num, ",", "", -1) - } - - val := &big.Rat{} - _, err := fmt.Sscanf(num, "%f", val) - if err != nil { - return nil, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bigBytesSizeTable[extra]; ok { - mv := (&big.Rat{}).SetInt(m) - val.Mul(val, mv) - rv := &big.Int{} - rv.Div(val.Num(), val.Denom()) - return rv, nil - } - - return nil, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/vendor/github.com/dustin/go-humanize/bytes.go b/vendor/github.com/dustin/go-humanize/bytes.go deleted file mode 100644 index 0b498f488..000000000 --- a/vendor/github.com/dustin/go-humanize/bytes.go +++ /dev/null @@ -1,143 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "strconv" - "strings" - "unicode" -) - -// IEC Sizes. -// kibis of bits -const ( - Byte = 1 << (iota * 10) - KiByte - MiByte - GiByte - TiByte - PiByte - EiByte -) - -// SI Sizes. 
-const ( - IByte = 1 - KByte = IByte * 1000 - MByte = KByte * 1000 - GByte = MByte * 1000 - TByte = GByte * 1000 - PByte = TByte * 1000 - EByte = PByte * 1000 -) - -var bytesSizeTable = map[string]uint64{ - "b": Byte, - "kib": KiByte, - "kb": KByte, - "mib": MiByte, - "mb": MByte, - "gib": GiByte, - "gb": GByte, - "tib": TiByte, - "tb": TByte, - "pib": PiByte, - "pb": PByte, - "eib": EiByte, - "eb": EByte, - // Without suffix - "": Byte, - "ki": KiByte, - "k": KByte, - "mi": MiByte, - "m": MByte, - "gi": GiByte, - "g": GByte, - "ti": TiByte, - "t": TByte, - "pi": PiByte, - "p": PByte, - "ei": EiByte, - "e": EByte, -} - -func logn(n, b float64) float64 { - return math.Log(n) / math.Log(b) -} - -func humanateBytes(s uint64, base float64, sizes []string) string { - if s < 10 { - return fmt.Sprintf("%d B", s) - } - e := math.Floor(logn(float64(s), base)) - suffix := sizes[int(e)] - val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10 - f := "%.0f %s" - if val < 10 { - f = "%.1f %s" - } - - return fmt.Sprintf(f, val, suffix) -} - -// Bytes produces a human readable representation of an SI size. -// -// See also: ParseBytes. -// -// Bytes(82854982) -> 83 MB -func Bytes(s uint64) string { - sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"} - return humanateBytes(s, 1000, sizes) -} - -// IBytes produces a human readable representation of an IEC size. -// -// See also: ParseBytes. -// -// IBytes(82854982) -> 79 MiB -func IBytes(s uint64) string { - sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} - return humanateBytes(s, 1024, sizes) -} - -// ParseBytes parses a string representation of bytes into the number -// of bytes it represents. -// -// See Also: Bytes, IBytes. -// -// ParseBytes("42 MB") -> 42000000, nil -// ParseBytes("42 mib") -> 44040192, nil -func ParseBytes(s string) (uint64, error) { - lastDigit := 0 - hasComma := false - for _, r := range s { - if !(unicode.IsDigit(r) || r == '.' 
|| r == ',') { - break - } - if r == ',' { - hasComma = true - } - lastDigit++ - } - - num := s[:lastDigit] - if hasComma { - num = strings.Replace(num, ",", "", -1) - } - - f, err := strconv.ParseFloat(num, 64) - if err != nil { - return 0, err - } - - extra := strings.ToLower(strings.TrimSpace(s[lastDigit:])) - if m, ok := bytesSizeTable[extra]; ok { - f *= float64(m) - if f >= math.MaxUint64 { - return 0, fmt.Errorf("too large: %v", s) - } - return uint64(f), nil - } - - return 0, fmt.Errorf("unhandled size name: %v", extra) -} diff --git a/vendor/github.com/dustin/go-humanize/comma.go b/vendor/github.com/dustin/go-humanize/comma.go deleted file mode 100644 index 520ae3e57..000000000 --- a/vendor/github.com/dustin/go-humanize/comma.go +++ /dev/null @@ -1,116 +0,0 @@ -package humanize - -import ( - "bytes" - "math" - "math/big" - "strconv" - "strings" -) - -// Comma produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. Comma(834142) -> 834,142 -func Comma(v int64) string { - sign := "" - - // Min int64 can't be negated to a usable value, so it has to be special cased. - if v == math.MinInt64 { - return "-9,223,372,036,854,775,808" - } - - if v < 0 { - sign = "-" - v = 0 - v - } - - parts := []string{"", "", "", "", "", "", ""} - j := len(parts) - 1 - - for v > 999 { - parts[j] = strconv.FormatInt(v%1000, 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - v = v / 1000 - j-- - } - parts[j] = strconv.Itoa(int(v)) - return sign + strings.Join(parts[j:], ",") -} - -// Commaf produces a string form of the given number in base 10 with -// commas after every three orders of magnitude. -// -// e.g. 
Commaf(834142.32) -> 834,142.32 -func Commaf(v float64) string { - buf := &bytes.Buffer{} - if v < 0 { - buf.Write([]byte{'-'}) - v = 0 - v - } - - comma := []byte{','} - - parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} - -// CommafWithDigits works like the Commaf but limits the resulting -// string to the given number of decimal places. -// -// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3 -func CommafWithDigits(f float64, decimals int) string { - return stripTrailingDigits(Commaf(f), decimals) -} - -// BigComma produces a string form of the given big.Int in base 10 -// with commas after every three orders of magnitude. 
-func BigComma(b *big.Int) string { - sign := "" - if b.Sign() < 0 { - sign = "-" - b.Abs(b) - } - - athousand := big.NewInt(1000) - c := (&big.Int{}).Set(b) - _, m := oom(c, athousand) - parts := make([]string, m+1) - j := len(parts) - 1 - - mod := &big.Int{} - for b.Cmp(athousand) >= 0 { - b.DivMod(b, athousand, mod) - parts[j] = strconv.FormatInt(mod.Int64(), 10) - switch len(parts[j]) { - case 2: - parts[j] = "0" + parts[j] - case 1: - parts[j] = "00" + parts[j] - } - j-- - } - parts[j] = strconv.Itoa(int(b.Int64())) - return sign + strings.Join(parts[j:], ",") -} diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go deleted file mode 100644 index 620690dec..000000000 --- a/vendor/github.com/dustin/go-humanize/commaf.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build go1.6 - -package humanize - -import ( - "bytes" - "math/big" - "strings" -) - -// BigCommaf produces a string form of the given big.Float in base 10 -// with commas after every three orders of magnitude. 
-func BigCommaf(v *big.Float) string { - buf := &bytes.Buffer{} - if v.Sign() < 0 { - buf.Write([]byte{'-'}) - v.Abs(v) - } - - comma := []byte{','} - - parts := strings.Split(v.Text('f', -1), ".") - pos := 0 - if len(parts[0])%3 != 0 { - pos += len(parts[0]) % 3 - buf.WriteString(parts[0][:pos]) - buf.Write(comma) - } - for ; pos < len(parts[0]); pos += 3 { - buf.WriteString(parts[0][pos : pos+3]) - buf.Write(comma) - } - buf.Truncate(buf.Len() - 1) - - if len(parts) > 1 { - buf.Write([]byte{'.'}) - buf.WriteString(parts[1]) - } - return buf.String() -} diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go deleted file mode 100644 index 1c62b640d..000000000 --- a/vendor/github.com/dustin/go-humanize/ftoa.go +++ /dev/null @@ -1,46 +0,0 @@ -package humanize - -import ( - "strconv" - "strings" -) - -func stripTrailingZeros(s string) string { - offset := len(s) - 1 - for offset > 0 { - if s[offset] == '.' { - offset-- - break - } - if s[offset] != '0' { - break - } - offset-- - } - return s[:offset+1] -} - -func stripTrailingDigits(s string, digits int) string { - if i := strings.Index(s, "."); i >= 0 { - if digits <= 0 { - return s[:i] - } - i++ - if i+digits >= len(s) { - return s - } - return s[:i+digits] - } - return s -} - -// Ftoa converts a float to a string with no trailing zeros. -func Ftoa(num float64) string { - return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64)) -} - -// FtoaWithDigits converts a float to a string but limits the resulting string -// to the given number of decimal places, and no trailing zeros. 
-func FtoaWithDigits(num float64, digits int) string { - return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits)) -} diff --git a/vendor/github.com/dustin/go-humanize/humanize.go b/vendor/github.com/dustin/go-humanize/humanize.go deleted file mode 100644 index a2c2da31e..000000000 --- a/vendor/github.com/dustin/go-humanize/humanize.go +++ /dev/null @@ -1,8 +0,0 @@ -/* -Package humanize converts boring ugly numbers to human-friendly strings and back. - -Durations can be turned into strings such as "3 days ago", numbers -representing sizes like 82854982 into useful strings like, "83 MB" or -"79 MiB" (whichever you prefer). -*/ -package humanize diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go deleted file mode 100644 index dec618659..000000000 --- a/vendor/github.com/dustin/go-humanize/number.go +++ /dev/null @@ -1,192 +0,0 @@ -package humanize - -/* -Slightly adapted from the source to fit go-humanize. - -Author: https://github.com/gorhill -Source: https://gist.github.com/gorhill/5285193 - -*/ - -import ( - "math" - "strconv" -) - -var ( - renderFloatPrecisionMultipliers = [...]float64{ - 1, - 10, - 100, - 1000, - 10000, - 100000, - 1000000, - 10000000, - 100000000, - 1000000000, - } - - renderFloatPrecisionRounders = [...]float64{ - 0.5, - 0.05, - 0.005, - 0.0005, - 0.00005, - 0.000005, - 0.0000005, - 0.00000005, - 0.000000005, - 0.0000000005, - } -) - -// FormatFloat produces a formatted number as string based on the following user-specified criteria: -// * thousands separator -// * decimal separator -// * decimal precision -// -// Usage: s := RenderFloat(format, n) -// The format parameter tells how to render the number n. -// -// See examples: http://play.golang.org/p/LXc1Ddm1lJ -// -// Examples of format strings, given n = 12345.6789: -// "#,###.##" => "12,345.67" -// "#,###." 
=> "12,345" -// "#,###" => "12345,678" -// "#\u202F###,##" => "12 345,68" -// "#.###,###### => 12.345,678900 -// "" (aka default format) => 12,345.67 -// -// The highest precision allowed is 9 digits after the decimal symbol. -// There is also a version for integer number, FormatInteger(), -// which is convenient for calls within template. -func FormatFloat(format string, n float64) string { - // Special cases: - // NaN = "NaN" - // +Inf = "+Infinity" - // -Inf = "-Infinity" - if math.IsNaN(n) { - return "NaN" - } - if n > math.MaxFloat64 { - return "Infinity" - } - if n < -math.MaxFloat64 { - return "-Infinity" - } - - // default format - precision := 2 - decimalStr := "." - thousandStr := "," - positiveStr := "" - negativeStr := "-" - - if len(format) > 0 { - format := []rune(format) - - // If there is an explicit format directive, - // then default values are these: - precision = 9 - thousandStr = "" - - // collect indices of meaningful formatting directives - formatIndx := []int{} - for i, char := range format { - if char != '#' && char != '0' { - formatIndx = append(formatIndx, i) - } - } - - if len(formatIndx) > 0 { - // Directive at index 0: - // Must be a '+' - // Raise an error if not the case - // index: 0123456789 - // +0.000,000 - // +000,000.0 - // +0000.00 - // +0000 - if formatIndx[0] == 0 { - if format[formatIndx[0]] != '+' { - panic("RenderFloat(): invalid positive sign directive") - } - positiveStr = "+" - formatIndx = formatIndx[1:] - } - - // Two directives: - // First is thousands separator - // Raise an error if not followed by 3-digit - // 0123456789 - // 0.000,000 - // 000,000.00 - if len(formatIndx) == 2 { - if (formatIndx[1] - formatIndx[0]) != 4 { - panic("RenderFloat(): thousands separator directive must be followed by 3 digit-specifiers") - } - thousandStr = string(format[formatIndx[0]]) - formatIndx = formatIndx[1:] - } - - // One directive: - // Directive is decimal separator - // The number of digit-specifier following the separator 
indicates wanted precision - // 0123456789 - // 0.00 - // 000,0000 - if len(formatIndx) == 1 { - decimalStr = string(format[formatIndx[0]]) - precision = len(format) - formatIndx[0] - 1 - } - } - } - - // generate sign part - var signStr string - if n >= 0.000000001 { - signStr = positiveStr - } else if n <= -0.000000001 { - signStr = negativeStr - n = -n - } else { - signStr = "" - n = 0.0 - } - - // split number into integer and fractional parts - intf, fracf := math.Modf(n + renderFloatPrecisionRounders[precision]) - - // generate integer part string - intStr := strconv.FormatInt(int64(intf), 10) - - // add thousand separator if required - if len(thousandStr) > 0 { - for i := len(intStr); i > 3; { - i -= 3 - intStr = intStr[:i] + thousandStr + intStr[i:] - } - } - - // no fractional part, we can leave now - if precision == 0 { - return signStr + intStr - } - - // generate fractional part - fracStr := strconv.Itoa(int(fracf * renderFloatPrecisionMultipliers[precision])) - // may need padding - if len(fracStr) < precision { - fracStr = "000000000000000"[:precision-len(fracStr)] + fracStr - } - - return signStr + intStr + decimalStr + fracStr -} - -// FormatInteger produces a formatted number as string. -// See FormatFloat. -func FormatInteger(format string, n int) string { - return FormatFloat(format, float64(n)) -} diff --git a/vendor/github.com/dustin/go-humanize/ordinals.go b/vendor/github.com/dustin/go-humanize/ordinals.go deleted file mode 100644 index 43d88a861..000000000 --- a/vendor/github.com/dustin/go-humanize/ordinals.go +++ /dev/null @@ -1,25 +0,0 @@ -package humanize - -import "strconv" - -// Ordinal gives you the input number in a rank/ordinal format. 
-// -// Ordinal(3) -> 3rd -func Ordinal(x int) string { - suffix := "th" - switch x % 10 { - case 1: - if x%100 != 11 { - suffix = "st" - } - case 2: - if x%100 != 12 { - suffix = "nd" - } - case 3: - if x%100 != 13 { - suffix = "rd" - } - } - return strconv.Itoa(x) + suffix -} diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go deleted file mode 100644 index ae659e0e4..000000000 --- a/vendor/github.com/dustin/go-humanize/si.go +++ /dev/null @@ -1,123 +0,0 @@ -package humanize - -import ( - "errors" - "math" - "regexp" - "strconv" -) - -var siPrefixTable = map[float64]string{ - -24: "y", // yocto - -21: "z", // zepto - -18: "a", // atto - -15: "f", // femto - -12: "p", // pico - -9: "n", // nano - -6: "µ", // micro - -3: "m", // milli - 0: "", - 3: "k", // kilo - 6: "M", // mega - 9: "G", // giga - 12: "T", // tera - 15: "P", // peta - 18: "E", // exa - 21: "Z", // zetta - 24: "Y", // yotta -} - -var revSIPrefixTable = revfmap(siPrefixTable) - -// revfmap reverses the map and precomputes the power multiplier -func revfmap(in map[float64]string) map[string]float64 { - rv := map[string]float64{} - for k, v := range in { - rv[v] = math.Pow(10, k) - } - return rv -} - -var riParseRegex *regexp.Regexp - -func init() { - ri := `^([\-0-9.]+)\s?([` - for _, v := range siPrefixTable { - ri += v - } - ri += `]?)(.*)` - - riParseRegex = regexp.MustCompile(ri) -} - -// ComputeSI finds the most appropriate SI prefix for the given number -// and returns the prefix along with the value adjusted to be within -// that prefix. -// -// See also: SI, ParseSI. -// -// e.g. 
ComputeSI(2.2345e-12) -> (2.2345, "p") -func ComputeSI(input float64) (float64, string) { - if input == 0 { - return 0, "" - } - mag := math.Abs(input) - exponent := math.Floor(logn(mag, 10)) - exponent = math.Floor(exponent/3) * 3 - - value := mag / math.Pow(10, exponent) - - // Handle special case where value is exactly 1000.0 - // Should return 1 M instead of 1000 k - if value == 1000.0 { - exponent += 3 - value = mag / math.Pow(10, exponent) - } - - value = math.Copysign(value, input) - - prefix := siPrefixTable[exponent] - return value, prefix -} - -// SI returns a string with default formatting. -// -// SI uses Ftoa to format float value, removing trailing zeros. -// -// See also: ComputeSI, ParseSI. -// -// e.g. SI(1000000, "B") -> 1 MB -// e.g. SI(2.2345e-12, "F") -> 2.2345 pF -func SI(input float64, unit string) string { - value, prefix := ComputeSI(input) - return Ftoa(value) + " " + prefix + unit -} - -// SIWithDigits works like SI but limits the resulting string to the -// given number of decimal places. -// -// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB -// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF -func SIWithDigits(input float64, decimals int, unit string) string { - value, prefix := ComputeSI(input) - return FtoaWithDigits(value, decimals) + " " + prefix + unit -} - -var errInvalid = errors.New("invalid input") - -// ParseSI parses an SI string back into the number and unit. -// -// See also: SI, ComputeSI. -// -// e.g. 
ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil) -func ParseSI(input string) (float64, string, error) { - found := riParseRegex.FindStringSubmatch(input) - if len(found) != 4 { - return 0, "", errInvalid - } - mag := revSIPrefixTable[found[2]] - unit := found[3] - - base, err := strconv.ParseFloat(found[1], 64) - return base * mag, unit, err -} diff --git a/vendor/github.com/dustin/go-humanize/times.go b/vendor/github.com/dustin/go-humanize/times.go deleted file mode 100644 index dd3fbf5ef..000000000 --- a/vendor/github.com/dustin/go-humanize/times.go +++ /dev/null @@ -1,117 +0,0 @@ -package humanize - -import ( - "fmt" - "math" - "sort" - "time" -) - -// Seconds-based time units -const ( - Day = 24 * time.Hour - Week = 7 * Day - Month = 30 * Day - Year = 12 * Month - LongTime = 37 * Year -) - -// Time formats a time into a relative string. -// -// Time(someT) -> "3 weeks ago" -func Time(then time.Time) string { - return RelTime(then, time.Now(), "ago", "from now") -} - -// A RelTimeMagnitude struct contains a relative time point at which -// the relative format of time will switch to a new format string. A -// slice of these in ascending order by their "D" field is passed to -// CustomRelTime to format durations. -// -// The Format field is a string that may contain a "%s" which will be -// replaced with the appropriate signed label (e.g. "ago" or "from -// now") and a "%d" that will be replaced by the quantity. -// -// The DivBy field is the amount of time the time difference must be -// divided by in order to display correctly. -// -// e.g. if D is 2*time.Minute and you want to display "%d minutes %s" -// DivBy should be time.Minute so whatever the duration is will be -// expressed in minutes. 
-type RelTimeMagnitude struct { - D time.Duration - Format string - DivBy time.Duration -} - -var defaultMagnitudes = []RelTimeMagnitude{ - {time.Second, "now", time.Second}, - {2 * time.Second, "1 second %s", 1}, - {time.Minute, "%d seconds %s", time.Second}, - {2 * time.Minute, "1 minute %s", 1}, - {time.Hour, "%d minutes %s", time.Minute}, - {2 * time.Hour, "1 hour %s", 1}, - {Day, "%d hours %s", time.Hour}, - {2 * Day, "1 day %s", 1}, - {Week, "%d days %s", Day}, - {2 * Week, "1 week %s", 1}, - {Month, "%d weeks %s", Week}, - {2 * Month, "1 month %s", 1}, - {Year, "%d months %s", Month}, - {18 * Month, "1 year %s", 1}, - {2 * Year, "2 years %s", 1}, - {LongTime, "%d years %s", Year}, - {math.MaxInt64, "a long while %s", 1}, -} - -// RelTime formats a time into a relative string. -// -// It takes two times and two labels. In addition to the generic time -// delta string (e.g. 5 minutes), the labels are used applied so that -// the label corresponding to the smaller time is applied. -// -// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier" -func RelTime(a, b time.Time, albl, blbl string) string { - return CustomRelTime(a, b, albl, blbl, defaultMagnitudes) -} - -// CustomRelTime formats a time into a relative string. -// -// It takes two times two labels and a table of relative time formats. -// In addition to the generic time delta string (e.g. 5 minutes), the -// labels are used applied so that the label corresponding to the -// smaller time is applied. 
-func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string { - lbl := albl - diff := b.Sub(a) - - if a.After(b) { - lbl = blbl - diff = a.Sub(b) - } - - n := sort.Search(len(magnitudes), func(i int) bool { - return magnitudes[i].D > diff - }) - - if n >= len(magnitudes) { - n = len(magnitudes) - 1 - } - mag := magnitudes[n] - args := []interface{}{} - escaped := false - for _, ch := range mag.Format { - if escaped { - switch ch { - case 's': - args = append(args, lbl) - case 'd': - args = append(args, diff/mag.DivBy) - } - escaped = false - } else { - escaped = ch == '%' - } - } - return fmt.Sprintf(mag.Format, args...) -} diff --git a/vendor/github.com/flynn/go-shlex/COPYING b/vendor/github.com/flynn/go-shlex/COPYING deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/flynn/go-shlex/COPYING +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/flynn/go-shlex/Makefile b/vendor/github.com/flynn/go-shlex/Makefile deleted file mode 100644 index 038d9a489..000000000 --- a/vendor/github.com/flynn/go-shlex/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2011 Google Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include $(GOROOT)/src/Make.inc - -TARG=shlex -GOFILES=\ - shlex.go\ - -include $(GOROOT)/src/Make.pkg diff --git a/vendor/github.com/flynn/go-shlex/README.md b/vendor/github.com/flynn/go-shlex/README.md deleted file mode 100644 index c86bcc066..000000000 --- a/vendor/github.com/flynn/go-shlex/README.md +++ /dev/null @@ -1,2 +0,0 @@ -go-shlex is a simple lexer for go that supports shell-style quoting, -commenting, and escaping. 
diff --git a/vendor/github.com/flynn/go-shlex/shlex.go b/vendor/github.com/flynn/go-shlex/shlex.go deleted file mode 100644 index 7aeace801..000000000 --- a/vendor/github.com/flynn/go-shlex/shlex.go +++ /dev/null @@ -1,457 +0,0 @@ -/* -Copyright 2012 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package shlex - -/* -Package shlex implements a simple lexer which splits input in to tokens using -shell-style rules for quoting and commenting. -*/ -import ( - "bufio" - "errors" - "fmt" - "io" - "strings" -) - -/* -A TokenType is a top-level token; a word, space, comment, unknown. -*/ -type TokenType int - -/* -A RuneTokenType is the type of a UTF-8 character; a character, quote, space, escape. -*/ -type RuneTokenType int - -type lexerState int - -type Token struct { - tokenType TokenType - value string -} - -/* -Two tokens are equal if both their types and values are equal. A nil token can -never equal another token. 
-*/ -func (a *Token) Equal(b *Token) bool { - if a == nil || b == nil { - return false - } - if a.tokenType != b.tokenType { - return false - } - return a.value == b.value -} - -const ( - RUNE_CHAR string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-,/@$*()+=><:;&^%~|!?[]{}" - RUNE_SPACE string = " \t\r\n" - RUNE_ESCAPING_QUOTE string = "\"" - RUNE_NONESCAPING_QUOTE string = "'" - RUNE_ESCAPE = "\\" - RUNE_COMMENT = "#" - - RUNETOKEN_UNKNOWN RuneTokenType = 0 - RUNETOKEN_CHAR RuneTokenType = 1 - RUNETOKEN_SPACE RuneTokenType = 2 - RUNETOKEN_ESCAPING_QUOTE RuneTokenType = 3 - RUNETOKEN_NONESCAPING_QUOTE RuneTokenType = 4 - RUNETOKEN_ESCAPE RuneTokenType = 5 - RUNETOKEN_COMMENT RuneTokenType = 6 - RUNETOKEN_EOF RuneTokenType = 7 - - TOKEN_UNKNOWN TokenType = 0 - TOKEN_WORD TokenType = 1 - TOKEN_SPACE TokenType = 2 - TOKEN_COMMENT TokenType = 3 - - STATE_START lexerState = 0 - STATE_INWORD lexerState = 1 - STATE_ESCAPING lexerState = 2 - STATE_ESCAPING_QUOTED lexerState = 3 - STATE_QUOTED_ESCAPING lexerState = 4 - STATE_QUOTED lexerState = 5 - STATE_COMMENT lexerState = 6 - - INITIAL_TOKEN_CAPACITY int = 100 -) - -/* -A type for classifying characters. This allows for different sorts of -classifiers - those accepting extended non-ascii chars, or strict posix -compatibility, for example. -*/ -type TokenClassifier struct { - typeMap map[int32]RuneTokenType -} - -func addRuneClass(typeMap *map[int32]RuneTokenType, runes string, tokenType RuneTokenType) { - for _, rune := range runes { - (*typeMap)[int32(rune)] = tokenType - } -} - -/* -Create a new classifier for basic ASCII characters. 
-*/ -func NewDefaultClassifier() *TokenClassifier { - typeMap := map[int32]RuneTokenType{} - addRuneClass(&typeMap, RUNE_CHAR, RUNETOKEN_CHAR) - addRuneClass(&typeMap, RUNE_SPACE, RUNETOKEN_SPACE) - addRuneClass(&typeMap, RUNE_ESCAPING_QUOTE, RUNETOKEN_ESCAPING_QUOTE) - addRuneClass(&typeMap, RUNE_NONESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE) - addRuneClass(&typeMap, RUNE_ESCAPE, RUNETOKEN_ESCAPE) - addRuneClass(&typeMap, RUNE_COMMENT, RUNETOKEN_COMMENT) - return &TokenClassifier{ - typeMap: typeMap} -} - -func (classifier *TokenClassifier) ClassifyRune(rune int32) RuneTokenType { - return classifier.typeMap[rune] -} - -/* -A type for turning an input stream in to a sequence of strings. Whitespace and -comments are skipped. -*/ -type Lexer struct { - tokenizer *Tokenizer -} - -/* -Create a new lexer. -*/ -func NewLexer(r io.Reader) (*Lexer, error) { - - tokenizer, err := NewTokenizer(r) - if err != nil { - return nil, err - } - lexer := &Lexer{tokenizer: tokenizer} - return lexer, nil -} - -/* -Return the next word, and an error value. If there are no more words, the error -will be io.EOF. -*/ -func (l *Lexer) NextWord() (string, error) { - var token *Token - var err error - for { - token, err = l.tokenizer.NextToken() - if err != nil { - return "", err - } - switch token.tokenType { - case TOKEN_WORD: - { - return token.value, nil - } - case TOKEN_COMMENT: - { - // skip comments - } - default: - { - panic(fmt.Sprintf("Unknown token type: %v", token.tokenType)) - } - } - } - return "", io.EOF -} - -/* -A type for turning an input stream in to a sequence of typed tokens. -*/ -type Tokenizer struct { - input *bufio.Reader - classifier *TokenClassifier -} - -/* -Create a new tokenizer. -*/ -func NewTokenizer(r io.Reader) (*Tokenizer, error) { - input := bufio.NewReader(r) - classifier := NewDefaultClassifier() - tokenizer := &Tokenizer{ - input: input, - classifier: classifier} - return tokenizer, nil -} - -/* -Scan the stream for the next token. 
- -This uses an internal state machine. It will panic if it encounters a character -which it does not know how to handle. -*/ -func (t *Tokenizer) scanStream() (*Token, error) { - state := STATE_START - var tokenType TokenType - value := make([]int32, 0, INITIAL_TOKEN_CAPACITY) - var ( - nextRune int32 - nextRuneType RuneTokenType - err error - ) -SCAN: - for { - nextRune, _, err = t.input.ReadRune() - nextRuneType = t.classifier.ClassifyRune(nextRune) - if err != nil { - if err == io.EOF { - nextRuneType = RUNETOKEN_EOF - err = nil - } else { - return nil, err - } - } - switch state { - case STATE_START: // no runes read yet - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - return nil, io.EOF - } - case RUNETOKEN_CHAR: - { - tokenType = TOKEN_WORD - value = append(value, nextRune) - state = STATE_INWORD - } - case RUNETOKEN_SPACE: - { - } - case RUNETOKEN_ESCAPING_QUOTE: - { - tokenType = TOKEN_WORD - state = STATE_QUOTED_ESCAPING - } - case RUNETOKEN_NONESCAPING_QUOTE: - { - tokenType = TOKEN_WORD - state = STATE_QUOTED - } - case RUNETOKEN_ESCAPE: - { - tokenType = TOKEN_WORD - state = STATE_ESCAPING - } - case RUNETOKEN_COMMENT: - { - tokenType = TOKEN_COMMENT - state = STATE_COMMENT - } - default: - { - return nil, errors.New(fmt.Sprintf("Unknown rune: %v", nextRune)) - } - } - } - case STATE_INWORD: // in a regular word - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_COMMENT: - { - value = append(value, nextRune) - } - case RUNETOKEN_SPACE: - { - t.input.UnreadRune() - break SCAN - } - case RUNETOKEN_ESCAPING_QUOTE: - { - state = STATE_QUOTED_ESCAPING - } - case RUNETOKEN_NONESCAPING_QUOTE: - { - state = STATE_QUOTED - } - case RUNETOKEN_ESCAPE: - { - state = STATE_ESCAPING - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_ESCAPING: // the next rune after an escape character - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - err = 
errors.New("EOF found after escape character") - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT: - { - state = STATE_INWORD - value = append(value, nextRune) - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_ESCAPING_QUOTED: // the next rune after an escape character, in double quotes - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - err = errors.New("EOF found after escape character") - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT: - { - state = STATE_QUOTED_ESCAPING - value = append(value, nextRune) - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_QUOTED_ESCAPING: // in escaping double quotes - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - err = errors.New("EOF found when expecting closing quote.") - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_COMMENT: - { - value = append(value, nextRune) - } - case RUNETOKEN_ESCAPING_QUOTE: - { - state = STATE_INWORD - } - case RUNETOKEN_ESCAPE: - { - state = STATE_ESCAPING_QUOTED - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_QUOTED: // in non-escaping single quotes - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - err = errors.New("EOF found when expecting closing quote.") - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT: - { - value = append(value, nextRune) - } - case RUNETOKEN_NONESCAPING_QUOTE: - { - state = STATE_INWORD - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_COMMENT: - { - switch nextRuneType { - 
case RUNETOKEN_EOF: - { - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT, RUNETOKEN_NONESCAPING_QUOTE: - { - value = append(value, nextRune) - } - case RUNETOKEN_SPACE: - { - if nextRune == '\n' { - state = STATE_START - break SCAN - } else { - value = append(value, nextRune) - } - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - default: - { - panic(fmt.Sprintf("Unexpected state: %v", state)) - } - } - } - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err -} - -/* -Return the next token in the stream, and an error value. If there are no more -tokens available, the error value will be io.EOF. -*/ -func (t *Tokenizer) NextToken() (*Token, error) { - return t.scanStream() -} - -/* -Split a string in to a slice of strings, based upon shell-style rules for -quoting, escaping, and spaces. -*/ -func Split(s string) ([]string, error) { - l, err := NewLexer(strings.NewReader(s)) - if err != nil { - return nil, err - } - subStrings := []string{} - for { - word, err := l.NextWord() - if err != nil { - if err == io.EOF { - return subStrings, nil - } - return subStrings, err - } - subStrings = append(subStrings, word) - } - return subStrings, nil -} diff --git a/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go b/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go deleted file mode 100644 index 3525a004b..000000000 --- a/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go +++ /dev/null @@ -1,294 +0,0 @@ -// +build ignore - -package main - -import ( - "bytes" - "fmt" - "go/format" - "html/template" - "io/ioutil" - "log" - "path/filepath" - "strings" - - "github.com/globalsign/mgo/internal/json" -) - -func main() { - log.SetFlags(0) - log.SetPrefix(name + ": ") - - var g Generator - - fmt.Fprintf(&g, "// Code generated by \"%s.go\"; DO NOT EDIT\n\n", name) - - 
src := g.generate() - - err := ioutil.WriteFile(fmt.Sprintf("%s.go", strings.TrimSuffix(name, "_generator")), src, 0644) - if err != nil { - log.Fatalf("writing output: %s", err) - } -} - -// Generator holds the state of the analysis. Primarily used to buffer -// the output for format.Source. -type Generator struct { - bytes.Buffer // Accumulated output. -} - -// format returns the gofmt-ed contents of the Generator's buffer. -func (g *Generator) format() []byte { - src, err := format.Source(g.Bytes()) - if err != nil { - // Should never happen, but can arise when developing this code. - // The user can compile the output to see the error. - log.Printf("warning: internal error: invalid Go generated: %s", err) - log.Printf("warning: compile the package to analyze the error") - return g.Bytes() - } - return src -} - -// EVERYTHING ABOVE IS CONSTANT BETWEEN THE GENERATORS - -const name = "bson_corpus_spec_test_generator" - -func (g *Generator) generate() []byte { - - testFiles, err := filepath.Glob("./specdata/specifications/source/bson-corpus/tests/*.json") - if err != nil { - log.Fatalf("error reading bson-corpus files: %s", err) - } - - tests, err := g.loadTests(testFiles) - if err != nil { - log.Fatalf("error loading tests: %s", err) - } - - tmpl, err := g.getTemplate() - if err != nil { - log.Fatalf("error loading template: %s", err) - } - - tmpl.Execute(&g.Buffer, tests) - - return g.format() -} - -func (g *Generator) loadTests(filenames []string) ([]*testDef, error) { - var tests []*testDef - for _, filename := range filenames { - test, err := g.loadTest(filename) - if err != nil { - return nil, err - } - - tests = append(tests, test) - } - - return tests, nil -} - -func (g *Generator) loadTest(filename string) (*testDef, error) { - content, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - - var testDef testDef - err = json.Unmarshal(content, &testDef) - if err != nil { - return nil, err - } - - names := make(map[string]struct{}) - - 
for i := len(testDef.Valid) - 1; i >= 0; i-- { - if testDef.BsonType == "0x05" && testDef.Valid[i].Description == "subtype 0x02" { - testDef.Valid = append(testDef.Valid[:i], testDef.Valid[i+1:]...) - continue - } - - name := cleanupFuncName(testDef.Description + "_" + testDef.Valid[i].Description) - nameIdx := name - j := 1 - for { - if _, ok := names[nameIdx]; !ok { - break - } - - nameIdx = fmt.Sprintf("%s_%d", name, j) - } - - names[nameIdx] = struct{}{} - - testDef.Valid[i].TestDef = &testDef - testDef.Valid[i].Name = nameIdx - testDef.Valid[i].StructTest = testDef.TestKey != "" && - (testDef.BsonType != "0x05" || strings.Contains(testDef.Valid[i].Description, "0x00")) && - !testDef.Deprecated - } - - for i := len(testDef.DecodeErrors) - 1; i >= 0; i-- { - if strings.Contains(testDef.DecodeErrors[i].Description, "UTF-8") { - testDef.DecodeErrors = append(testDef.DecodeErrors[:i], testDef.DecodeErrors[i+1:]...) - continue - } - - name := cleanupFuncName(testDef.Description + "_" + testDef.DecodeErrors[i].Description) - nameIdx := name - j := 1 - for { - if _, ok := names[nameIdx]; !ok { - break - } - - nameIdx = fmt.Sprintf("%s_%d", name, j) - } - names[nameIdx] = struct{}{} - - testDef.DecodeErrors[i].Name = nameIdx - } - - return &testDef, nil -} - -func (g *Generator) getTemplate() (*template.Template, error) { - content := `package bson_test - -import ( - "encoding/hex" - "time" - - . 
"gopkg.in/check.v1" - "github.com/globalsign/mgo/bson" -) - -func testValid(c *C, in []byte, expected []byte, result interface{}) { - err := bson.Unmarshal(in, result) - c.Assert(err, IsNil) - - out, err := bson.Marshal(result) - c.Assert(err, IsNil) - - c.Assert(string(expected), Equals, string(out), Commentf("roundtrip failed for %T, expected '%x' but got '%x'", result, expected, out)) -} - -func testDecodeSkip(c *C, in []byte) { - err := bson.Unmarshal(in, &struct{}{}) - c.Assert(err, IsNil) -} - -func testDecodeError(c *C, in []byte, result interface{}) { - err := bson.Unmarshal(in, result) - c.Assert(err, Not(IsNil)) -} - -{{range .}} -{{range .Valid}} -func (s *S) Test{{.Name}}(c *C) { - b, err := hex.DecodeString("{{.Bson}}") - c.Assert(err, IsNil) - - {{if .CanonicalBson}} - cb, err := hex.DecodeString("{{.CanonicalBson}}") - c.Assert(err, IsNil) - {{else}} - cb := b - {{end}} - - var resultD bson.D - testValid(c, b, cb, &resultD) - {{if .StructTest}}var resultS struct { - Element {{.TestDef.GoType}} ` + "`bson:\"{{.TestDef.TestKey}}\"`" + ` - } - testValid(c, b, cb, &resultS){{end}} - - testDecodeSkip(c, b) -} -{{end}} - -{{range .DecodeErrors}} -func (s *S) Test{{.Name}}(c *C) { - b, err := hex.DecodeString("{{.Bson}}") - c.Assert(err, IsNil) - - var resultD bson.D - testDecodeError(c, b, &resultD) -} -{{end}} -{{end}} -` - tmpl, err := template.New("").Parse(content) - if err != nil { - return nil, err - } - return tmpl, nil -} - -func cleanupFuncName(name string) string { - return strings.Map(func(r rune) rune { - if (r >= 48 && r <= 57) || (r >= 65 && r <= 90) || (r >= 97 && r <= 122) { - return r - } - return '_' - }, name) -} - -type testDef struct { - Description string `json:"description"` - BsonType string `json:"bson_type"` - TestKey string `json:"test_key"` - Valid []*valid `json:"valid"` - DecodeErrors []*decodeError `json:"decodeErrors"` - Deprecated bool `json:"deprecated"` -} - -func (t *testDef) GoType() string { - switch t.BsonType { - 
case "0x01": - return "float64" - case "0x02": - return "string" - case "0x03": - return "bson.D" - case "0x04": - return "[]interface{}" - case "0x05": - return "[]byte" - case "0x07": - return "bson.ObjectId" - case "0x08": - return "bool" - case "0x09": - return "time.Time" - case "0x0E": - return "string" - case "0x10": - return "int32" - case "0x12": - return "int64" - case "0x13": - return "bson.Decimal" - default: - return "interface{}" - } -} - -type valid struct { - Description string `json:"description"` - Bson string `json:"bson"` - CanonicalBson string `json:"canonical_bson"` - - Name string - StructTest bool - TestDef *testDef -} - -type decodeError struct { - Description string `json:"description"` - Bson string `json:"bson"` - - Name string -} diff --git a/vendor/github.com/go-acme/lego/LICENSE b/vendor/github.com/go-acme/lego/LICENSE deleted file mode 100644 index 270cba089..000000000 --- a/vendor/github.com/go-acme/lego/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015-2017 Sebastian Erhart - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/go-acme/lego/acme/api/account.go b/vendor/github.com/go-acme/lego/acme/api/account.go deleted file mode 100644 index fd2331424..000000000 --- a/vendor/github.com/go-acme/lego/acme/api/account.go +++ /dev/null @@ -1,69 +0,0 @@ -package api - -import ( - "encoding/base64" - "errors" - "fmt" - - "github.com/go-acme/lego/acme" -) - -type AccountService service - -// New Creates a new account. -func (a *AccountService) New(req acme.Account) (acme.ExtendedAccount, error) { - var account acme.Account - resp, err := a.core.post(a.core.GetDirectory().NewAccountURL, req, &account) - location := getLocation(resp) - - if len(location) > 0 { - a.core.jws.SetKid(location) - } - - if err != nil { - return acme.ExtendedAccount{Location: location}, err - } - - return acme.ExtendedAccount{Account: account, Location: location}, nil -} - -// NewEAB Creates a new account with an External Account Binding. -func (a *AccountService) NewEAB(accMsg acme.Account, kid string, hmacEncoded string) (acme.ExtendedAccount, error) { - hmac, err := base64.RawURLEncoding.DecodeString(hmacEncoded) - if err != nil { - return acme.ExtendedAccount{}, fmt.Errorf("acme: could not decode hmac key: %v", err) - } - - eabJWS, err := a.core.signEABContent(a.core.GetDirectory().NewAccountURL, kid, hmac) - if err != nil { - return acme.ExtendedAccount{}, fmt.Errorf("acme: error signing eab content: %v", err) - } - accMsg.ExternalAccountBinding = eabJWS - - return a.New(accMsg) -} - -// Get Retrieves an account. 
-func (a *AccountService) Get(accountURL string) (acme.Account, error) { - if len(accountURL) == 0 { - return acme.Account{}, errors.New("account[get]: empty URL") - } - - var account acme.Account - _, err := a.core.post(accountURL, acme.Account{}, &account) - if err != nil { - return acme.Account{}, err - } - return account, nil -} - -// Deactivate Deactivates an account. -func (a *AccountService) Deactivate(accountURL string) error { - if len(accountURL) == 0 { - return errors.New("account[deactivate]: empty URL") - } - - req := acme.Account{Status: acme.StatusDeactivated} - _, err := a.core.post(accountURL, req, nil) - return err -} diff --git a/vendor/github.com/go-acme/lego/acme/api/api.go b/vendor/github.com/go-acme/lego/acme/api/api.go deleted file mode 100644 index 912e7c5ae..000000000 --- a/vendor/github.com/go-acme/lego/acme/api/api.go +++ /dev/null @@ -1,166 +0,0 @@ -package api - -import ( - "bytes" - "context" - "crypto" - "encoding/json" - "errors" - "fmt" - "net/http" - "time" - - "github.com/cenkalti/backoff" - "github.com/go-acme/lego/acme" - "github.com/go-acme/lego/acme/api/internal/nonces" - "github.com/go-acme/lego/acme/api/internal/secure" - "github.com/go-acme/lego/acme/api/internal/sender" - "github.com/go-acme/lego/log" -) - -// Core ACME/LE core API. -type Core struct { - doer *sender.Doer - nonceManager *nonces.Manager - jws *secure.JWS - directory acme.Directory - HTTPClient *http.Client - - common service // Reuse a single struct instead of allocating one for each service on the heap. - Accounts *AccountService - Authorizations *AuthorizationService - Certificates *CertificateService - Challenges *ChallengeService - Orders *OrderService -} - -// New Creates a new Core. 
-func New(httpClient *http.Client, userAgent string, caDirURL, kid string, privateKey crypto.PrivateKey) (*Core, error) { - doer := sender.NewDoer(httpClient, userAgent) - - dir, err := getDirectory(doer, caDirURL) - if err != nil { - return nil, err - } - - nonceManager := nonces.NewManager(doer, dir.NewNonceURL) - - jws := secure.NewJWS(privateKey, kid, nonceManager) - - c := &Core{doer: doer, nonceManager: nonceManager, jws: jws, directory: dir, HTTPClient: httpClient} - - c.common.core = c - c.Accounts = (*AccountService)(&c.common) - c.Authorizations = (*AuthorizationService)(&c.common) - c.Certificates = (*CertificateService)(&c.common) - c.Challenges = (*ChallengeService)(&c.common) - c.Orders = (*OrderService)(&c.common) - - return c, nil -} - -// post performs an HTTP POST request and parses the response body as JSON, -// into the provided respBody object. -func (a *Core) post(uri string, reqBody, response interface{}) (*http.Response, error) { - content, err := json.Marshal(reqBody) - if err != nil { - return nil, errors.New("failed to marshal message") - } - - return a.retrievablePost(uri, content, response) -} - -// postAsGet performs an HTTP POST ("POST-as-GET") request. -// https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-6.3 -func (a *Core) postAsGet(uri string, response interface{}) (*http.Response, error) { - return a.retrievablePost(uri, []byte{}, response) -} - -func (a *Core) retrievablePost(uri string, content []byte, response interface{}) (*http.Response, error) { - // during tests, allow to support ~90% of bad nonce with a minimum of attempts. 
- bo := backoff.NewExponentialBackOff() - bo.InitialInterval = 200 * time.Millisecond - bo.MaxInterval = 5 * time.Second - bo.MaxElapsedTime = 20 * time.Second - - ctx, cancel := context.WithCancel(context.Background()) - - var resp *http.Response - operation := func() error { - var err error - resp, err = a.signedPost(uri, content, response) - if err != nil { - switch err.(type) { - // Retry if the nonce was invalidated - case *acme.NonceError: - log.Infof("nonce error retry: %s", err) - return err - default: - cancel() - return err - } - } - - return nil - } - - err := backoff.Retry(operation, backoff.WithContext(bo, ctx)) - if err != nil { - return nil, err - } - - return resp, nil -} - -func (a *Core) signedPost(uri string, content []byte, response interface{}) (*http.Response, error) { - signedContent, err := a.jws.SignContent(uri, content) - if err != nil { - return nil, fmt.Errorf("failed to post JWS message -> failed to sign content -> %v", err) - } - - signedBody := bytes.NewBuffer([]byte(signedContent.FullSerialize())) - - resp, err := a.doer.Post(uri, signedBody, "application/jose+json", response) - - // nonceErr is ignored to keep the root error. 
- nonce, nonceErr := nonces.GetFromResponse(resp) - if nonceErr == nil { - a.nonceManager.Push(nonce) - } - - return resp, err -} - -func (a *Core) signEABContent(newAccountURL, kid string, hmac []byte) ([]byte, error) { - eabJWS, err := a.jws.SignEABContent(newAccountURL, kid, hmac) - if err != nil { - return nil, err - } - - return []byte(eabJWS.FullSerialize()), nil -} - -// GetKeyAuthorization Gets the key authorization -func (a *Core) GetKeyAuthorization(token string) (string, error) { - return a.jws.GetKeyAuthorization(token) -} - -func (a *Core) GetDirectory() acme.Directory { - return a.directory -} - -func getDirectory(do *sender.Doer, caDirURL string) (acme.Directory, error) { - var dir acme.Directory - if _, err := do.Get(caDirURL, &dir); err != nil { - return dir, fmt.Errorf("get directory at '%s': %v", caDirURL, err) - } - - if dir.NewAccountURL == "" { - return dir, errors.New("directory missing new registration URL") - } - if dir.NewOrderURL == "" { - return dir, errors.New("directory missing new order URL") - } - - return dir, nil -} diff --git a/vendor/github.com/go-acme/lego/acme/api/authorization.go b/vendor/github.com/go-acme/lego/acme/api/authorization.go deleted file mode 100644 index a59fa0d25..000000000 --- a/vendor/github.com/go-acme/lego/acme/api/authorization.go +++ /dev/null @@ -1,34 +0,0 @@ -package api - -import ( - "errors" - - "github.com/go-acme/lego/acme" -) - -type AuthorizationService service - -// Get Gets an authorization. -func (c *AuthorizationService) Get(authzURL string) (acme.Authorization, error) { - if len(authzURL) == 0 { - return acme.Authorization{}, errors.New("authorization[get]: empty URL") - } - - var authz acme.Authorization - _, err := c.core.postAsGet(authzURL, &authz) - if err != nil { - return acme.Authorization{}, err - } - return authz, nil -} - -// Deactivate Deactivates an authorization. 
-func (c *AuthorizationService) Deactivate(authzURL string) error { - if len(authzURL) == 0 { - return errors.New("authorization[deactivate]: empty URL") - } - - var disabledAuth acme.Authorization - _, err := c.core.post(authzURL, acme.Authorization{Status: acme.StatusDeactivated}, &disabledAuth) - return err -} diff --git a/vendor/github.com/go-acme/lego/acme/api/certificate.go b/vendor/github.com/go-acme/lego/acme/api/certificate.go deleted file mode 100644 index 65904e76d..000000000 --- a/vendor/github.com/go-acme/lego/acme/api/certificate.go +++ /dev/null @@ -1,99 +0,0 @@ -package api - -import ( - "crypto/x509" - "encoding/pem" - "errors" - "io/ioutil" - "net/http" - - "github.com/go-acme/lego/acme" - "github.com/go-acme/lego/certcrypto" - "github.com/go-acme/lego/log" -) - -// maxBodySize is the maximum size of body that we will read. -const maxBodySize = 1024 * 1024 - -type CertificateService service - -// Get Returns the certificate and the issuer certificate. -// 'bundle' is only applied if the issuer is provided by the 'up' link. -func (c *CertificateService) Get(certURL string, bundle bool) ([]byte, []byte, error) { - cert, up, err := c.get(certURL) - if err != nil { - return nil, nil, err - } - - // Get issuerCert from bundled response from Let's Encrypt - // See https://community.letsencrypt.org/t/acme-v2-no-up-link-in-response/64962 - _, issuer := pem.Decode(cert) - if issuer != nil { - return cert, issuer, nil - } - - issuer, err = c.getIssuerFromLink(up) - if err != nil { - // If we fail to acquire the issuer cert, return the issued certificate - do not fail. - log.Warnf("acme: Could not bundle issuer certificate [%s]: %v", certURL, err) - } else if len(issuer) > 0 { - // If bundle is true, we want to return a certificate bundle. - // To do this, we append the issuer cert to the issued cert. - if bundle { - cert = append(cert, issuer...) - } - } - - return cert, issuer, nil -} - -// Revoke Revokes a certificate. 
-func (c *CertificateService) Revoke(req acme.RevokeCertMessage) error { - _, err := c.core.post(c.core.GetDirectory().RevokeCertURL, req, nil) - return err -} - -// get Returns the certificate and the "up" link. -func (c *CertificateService) get(certURL string) ([]byte, string, error) { - if len(certURL) == 0 { - return nil, "", errors.New("certificate[get]: empty URL") - } - - resp, err := c.core.postAsGet(certURL, nil) - if err != nil { - return nil, "", err - } - - cert, err := ioutil.ReadAll(http.MaxBytesReader(nil, resp.Body, maxBodySize)) - if err != nil { - return nil, "", err - } - - // The issuer certificate link may be supplied via an "up" link - // in the response headers of a new certificate. - // See https://tools.ietf.org/html/draft-ietf-acme-acme-12#section-7.4.2 - up := getLink(resp.Header, "up") - - return cert, up, err -} - -// getIssuerFromLink requests the issuer certificate -func (c *CertificateService) getIssuerFromLink(up string) ([]byte, error) { - if len(up) == 0 { - return nil, nil - } - - log.Infof("acme: Requesting issuer cert from %s", up) - - cert, _, err := c.get(up) - if err != nil { - return nil, err - } - - _, err = x509.ParseCertificate(cert) - if err != nil { - return nil, err - } - - return certcrypto.PEMEncode(certcrypto.DERCertificateBytes(cert)), nil -} diff --git a/vendor/github.com/go-acme/lego/acme/api/challenge.go b/vendor/github.com/go-acme/lego/acme/api/challenge.go deleted file mode 100644 index f4e8dbeeb..000000000 --- a/vendor/github.com/go-acme/lego/acme/api/challenge.go +++ /dev/null @@ -1,45 +0,0 @@ -package api - -import ( - "errors" - - "github.com/go-acme/lego/acme" -) - -type ChallengeService service - -// New Creates a challenge. 
-func (c *ChallengeService) New(chlgURL string) (acme.ExtendedChallenge, error) { - if len(chlgURL) == 0 { - return acme.ExtendedChallenge{}, errors.New("challenge[new]: empty URL") - } - - // Challenge initiation is done by sending a JWS payload containing the trivial JSON object `{}`. - // We use an empty struct instance as the postJSON payload here to achieve this result. - var chlng acme.ExtendedChallenge - resp, err := c.core.post(chlgURL, struct{}{}, &chlng) - if err != nil { - return acme.ExtendedChallenge{}, err - } - - chlng.AuthorizationURL = getLink(resp.Header, "up") - chlng.RetryAfter = getRetryAfter(resp) - return chlng, nil -} - -// Get Gets a challenge. -func (c *ChallengeService) Get(chlgURL string) (acme.ExtendedChallenge, error) { - if len(chlgURL) == 0 { - return acme.ExtendedChallenge{}, errors.New("challenge[get]: empty URL") - } - - var chlng acme.ExtendedChallenge - resp, err := c.core.postAsGet(chlgURL, &chlng) - if err != nil { - return acme.ExtendedChallenge{}, err - } - - chlng.AuthorizationURL = getLink(resp.Header, "up") - chlng.RetryAfter = getRetryAfter(resp) - return chlng, nil -} diff --git a/vendor/github.com/go-acme/lego/acme/api/internal/nonces/nonce_manager.go b/vendor/github.com/go-acme/lego/acme/api/internal/nonces/nonce_manager.go deleted file mode 100644 index c08f6d493..000000000 --- a/vendor/github.com/go-acme/lego/acme/api/internal/nonces/nonce_manager.go +++ /dev/null @@ -1,78 +0,0 @@ -package nonces - -import ( - "errors" - "fmt" - "net/http" - "sync" - - "github.com/go-acme/lego/acme/api/internal/sender" -) - -// Manager Manages nonces. -type Manager struct { - do *sender.Doer - nonceURL string - nonces []string - sync.Mutex -} - -// NewManager Creates a new Manager. -func NewManager(do *sender.Doer, nonceURL string) *Manager { - return &Manager{ - do: do, - nonceURL: nonceURL, - } -} - -// Pop Pops a nonce. 
-func (n *Manager) Pop() (string, bool) { - n.Lock() - defer n.Unlock() - - if len(n.nonces) == 0 { - return "", false - } - - nonce := n.nonces[len(n.nonces)-1] - n.nonces = n.nonces[:len(n.nonces)-1] - return nonce, true -} - -// Push Pushes a nonce. -func (n *Manager) Push(nonce string) { - n.Lock() - defer n.Unlock() - n.nonces = append(n.nonces, nonce) -} - -// Nonce implement jose.NonceSource -func (n *Manager) Nonce() (string, error) { - if nonce, ok := n.Pop(); ok { - return nonce, nil - } - return n.getNonce() -} - -func (n *Manager) getNonce() (string, error) { - resp, err := n.do.Head(n.nonceURL) - if err != nil { - return "", fmt.Errorf("failed to get nonce from HTTP HEAD -> %v", err) - } - - return GetFromResponse(resp) -} - -// GetFromResponse Extracts a nonce from a HTTP response. -func GetFromResponse(resp *http.Response) (string, error) { - if resp == nil { - return "", errors.New("nil response") - } - - nonce := resp.Header.Get("Replay-Nonce") - if nonce == "" { - return "", fmt.Errorf("server did not respond with a proper nonce header") - } - - return nonce, nil -} diff --git a/vendor/github.com/go-acme/lego/acme/api/internal/secure/jws.go b/vendor/github.com/go-acme/lego/acme/api/internal/secure/jws.go deleted file mode 100644 index 213aeda0a..000000000 --- a/vendor/github.com/go-acme/lego/acme/api/internal/secure/jws.go +++ /dev/null @@ -1,130 +0,0 @@ -package secure - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "encoding/base64" - "fmt" - - "github.com/go-acme/lego/acme/api/internal/nonces" - jose "gopkg.in/square/go-jose.v2" -) - -// JWS Represents a JWS. -type JWS struct { - privKey crypto.PrivateKey - kid string // Key identifier - nonces *nonces.Manager -} - -// NewJWS Create a new JWS. -func NewJWS(privateKey crypto.PrivateKey, kid string, nonceManager *nonces.Manager) *JWS { - return &JWS{ - privKey: privateKey, - nonces: nonceManager, - kid: kid, - } -} - -// SetKid Sets a key identifier. 
-func (j *JWS) SetKid(kid string) { - j.kid = kid -} - -// SignContent Signs a content with the JWS. -func (j *JWS) SignContent(url string, content []byte) (*jose.JSONWebSignature, error) { - var alg jose.SignatureAlgorithm - switch k := j.privKey.(type) { - case *rsa.PrivateKey: - alg = jose.RS256 - case *ecdsa.PrivateKey: - if k.Curve == elliptic.P256() { - alg = jose.ES256 - } else if k.Curve == elliptic.P384() { - alg = jose.ES384 - } - } - - signKey := jose.SigningKey{ - Algorithm: alg, - Key: jose.JSONWebKey{Key: j.privKey, KeyID: j.kid}, - } - - options := jose.SignerOptions{ - NonceSource: j.nonces, - ExtraHeaders: map[jose.HeaderKey]interface{}{ - "url": url, - }, - } - - if j.kid == "" { - options.EmbedJWK = true - } - - signer, err := jose.NewSigner(signKey, &options) - if err != nil { - return nil, fmt.Errorf("failed to create jose signer -> %v", err) - } - - signed, err := signer.Sign(content) - if err != nil { - return nil, fmt.Errorf("failed to sign content -> %v", err) - } - return signed, nil -} - -// SignEABContent Signs an external account binding content with the JWS. -func (j *JWS) SignEABContent(url, kid string, hmac []byte) (*jose.JSONWebSignature, error) { - jwk := jose.JSONWebKey{Key: j.privKey} - jwkJSON, err := jwk.Public().MarshalJSON() - if err != nil { - return nil, fmt.Errorf("acme: error encoding eab jwk key: %v", err) - } - - signer, err := jose.NewSigner( - jose.SigningKey{Algorithm: jose.HS256, Key: hmac}, - &jose.SignerOptions{ - EmbedJWK: false, - ExtraHeaders: map[jose.HeaderKey]interface{}{ - "kid": kid, - "url": url, - }, - }, - ) - if err != nil { - return nil, fmt.Errorf("failed to create External Account Binding jose signer -> %v", err) - } - - signed, err := signer.Sign(jwkJSON) - if err != nil { - return nil, fmt.Errorf("failed to External Account Binding sign content -> %v", err) - } - - return signed, nil -} - -// GetKeyAuthorization Gets the key authorization for a token. 
-func (j *JWS) GetKeyAuthorization(token string) (string, error) { - var publicKey crypto.PublicKey - switch k := j.privKey.(type) { - case *ecdsa.PrivateKey: - publicKey = k.Public() - case *rsa.PrivateKey: - publicKey = k.Public() - } - - // Generate the Key Authorization for the challenge - jwk := &jose.JSONWebKey{Key: publicKey} - - thumbBytes, err := jwk.Thumbprint(crypto.SHA256) - if err != nil { - return "", err - } - - // unpad the base64URL - keyThumb := base64.RawURLEncoding.EncodeToString(thumbBytes) - - return token + "." + keyThumb, nil -} diff --git a/vendor/github.com/go-acme/lego/acme/api/internal/sender/sender.go b/vendor/github.com/go-acme/lego/acme/api/internal/sender/sender.go deleted file mode 100644 index e08f2ffba..000000000 --- a/vendor/github.com/go-acme/lego/acme/api/internal/sender/sender.go +++ /dev/null @@ -1,146 +0,0 @@ -package sender - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "runtime" - "strings" - - "github.com/go-acme/lego/acme" -) - -type RequestOption func(*http.Request) error - -func contentType(ct string) RequestOption { - return func(req *http.Request) error { - req.Header.Set("Content-Type", ct) - return nil - } -} - -type Doer struct { - httpClient *http.Client - userAgent string -} - -// NewDoer Creates a new Doer. -func NewDoer(client *http.Client, userAgent string) *Doer { - return &Doer{ - httpClient: client, - userAgent: userAgent, - } -} - -// Get performs a GET request with a proper User-Agent string. -// If "response" is not provided, callers should close resp.Body when done reading from it. -func (d *Doer) Get(url string, response interface{}) (*http.Response, error) { - req, err := d.newRequest(http.MethodGet, url, nil) - if err != nil { - return nil, err - } - - return d.do(req, response) -} - -// Head performs a HEAD request with a proper User-Agent string. -// The response body (resp.Body) is already closed when this function returns. 
-func (d *Doer) Head(url string) (*http.Response, error) { - req, err := d.newRequest(http.MethodHead, url, nil) - if err != nil { - return nil, err - } - - return d.do(req, nil) -} - -// Post performs a POST request with a proper User-Agent string. -// If "response" is not provided, callers should close resp.Body when done reading from it. -func (d *Doer) Post(url string, body io.Reader, bodyType string, response interface{}) (*http.Response, error) { - req, err := d.newRequest(http.MethodPost, url, body, contentType(bodyType)) - if err != nil { - return nil, err - } - - return d.do(req, response) -} - -func (d *Doer) newRequest(method, uri string, body io.Reader, opts ...RequestOption) (*http.Request, error) { - req, err := http.NewRequest(method, uri, body) - if err != nil { - return nil, fmt.Errorf("failed to create request: %v", err) - } - - req.Header.Set("User-Agent", d.formatUserAgent()) - - for _, opt := range opts { - err = opt(req) - if err != nil { - return nil, fmt.Errorf("failed to create request: %v", err) - } - } - - return req, nil -} - -func (d *Doer) do(req *http.Request, response interface{}) (*http.Response, error) { - resp, err := d.httpClient.Do(req) - if err != nil { - return nil, err - } - - if err = checkError(req, resp); err != nil { - return resp, err - } - - if response != nil { - raw, err := ioutil.ReadAll(resp.Body) - if err != nil { - return resp, err - } - - defer resp.Body.Close() - - err = json.Unmarshal(raw, response) - if err != nil { - return resp, fmt.Errorf("failed to unmarshal %q to type %T: %v", raw, response, err) - } - } - - return resp, nil -} - -// formatUserAgent builds and returns the User-Agent string to use in requests. 
-func (d *Doer) formatUserAgent() string { - ua := fmt.Sprintf("%s %s (%s; %s; %s)", d.userAgent, ourUserAgent, ourUserAgentComment, runtime.GOOS, runtime.GOARCH) - return strings.TrimSpace(ua) -} - -func checkError(req *http.Request, resp *http.Response) error { - if resp.StatusCode >= http.StatusBadRequest { - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("%d :: %s :: %s :: %v", resp.StatusCode, req.Method, req.URL, err) - } - - var errorDetails *acme.ProblemDetails - err = json.Unmarshal(body, &errorDetails) - if err != nil { - return fmt.Errorf("%d ::%s :: %s :: %v :: %s", resp.StatusCode, req.Method, req.URL, err, string(body)) - } - - errorDetails.Method = req.Method - errorDetails.URL = req.URL.String() - - // Check for errors we handle specifically - if errorDetails.HTTPStatus == http.StatusBadRequest && errorDetails.Type == acme.BadNonceErr { - return &acme.NonceError{ProblemDetails: errorDetails} - } - - return errorDetails - } - return nil -} diff --git a/vendor/github.com/go-acme/lego/acme/api/internal/sender/useragent.go b/vendor/github.com/go-acme/lego/acme/api/internal/sender/useragent.go deleted file mode 100644 index f01719547..000000000 --- a/vendor/github.com/go-acme/lego/acme/api/internal/sender/useragent.go +++ /dev/null @@ -1,14 +0,0 @@ -package sender - -// CODE GENERATED AUTOMATICALLY -// THIS FILE MUST NOT BE EDITED BY HAND - -const ( - // ourUserAgent is the User-Agent of this underlying library package. - ourUserAgent = "xenolf-acme/2.5.0" - - // ourUserAgentComment is part of the UA comment linked to the version status of this underlying library package. - // values: detach|release - // NOTE: Update this with each tagged release. 
- ourUserAgentComment = "release" -) diff --git a/vendor/github.com/go-acme/lego/acme/api/order.go b/vendor/github.com/go-acme/lego/acme/api/order.go deleted file mode 100644 index 11240b4cd..000000000 --- a/vendor/github.com/go-acme/lego/acme/api/order.go +++ /dev/null @@ -1,65 +0,0 @@ -package api - -import ( - "encoding/base64" - "errors" - - "github.com/go-acme/lego/acme" -) - -type OrderService service - -// New Creates a new order. -func (o *OrderService) New(domains []string) (acme.ExtendedOrder, error) { - var identifiers []acme.Identifier - for _, domain := range domains { - identifiers = append(identifiers, acme.Identifier{Type: "dns", Value: domain}) - } - - orderReq := acme.Order{Identifiers: identifiers} - - var order acme.Order - resp, err := o.core.post(o.core.GetDirectory().NewOrderURL, orderReq, &order) - if err != nil { - return acme.ExtendedOrder{}, err - } - - return acme.ExtendedOrder{ - Location: resp.Header.Get("Location"), - Order: order, - }, nil -} - -// Get Gets an order. -func (o *OrderService) Get(orderURL string) (acme.Order, error) { - if len(orderURL) == 0 { - return acme.Order{}, errors.New("order[get]: empty URL") - } - - var order acme.Order - _, err := o.core.postAsGet(orderURL, &order) - if err != nil { - return acme.Order{}, err - } - - return order, nil -} - -// UpdateForCSR Updates an order for a CSR. 
-func (o *OrderService) UpdateForCSR(orderURL string, csr []byte) (acme.Order, error) { - csrMsg := acme.CSRMessage{ - Csr: base64.RawURLEncoding.EncodeToString(csr), - } - - var order acme.Order - _, err := o.core.post(orderURL, csrMsg, &order) - if err != nil { - return acme.Order{}, err - } - - if order.Status == acme.StatusInvalid { - return acme.Order{}, order.Error - } - - return order, nil -} diff --git a/vendor/github.com/go-acme/lego/acme/api/service.go b/vendor/github.com/go-acme/lego/acme/api/service.go deleted file mode 100644 index ff043bc7b..000000000 --- a/vendor/github.com/go-acme/lego/acme/api/service.go +++ /dev/null @@ -1,45 +0,0 @@ -package api - -import ( - "net/http" - "regexp" -) - -type service struct { - core *Core -} - -// getLink get a rel into the Link header -func getLink(header http.Header, rel string) string { - var linkExpr = regexp.MustCompile(`<(.+?)>;\s*rel="(.+?)"`) - - for _, link := range header["Link"] { - for _, m := range linkExpr.FindAllStringSubmatch(link, -1) { - if len(m) != 3 { - continue - } - if m[2] == rel { - return m[1] - } - } - } - return "" -} - -// getLocation get the value of the header Location -func getLocation(resp *http.Response) string { - if resp == nil { - return "" - } - - return resp.Header.Get("Location") -} - -// getRetryAfter get the value of the header Retry-After -func getRetryAfter(resp *http.Response) string { - if resp == nil { - return "" - } - - return resp.Header.Get("Retry-After") -} diff --git a/vendor/github.com/go-acme/lego/acme/commons.go b/vendor/github.com/go-acme/lego/acme/commons.go deleted file mode 100644 index c4493696a..000000000 --- a/vendor/github.com/go-acme/lego/acme/commons.go +++ /dev/null @@ -1,284 +0,0 @@ -// Package acme contains all objects related the ACME endpoints. 
-// https://tools.ietf.org/html/draft-ietf-acme-acme-16 -package acme - -import ( - "encoding/json" - "time" -) - -// Challenge statuses -// https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-7.1.6 -const ( - StatusPending = "pending" - StatusInvalid = "invalid" - StatusValid = "valid" - StatusProcessing = "processing" - StatusDeactivated = "deactivated" - StatusExpired = "expired" - StatusRevoked = "revoked" -) - -// Directory the ACME directory object. -// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-7.1.1 -type Directory struct { - NewNonceURL string `json:"newNonce"` - NewAccountURL string `json:"newAccount"` - NewOrderURL string `json:"newOrder"` - NewAuthzURL string `json:"newAuthz"` - RevokeCertURL string `json:"revokeCert"` - KeyChangeURL string `json:"keyChange"` - Meta Meta `json:"meta"` -} - -// Meta the ACME meta object (related to Directory). -// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-7.1.1 -type Meta struct { - // termsOfService (optional, string): - // A URL identifying the current terms of service. - TermsOfService string `json:"termsOfService"` - - // website (optional, string): - // An HTTP or HTTPS URL locating a website providing more information about the ACME server. - Website string `json:"website"` - - // caaIdentities (optional, array of string): - // The hostnames that the ACME server recognizes as referring to itself - // for the purposes of CAA record validation as defined in [RFC6844]. - // Each string MUST represent the same sequence of ASCII code points - // that the server will expect to see as the "Issuer Domain Name" in a CAA issue or issuewild property tag. - // This allows clients to determine the correct issuer domain name to use when configuring CAA records. 
- CaaIdentities []string `json:"caaIdentities"` - - // externalAccountRequired (optional, boolean): - // If this field is present and set to "true", - // then the CA requires that all new- account requests include an "externalAccountBinding" field - // associating the new account with an external account. - ExternalAccountRequired bool `json:"externalAccountRequired"` -} - -// ExtendedAccount a extended Account. -type ExtendedAccount struct { - Account - // Contains the value of the response header `Location` - Location string `json:"-"` -} - -// Account the ACME account Object. -// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-7.1.2 -// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-7.3 -type Account struct { - // status (required, string): - // The status of this account. - // Possible values are: "valid", "deactivated", and "revoked". - // The value "deactivated" should be used to indicate client-initiated deactivation - // whereas "revoked" should be used to indicate server- initiated deactivation. (See Section 7.1.6) - Status string `json:"status,omitempty"` - - // contact (optional, array of string): - // An array of URLs that the server can use to contact the client for issues related to this account. - // For example, the server may wish to notify the client about server-initiated revocation or certificate expiration. - // For information on supported URL schemes, see Section 7.3 - Contact []string `json:"contact,omitempty"` - - // termsOfServiceAgreed (optional, boolean): - // Including this field in a new-account request, - // with a value of true, indicates the client's agreement with the terms of service. - // This field is not updateable by the client. - TermsOfServiceAgreed bool `json:"termsOfServiceAgreed,omitempty"` - - // orders (required, string): - // A URL from which a list of orders submitted by this account can be fetched via a POST-as-GET request, - // as described in Section 7.1.2.1. 
- Orders string `json:"orders,omitempty"` - - // onlyReturnExisting (optional, boolean): - // If this field is present with the value "true", - // then the server MUST NOT create a new account if one does not already exist. - // This allows a client to look up an account URL based on an account key (see Section 7.3.1). - OnlyReturnExisting bool `json:"onlyReturnExisting,omitempty"` - - // externalAccountBinding (optional, object): - // An optional field for binding the new account with an existing non-ACME account (see Section 7.3.4). - ExternalAccountBinding json.RawMessage `json:"externalAccountBinding,omitempty"` -} - -// ExtendedOrder a extended Order. -type ExtendedOrder struct { - Order - // The order URL, contains the value of the response header `Location` - Location string `json:"-"` -} - -// Order the ACME order Object. -// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-7.1.3 -type Order struct { - // status (required, string): - // The status of this order. - // Possible values are: "pending", "ready", "processing", "valid", and "invalid". - Status string `json:"status,omitempty"` - - // expires (optional, string): - // The timestamp after which the server will consider this order invalid, - // encoded in the format specified in RFC 3339 [RFC3339]. - // This field is REQUIRED for objects with "pending" or "valid" in the status field. - Expires string `json:"expires,omitempty"` - - // identifiers (required, array of object): - // An array of identifier objects that the order pertains to. - Identifiers []Identifier `json:"identifiers"` - - // notBefore (optional, string): - // The requested value of the notBefore field in the certificate, - // in the date format defined in [RFC3339]. - NotBefore string `json:"notBefore,omitempty"` - - // notAfter (optional, string): - // The requested value of the notAfter field in the certificate, - // in the date format defined in [RFC3339]. 
- NotAfter string `json:"notAfter,omitempty"` - - // error (optional, object): - // The error that occurred while processing the order, if any. - // This field is structured as a problem document [RFC7807]. - Error *ProblemDetails `json:"error,omitempty"` - - // authorizations (required, array of string): - // For pending orders, - // the authorizations that the client needs to complete before the requested certificate can be issued (see Section 7.5), - // including unexpired authorizations that the client has completed in the past for identifiers specified in the order. - // The authorizations required are dictated by server policy - // and there may not be a 1:1 relationship between the order identifiers and the authorizations required. - // For final orders (in the "valid" or "invalid" state), the authorizations that were completed. - // Each entry is a URL from which an authorization can be fetched with a POST-as-GET request. - Authorizations []string `json:"authorizations,omitempty"` - - // finalize (required, string): - // A URL that a CSR must be POSTed to once all of the order's authorizations are satisfied to finalize the order. - // The result of a successful finalization will be the population of the certificate URL for the order. - Finalize string `json:"finalize,omitempty"` - - // certificate (optional, string): - // A URL for the certificate that has been issued in response to this order - Certificate string `json:"certificate,omitempty"` -} - -// Authorization the ACME authorization object. -// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-7.1.4 -type Authorization struct { - // status (required, string): - // The status of this authorization. - // Possible values are: "pending", "valid", "invalid", "deactivated", "expired", and "revoked". 
- Status string `json:"status"` - - // expires (optional, string): - // The timestamp after which the server will consider this authorization invalid, - // encoded in the format specified in RFC 3339 [RFC3339]. - // This field is REQUIRED for objects with "valid" in the "status" field. - Expires time.Time `json:"expires,omitempty"` - - // identifier (required, object): - // The identifier that the account is authorized to represent - Identifier Identifier `json:"identifier,omitempty"` - - // challenges (required, array of objects): - // For pending authorizations, the challenges that the client can fulfill in order to prove possession of the identifier. - // For valid authorizations, the challenge that was validated. - // For invalid authorizations, the challenge that was attempted and failed. - // Each array entry is an object with parameters required to validate the challenge. - // A client should attempt to fulfill one of these challenges, - // and a server should consider any one of the challenges sufficient to make the authorization valid. - Challenges []Challenge `json:"challenges,omitempty"` - - // wildcard (optional, boolean): - // For authorizations created as a result of a newOrder request containing a DNS identifier - // with a value that contained a wildcard prefix this field MUST be present, and true. - Wildcard bool `json:"wildcard,omitempty"` -} - -// ExtendedChallenge a extended Challenge. -type ExtendedChallenge struct { - Challenge - // Contains the value of the response header `Retry-After` - RetryAfter string `json:"-"` - // Contains the value of the response header `Link` rel="up" - AuthorizationURL string `json:"-"` -} - -// Challenge the ACME challenge object. -// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-7.1.5 -// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-8 -type Challenge struct { - // type (required, string): - // The type of challenge encoded in the object. 
- Type string `json:"type"` - - // url (required, string): - // The URL to which a response can be posted. - URL string `json:"url"` - - // status (required, string): - // The status of this challenge. Possible values are: "pending", "processing", "valid", and "invalid". - Status string `json:"status"` - - // validated (optional, string): - // The time at which the server validated this challenge, - // encoded in the format specified in RFC 3339 [RFC3339]. - // This field is REQUIRED if the "status" field is "valid". - Validated time.Time `json:"validated,omitempty"` - - // error (optional, object): - // Error that occurred while the server was validating the challenge, if any, - // structured as a problem document [RFC7807]. - // Multiple errors can be indicated by using subproblems Section 6.7.1. - // A challenge object with an error MUST have status equal to "invalid". - Error *ProblemDetails `json:"error,omitempty"` - - // token (required, string): - // A random value that uniquely identifies the challenge. - // This value MUST have at least 128 bits of entropy. - // It MUST NOT contain any characters outside the base64url alphabet, - // and MUST NOT include base64 padding characters ("="). - // See [RFC4086] for additional information on randomness requirements. - // https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-8.3 - // https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-8.4 - Token string `json:"token"` - - // https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-8.1 - KeyAuthorization string `json:"keyAuthorization"` -} - -// Identifier the ACME identifier object. 
-// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-9.7.7 -type Identifier struct { - Type string `json:"type"` - Value string `json:"value"` -} - -// CSRMessage Certificate Signing Request -// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-7.4 -type CSRMessage struct { - // csr (required, string): - // A CSR encoding the parameters for the certificate being requested [RFC2986]. - // The CSR is sent in the base64url-encoded version of the DER format. - // (Note: Because this field uses base64url, and does not include headers, it is different from PEM.). - Csr string `json:"csr"` -} - -// RevokeCertMessage a certificate revocation message -// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-7.6 -// - https://tools.ietf.org/html/rfc5280#section-5.3.1 -type RevokeCertMessage struct { - // certificate (required, string): - // The certificate to be revoked, in the base64url-encoded version of the DER format. - // (Note: Because this field uses base64url, and does not include headers, it is different from PEM.) - Certificate string `json:"certificate"` - - // reason (optional, int): - // One of the revocation reasonCodes defined in Section 5.3.1 of [RFC5280] to be used when generating OCSP responses and CRLs. - // If this field is not set the server SHOULD omit the reasonCode CRL entry extension when generating OCSP responses and CRLs. - // The server MAY disallow a subset of reasonCodes from being used by the user. - // If a request contains a disallowed reasonCode the server MUST reject it with the error type "urn:ietf:params:acme:error:badRevocationReason". - // The problem document detail SHOULD indicate which reasonCodes are allowed. 
- Reason *uint `json:"reason,omitempty"` -} diff --git a/vendor/github.com/go-acme/lego/acme/errors.go b/vendor/github.com/go-acme/lego/acme/errors.go deleted file mode 100644 index 1658fe8d1..000000000 --- a/vendor/github.com/go-acme/lego/acme/errors.go +++ /dev/null @@ -1,58 +0,0 @@ -package acme - -import ( - "fmt" -) - -// Errors types -const ( - errNS = "urn:ietf:params:acme:error:" - BadNonceErr = errNS + "badNonce" -) - -// ProblemDetails the problem details object -// - https://tools.ietf.org/html/rfc7807#section-3.1 -// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-7.3.3 -type ProblemDetails struct { - Type string `json:"type,omitempty"` - Detail string `json:"detail,omitempty"` - HTTPStatus int `json:"status,omitempty"` - Instance string `json:"instance,omitempty"` - SubProblems []SubProblem `json:"subproblems,omitempty"` - - // additional values to have a better error message (Not defined by the RFC) - Method string `json:"method,omitempty"` - URL string `json:"url,omitempty"` -} - -// SubProblem a "subproblems" -// - https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-6.7.1 -type SubProblem struct { - Type string `json:"type,omitempty"` - Detail string `json:"detail,omitempty"` - Identifier Identifier `json:"identifier,omitempty"` -} - -func (p ProblemDetails) Error() string { - msg := fmt.Sprintf("acme: error: %d", p.HTTPStatus) - if len(p.Method) != 0 || len(p.URL) != 0 { - msg += fmt.Sprintf(" :: %s :: %s", p.Method, p.URL) - } - msg += fmt.Sprintf(" :: %s :: %s", p.Type, p.Detail) - - for _, sub := range p.SubProblems { - msg += fmt.Sprintf(", problem: %q :: %s", sub.Type, sub.Detail) - } - - if len(p.Instance) == 0 { - msg += ", url: " + p.Instance - } - - return msg -} - -// NonceError represents the error which is returned -// if the nonce sent by the client was not accepted by the server. 
-type NonceError struct { - *ProblemDetails -} diff --git a/vendor/github.com/go-acme/lego/certcrypto/crypto.go b/vendor/github.com/go-acme/lego/certcrypto/crypto.go deleted file mode 100644 index c9d0c1098..000000000 --- a/vendor/github.com/go-acme/lego/certcrypto/crypto.go +++ /dev/null @@ -1,256 +0,0 @@ -package certcrypto - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/pem" - "errors" - "fmt" - "math/big" - "time" - - "golang.org/x/crypto/ocsp" -) - -// Constants for all key types we support. -const ( - EC256 = KeyType("P256") - EC384 = KeyType("P384") - RSA2048 = KeyType("2048") - RSA4096 = KeyType("4096") - RSA8192 = KeyType("8192") -) - -const ( - // OCSPGood means that the certificate is valid. - OCSPGood = ocsp.Good - // OCSPRevoked means that the certificate has been deliberately revoked. - OCSPRevoked = ocsp.Revoked - // OCSPUnknown means that the OCSP responder doesn't know about the certificate. - OCSPUnknown = ocsp.Unknown - // OCSPServerFailed means that the OCSP responder failed to process the request. - OCSPServerFailed = ocsp.ServerFailed -) - -// Constants for OCSP must staple -var ( - tlsFeatureExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24} - ocspMustStapleFeature = []byte{0x30, 0x03, 0x02, 0x01, 0x05} -) - -// KeyType represents the key algo as well as the key size or curve to use. -type KeyType string - -type DERCertificateBytes []byte - -// ParsePEMBundle parses a certificate bundle from top to bottom and returns -// a slice of x509 certificates. This function will error if no certificates are found. 
-func ParsePEMBundle(bundle []byte) ([]*x509.Certificate, error) { - var certificates []*x509.Certificate - var certDERBlock *pem.Block - - for { - certDERBlock, bundle = pem.Decode(bundle) - if certDERBlock == nil { - break - } - - if certDERBlock.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(certDERBlock.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } - } - - if len(certificates) == 0 { - return nil, errors.New("no certificates were found while parsing the bundle") - } - - return certificates, nil -} - -func ParsePEMPrivateKey(key []byte) (crypto.PrivateKey, error) { - keyBlock, _ := pem.Decode(key) - - switch keyBlock.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(keyBlock.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(keyBlock.Bytes) - default: - return nil, errors.New("unknown PEM header value") - } -} - -func GeneratePrivateKey(keyType KeyType) (crypto.PrivateKey, error) { - switch keyType { - case EC256: - return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case EC384: - return ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - case RSA2048: - return rsa.GenerateKey(rand.Reader, 2048) - case RSA4096: - return rsa.GenerateKey(rand.Reader, 4096) - case RSA8192: - return rsa.GenerateKey(rand.Reader, 8192) - } - - return nil, fmt.Errorf("invalid KeyType: %s", keyType) -} - -func GenerateCSR(privateKey crypto.PrivateKey, domain string, san []string, mustStaple bool) ([]byte, error) { - template := x509.CertificateRequest{ - Subject: pkix.Name{CommonName: domain}, - DNSNames: san, - } - - if mustStaple { - template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{ - Id: tlsFeatureExtensionOID, - Value: ocspMustStapleFeature, - }) - } - - return x509.CreateCertificateRequest(rand.Reader, &template, privateKey) -} - -func PEMEncode(data interface{}) []byte { - return pem.EncodeToMemory(PEMBlock(data)) -} - -func PEMBlock(data interface{}) 
*pem.Block { - var pemBlock *pem.Block - switch key := data.(type) { - case *ecdsa.PrivateKey: - keyBytes, _ := x509.MarshalECPrivateKey(key) - pemBlock = &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes} - case *rsa.PrivateKey: - pemBlock = &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)} - case *x509.CertificateRequest: - pemBlock = &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: key.Raw} - case DERCertificateBytes: - pemBlock = &pem.Block{Type: "CERTIFICATE", Bytes: []byte(data.(DERCertificateBytes))} - } - - return pemBlock -} - -func pemDecode(data []byte) (*pem.Block, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, fmt.Errorf("PEM decode did not yield a valid block. Is the certificate in the right format?") - } - - return pemBlock, nil -} - -func PemDecodeTox509CSR(pem []byte) (*x509.CertificateRequest, error) { - pemBlock, err := pemDecode(pem) - if pemBlock == nil { - return nil, err - } - - if pemBlock.Type != "CERTIFICATE REQUEST" { - return nil, fmt.Errorf("PEM block is not a certificate request") - } - - return x509.ParseCertificateRequest(pemBlock.Bytes) -} - -// ParsePEMCertificate returns Certificate from a PEM encoded certificate. -// The certificate has to be PEM encoded. Any other encodings like DER will fail. 
-func ParsePEMCertificate(cert []byte) (*x509.Certificate, error) { - pemBlock, err := pemDecode(cert) - if pemBlock == nil { - return nil, err - } - - // from a DER encoded certificate - return x509.ParseCertificate(pemBlock.Bytes) -} - -func ExtractDomains(cert *x509.Certificate) []string { - domains := []string{cert.Subject.CommonName} - - // Check for SAN certificate - for _, sanDomain := range cert.DNSNames { - if sanDomain == cert.Subject.CommonName { - continue - } - domains = append(domains, sanDomain) - } - - return domains -} - -func ExtractDomainsCSR(csr *x509.CertificateRequest) []string { - domains := []string{csr.Subject.CommonName} - - // loop over the SubjectAltName DNS names - for _, sanName := range csr.DNSNames { - if containsSAN(domains, sanName) { - // Duplicate; skip this name - continue - } - - // Name is unique - domains = append(domains, sanName) - } - - return domains -} - -func containsSAN(domains []string, sanName string) bool { - for _, existingName := range domains { - if existingName == sanName { - return true - } - } - return false -} - -func GeneratePemCert(privateKey *rsa.PrivateKey, domain string, extensions []pkix.Extension) ([]byte, error) { - derBytes, err := generateDerCert(privateKey, time.Time{}, domain, extensions) - if err != nil { - return nil, err - } - - return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}), nil -} - -func generateDerCert(privateKey *rsa.PrivateKey, expiration time.Time, domain string, extensions []pkix.Extension) ([]byte, error) { - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return nil, err - } - - if expiration.IsZero() { - expiration = time.Now().Add(365) - } - - template := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - CommonName: "ACME Challenge TEMP", - }, - NotBefore: time.Now(), - NotAfter: expiration, - - KeyUsage: x509.KeyUsageKeyEncipherment, - 
BasicConstraintsValid: true, - DNSNames: []string{domain}, - ExtraExtensions: extensions, - } - - return x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey) -} diff --git a/vendor/github.com/go-acme/lego/certificate/authorization.go b/vendor/github.com/go-acme/lego/certificate/authorization.go deleted file mode 100644 index 00062504b..000000000 --- a/vendor/github.com/go-acme/lego/certificate/authorization.go +++ /dev/null @@ -1,69 +0,0 @@ -package certificate - -import ( - "time" - - "github.com/go-acme/lego/acme" - "github.com/go-acme/lego/log" -) - -const ( - // overallRequestLimit is the overall number of request per second - // limited on the "new-reg", "new-authz" and "new-cert" endpoints. - // From the documentation the limitation is 20 requests per second, - // but using 20 as value doesn't work but 18 do - overallRequestLimit = 18 -) - -func (c *Certifier) getAuthorizations(order acme.ExtendedOrder) ([]acme.Authorization, error) { - resc, errc := make(chan acme.Authorization), make(chan domainError) - - delay := time.Second / overallRequestLimit - - for _, authzURL := range order.Authorizations { - time.Sleep(delay) - - go func(authzURL string) { - authz, err := c.core.Authorizations.Get(authzURL) - if err != nil { - errc <- domainError{Domain: authz.Identifier.Value, Error: err} - return - } - - resc <- authz - }(authzURL) - } - - var responses []acme.Authorization - failures := make(obtainError) - for i := 0; i < len(order.Authorizations); i++ { - select { - case res := <-resc: - responses = append(responses, res) - case err := <-errc: - failures[err.Domain] = err.Error - } - } - - for i, auth := range order.Authorizations { - log.Infof("[%s] AuthURL: %s", order.Identifiers[i].Value, auth) - } - - close(resc) - close(errc) - - // be careful to not return an empty failures map; - // even if empty, they become non-nil error values - if len(failures) > 0 { - return responses, failures - } - return responses, nil -} - 
-func (c *Certifier) deactivateAuthorizations(order acme.ExtendedOrder) { - for _, auth := range order.Authorizations { - if err := c.core.Authorizations.Deactivate(auth); err != nil { - log.Infof("Unable to deactivated authorizations: %s", auth) - } - } -} diff --git a/vendor/github.com/go-acme/lego/certificate/certificates.go b/vendor/github.com/go-acme/lego/certificate/certificates.go deleted file mode 100644 index b0327d5c1..000000000 --- a/vendor/github.com/go-acme/lego/certificate/certificates.go +++ /dev/null @@ -1,495 +0,0 @@ -package certificate - -import ( - "bytes" - "crypto" - "crypto/x509" - "encoding/base64" - "errors" - "fmt" - "io/ioutil" - "net/http" - "strings" - "time" - - "github.com/go-acme/lego/acme" - "github.com/go-acme/lego/acme/api" - "github.com/go-acme/lego/certcrypto" - "github.com/go-acme/lego/challenge" - "github.com/go-acme/lego/log" - "github.com/go-acme/lego/platform/wait" - "golang.org/x/crypto/ocsp" - "golang.org/x/net/idna" -) - -// maxBodySize is the maximum size of body that we will read. -const maxBodySize = 1024 * 1024 - -// Resource represents a CA issued certificate. -// PrivateKey, Certificate and IssuerCertificate are all -// already PEM encoded and can be directly written to disk. -// Certificate may be a certificate bundle, -// depending on the options supplied to create it. -type Resource struct { - Domain string `json:"domain"` - CertURL string `json:"certUrl"` - CertStableURL string `json:"certStableUrl"` - PrivateKey []byte `json:"-"` - Certificate []byte `json:"-"` - IssuerCertificate []byte `json:"-"` - CSR []byte `json:"-"` -} - -// ObtainRequest The request to obtain certificate. -// -// The first domain in domains is used for the CommonName field of the certificate, -// all other domains are added using the Subject Alternate Names extension. -// -// A new private key is generated for every invocation of the function Obtain. 
-// If you do not want that you can supply your own private key in the privateKey parameter. -// If this parameter is non-nil it will be used instead of generating a new one. -// -// If bundle is true, the []byte contains both the issuer certificate and your issued certificate as a bundle. -type ObtainRequest struct { - Domains []string - Bundle bool - PrivateKey crypto.PrivateKey - MustStaple bool -} - -type resolver interface { - Solve(authorizations []acme.Authorization) error -} - -type CertifierOptions struct { - KeyType certcrypto.KeyType - Timeout time.Duration -} - -// Certifier A service to obtain/renew/revoke certificates. -type Certifier struct { - core *api.Core - resolver resolver - options CertifierOptions -} - -// NewCertifier creates a Certifier. -func NewCertifier(core *api.Core, resolver resolver, options CertifierOptions) *Certifier { - return &Certifier{ - core: core, - resolver: resolver, - options: options, - } -} - -// Obtain tries to obtain a single certificate using all domains passed into it. -// -// This function will never return a partial certificate. -// If one domain in the list fails, the whole certificate will fail. -func (c *Certifier) Obtain(request ObtainRequest) (*Resource, error) { - if len(request.Domains) == 0 { - return nil, errors.New("no domains to obtain a certificate for") - } - - domains := sanitizeDomain(request.Domains) - - if request.Bundle { - log.Infof("[%s] acme: Obtaining bundled SAN certificate", strings.Join(domains, ", ")) - } else { - log.Infof("[%s] acme: Obtaining SAN certificate", strings.Join(domains, ", ")) - } - - order, err := c.core.Orders.New(domains) - if err != nil { - return nil, err - } - - authz, err := c.getAuthorizations(order) - if err != nil { - // If any challenge fails, return. Do not generate partial SAN certificates. - c.deactivateAuthorizations(order) - return nil, err - } - - err = c.resolver.Solve(authz) - if err != nil { - // If any challenge fails, return. 
Do not generate partial SAN certificates. - c.deactivateAuthorizations(order) - return nil, err - } - - log.Infof("[%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", ")) - - failures := make(obtainError) - cert, err := c.getForOrder(domains, order, request.Bundle, request.PrivateKey, request.MustStaple) - if err != nil { - for _, auth := range authz { - failures[challenge.GetTargetedDomain(auth)] = err - } - } - - // Do not return an empty failures map, because - // it would still be a non-nil error value - if len(failures) > 0 { - return cert, failures - } - return cert, nil -} - -// ObtainForCSR tries to obtain a certificate matching the CSR passed into it. -// -// The domains are inferred from the CommonName and SubjectAltNames, if any. -// The private key for this CSR is not required. -// -// If bundle is true, the []byte contains both the issuer certificate and your issued certificate as a bundle. -// -// This function will never return a partial certificate. -// If one domain in the list fails, the whole certificate will fail. -func (c *Certifier) ObtainForCSR(csr x509.CertificateRequest, bundle bool) (*Resource, error) { - // figure out what domains it concerns - // start with the common name - domains := certcrypto.ExtractDomainsCSR(&csr) - - if bundle { - log.Infof("[%s] acme: Obtaining bundled SAN certificate given a CSR", strings.Join(domains, ", ")) - } else { - log.Infof("[%s] acme: Obtaining SAN certificate given a CSR", strings.Join(domains, ", ")) - } - - order, err := c.core.Orders.New(domains) - if err != nil { - return nil, err - } - - authz, err := c.getAuthorizations(order) - if err != nil { - // If any challenge fails, return. Do not generate partial SAN certificates. - c.deactivateAuthorizations(order) - return nil, err - } - - err = c.resolver.Solve(authz) - if err != nil { - // If any challenge fails, return. Do not generate partial SAN certificates. 
- c.deactivateAuthorizations(order) - return nil, err - } - - log.Infof("[%s] acme: Validations succeeded; requesting certificates", strings.Join(domains, ", ")) - - failures := make(obtainError) - cert, err := c.getForCSR(domains, order, bundle, csr.Raw, nil) - if err != nil { - for _, auth := range authz { - failures[challenge.GetTargetedDomain(auth)] = err - } - } - - if cert != nil { - // Add the CSR to the certificate so that it can be used for renewals. - cert.CSR = certcrypto.PEMEncode(&csr) - } - - // Do not return an empty failures map, - // because it would still be a non-nil error value - if len(failures) > 0 { - return cert, failures - } - return cert, nil -} - -func (c *Certifier) getForOrder(domains []string, order acme.ExtendedOrder, bundle bool, privateKey crypto.PrivateKey, mustStaple bool) (*Resource, error) { - if privateKey == nil { - var err error - privateKey, err = certcrypto.GeneratePrivateKey(c.options.KeyType) - if err != nil { - return nil, err - } - } - - // Determine certificate name(s) based on the authorization resources - commonName := domains[0] - - // ACME draft Section 7.4 "Applying for Certificate Issuance" - // https://tools.ietf.org/html/draft-ietf-acme-acme-12#section-7.4 - // says: - // Clients SHOULD NOT make any assumptions about the sort order of - // "identifiers" or "authorizations" elements in the returned order - // object. - san := []string{commonName} - for _, auth := range order.Identifiers { - if auth.Value != commonName { - san = append(san, auth.Value) - } - } - - // TODO: should the CSR be customizable? 
- csr, err := certcrypto.GenerateCSR(privateKey, commonName, san, mustStaple) - if err != nil { - return nil, err - } - - return c.getForCSR(domains, order, bundle, csr, certcrypto.PEMEncode(privateKey)) -} - -func (c *Certifier) getForCSR(domains []string, order acme.ExtendedOrder, bundle bool, csr []byte, privateKeyPem []byte) (*Resource, error) { - respOrder, err := c.core.Orders.UpdateForCSR(order.Finalize, csr) - if err != nil { - return nil, err - } - - commonName := domains[0] - certRes := &Resource{ - Domain: commonName, - CertURL: respOrder.Certificate, - PrivateKey: privateKeyPem, - } - - if respOrder.Status == acme.StatusValid { - // if the certificate is available right away, short cut! - ok, errR := c.checkResponse(respOrder, certRes, bundle) - if errR != nil { - return nil, errR - } - - if ok { - return certRes, nil - } - } - - timeout := c.options.Timeout - if c.options.Timeout <= 0 { - timeout = 30 * time.Second - } - - err = wait.For("certificate", timeout, timeout/60, func() (bool, error) { - ord, errW := c.core.Orders.Get(order.Location) - if errW != nil { - return false, errW - } - - done, errW := c.checkResponse(ord, certRes, bundle) - if errW != nil { - return false, errW - } - - return done, nil - }) - - return certRes, err -} - -// checkResponse checks to see if the certificate is ready and a link is contained in the response. -// -// If so, loads it into certRes and returns true. -// If the cert is not yet ready, it returns false. -// -// The certRes input should already have the Domain (common name) field populated. -// -// If bundle is true, the certificate will be bundled with the issuer's cert. 
-func (c *Certifier) checkResponse(order acme.Order, certRes *Resource, bundle bool) (bool, error) { - valid, err := checkOrderStatus(order) - if err != nil || !valid { - return valid, err - } - - cert, issuer, err := c.core.Certificates.Get(order.Certificate, bundle) - if err != nil { - return false, err - } - - log.Infof("[%s] Server responded with a certificate.", certRes.Domain) - - certRes.IssuerCertificate = issuer - certRes.Certificate = cert - certRes.CertURL = order.Certificate - certRes.CertStableURL = order.Certificate - - return true, nil -} - -// Revoke takes a PEM encoded certificate or bundle and tries to revoke it at the CA. -func (c *Certifier) Revoke(cert []byte) error { - certificates, err := certcrypto.ParsePEMBundle(cert) - if err != nil { - return err - } - - x509Cert := certificates[0] - if x509Cert.IsCA { - return fmt.Errorf("certificate bundle starts with a CA certificate") - } - - revokeMsg := acme.RevokeCertMessage{ - Certificate: base64.RawURLEncoding.EncodeToString(x509Cert.Raw), - } - - return c.core.Certificates.Revoke(revokeMsg) -} - -// Renew takes a Resource and tries to renew the certificate. -// -// If the renewal process succeeds, the new certificate will ge returned in a new CertResource. -// Please be aware that this function will return a new certificate in ANY case that is not an error. -// If the server does not provide us with a new cert on a GET request to the CertURL -// this function will start a new-cert flow where a new certificate gets generated. -// -// If bundle is true, the []byte contains both the issuer certificate and your issued certificate as a bundle. -// -// For private key reuse the PrivateKey property of the passed in Resource should be non-nil. -func (c *Certifier) Renew(certRes Resource, bundle, mustStaple bool) (*Resource, error) { - // Input certificate is PEM encoded. - // Decode it here as we may need the decoded cert later on in the renewal process. 
- // The input may be a bundle or a single certificate. - certificates, err := certcrypto.ParsePEMBundle(certRes.Certificate) - if err != nil { - return nil, err - } - - x509Cert := certificates[0] - if x509Cert.IsCA { - return nil, fmt.Errorf("[%s] Certificate bundle starts with a CA certificate", certRes.Domain) - } - - // This is just meant to be informal for the user. - timeLeft := x509Cert.NotAfter.Sub(time.Now().UTC()) - log.Infof("[%s] acme: Trying renewal with %d hours remaining", certRes.Domain, int(timeLeft.Hours())) - - // We always need to request a new certificate to renew. - // Start by checking to see if the certificate was based off a CSR, - // and use that if it's defined. - if len(certRes.CSR) > 0 { - csr, errP := certcrypto.PemDecodeTox509CSR(certRes.CSR) - if errP != nil { - return nil, errP - } - - return c.ObtainForCSR(*csr, bundle) - } - - var privateKey crypto.PrivateKey - if certRes.PrivateKey != nil { - privateKey, err = certcrypto.ParsePEMPrivateKey(certRes.PrivateKey) - if err != nil { - return nil, err - } - } - - query := ObtainRequest{ - Domains: certcrypto.ExtractDomains(x509Cert), - Bundle: bundle, - PrivateKey: privateKey, - MustStaple: mustStaple, - } - return c.Obtain(query) -} - -// GetOCSP takes a PEM encoded cert or cert bundle returning the raw OCSP response, -// the parsed response, and an error, if any. -// -// The returned []byte can be passed directly into the OCSPStaple property of a tls.Certificate. -// If the bundle only contains the issued certificate, -// this function will try to get the issuer certificate from the IssuingCertificateURL in the certificate. -// -// If the []byte and/or ocsp.Response return values are nil, the OCSP status may be assumed OCSPUnknown. -func (c *Certifier) GetOCSP(bundle []byte) ([]byte, *ocsp.Response, error) { - certificates, err := certcrypto.ParsePEMBundle(bundle) - if err != nil { - return nil, nil, err - } - - // We expect the certificate slice to be ordered downwards the chain. 
- // SRV CRT -> CA. We need to pull the leaf and issuer certs out of it, - // which should always be the first two certificates. - // If there's no OCSP server listed in the leaf cert, there's nothing to do. - // And if we have only one certificate so far, we need to get the issuer cert. - - issuedCert := certificates[0] - - if len(issuedCert.OCSPServer) == 0 { - return nil, nil, errors.New("no OCSP server specified in cert") - } - - if len(certificates) == 1 { - // TODO: build fallback. If this fails, check the remaining array entries. - if len(issuedCert.IssuingCertificateURL) == 0 { - return nil, nil, errors.New("no issuing certificate URL") - } - - resp, errC := c.core.HTTPClient.Get(issuedCert.IssuingCertificateURL[0]) - if errC != nil { - return nil, nil, errC - } - defer resp.Body.Close() - - issuerBytes, errC := ioutil.ReadAll(http.MaxBytesReader(nil, resp.Body, maxBodySize)) - if errC != nil { - return nil, nil, errC - } - - issuerCert, errC := x509.ParseCertificate(issuerBytes) - if errC != nil { - return nil, nil, errC - } - - // Insert it into the slice on position 0 - // We want it ordered right SRV CRT -> CA - certificates = append(certificates, issuerCert) - } - - issuerCert := certificates[1] - - // Finally kick off the OCSP request. 
- ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil) - if err != nil { - return nil, nil, err - } - - resp, err := c.core.HTTPClient.Post(issuedCert.OCSPServer[0], "application/ocsp-request", bytes.NewReader(ocspReq)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - ocspResBytes, err := ioutil.ReadAll(http.MaxBytesReader(nil, resp.Body, maxBodySize)) - if err != nil { - return nil, nil, err - } - - ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert) - if err != nil { - return nil, nil, err - } - - return ocspResBytes, ocspRes, nil -} - -func checkOrderStatus(order acme.Order) (bool, error) { - switch order.Status { - case acme.StatusValid: - return true, nil - case acme.StatusInvalid: - return false, order.Error - default: - return false, nil - } -} - -// https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-7.1.4 -// The domain name MUST be encoded -// in the form in which it would appear in a certificate. That is, it -// MUST be encoded according to the rules in Section 7 of [RFC5280]. -// -// https://tools.ietf.org/html/rfc5280#section-7 -func sanitizeDomain(domains []string) []string { - var sanitizedDomains []string - for _, domain := range domains { - sanitizedDomain, err := idna.ToASCII(domain) - if err != nil { - log.Infof("skip domain %q: unable to sanitize (punnycode): %v", domain, err) - } else { - sanitizedDomains = append(sanitizedDomains, sanitizedDomain) - } - } - return sanitizedDomains -} diff --git a/vendor/github.com/go-acme/lego/certificate/errors.go b/vendor/github.com/go-acme/lego/certificate/errors.go deleted file mode 100644 index 0fec7c16a..000000000 --- a/vendor/github.com/go-acme/lego/certificate/errors.go +++ /dev/null @@ -1,30 +0,0 @@ -package certificate - -import ( - "bytes" - "fmt" - "sort" -) - -// obtainError is returned when there are specific errors available per domain. 
-type obtainError map[string]error - -func (e obtainError) Error() string { - buffer := bytes.NewBufferString("acme: Error -> One or more domains had a problem:\n") - - var domains []string - for domain := range e { - domains = append(domains, domain) - } - sort.Strings(domains) - - for _, domain := range domains { - buffer.WriteString(fmt.Sprintf("[%s] %s\n", domain, e[domain])) - } - return buffer.String() -} - -type domainError struct { - Domain string - Error error -} diff --git a/vendor/github.com/go-acme/lego/challenge/challenges.go b/vendor/github.com/go-acme/lego/challenge/challenges.go deleted file mode 100644 index b3281402f..000000000 --- a/vendor/github.com/go-acme/lego/challenge/challenges.go +++ /dev/null @@ -1,44 +0,0 @@ -package challenge - -import ( - "fmt" - - "github.com/go-acme/lego/acme" -) - -// Type is a string that identifies a particular challenge type and version of ACME challenge. -type Type string - -const ( - // HTTP01 is the "http-01" ACME challenge https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-8.3 - // Note: ChallengePath returns the URL path to fulfill this challenge - HTTP01 = Type("http-01") - - // DNS01 is the "dns-01" ACME challenge https://tools.ietf.org/html/draft-ietf-acme-acme-16#section-8.4 - // Note: GetRecord returns a DNS record which will fulfill this challenge - DNS01 = Type("dns-01") - - // TLSALPN01 is the "tls-alpn-01" ACME challenge https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05 - TLSALPN01 = Type("tls-alpn-01") -) - -func (t Type) String() string { - return string(t) -} - -func FindChallenge(chlgType Type, authz acme.Authorization) (acme.Challenge, error) { - for _, chlg := range authz.Challenges { - if chlg.Type == string(chlgType) { - return chlg, nil - } - } - - return acme.Challenge{}, fmt.Errorf("[%s] acme: unable to find challenge %s", GetTargetedDomain(authz), chlgType) -} - -func GetTargetedDomain(authz acme.Authorization) string { - if authz.Wildcard { - return "*." 
+ authz.Identifier.Value - } - return authz.Identifier.Value -} diff --git a/vendor/github.com/go-acme/lego/challenge/dns01/cname.go b/vendor/github.com/go-acme/lego/challenge/dns01/cname.go deleted file mode 100644 index 619c84763..000000000 --- a/vendor/github.com/go-acme/lego/challenge/dns01/cname.go +++ /dev/null @@ -1,16 +0,0 @@ -package dns01 - -import "github.com/miekg/dns" - -// Update FQDN with CNAME if any -func updateDomainWithCName(r *dns.Msg, fqdn string) string { - for _, rr := range r.Answer { - if cn, ok := rr.(*dns.CNAME); ok { - if cn.Hdr.Name == fqdn { - return cn.Target - } - } - } - - return fqdn -} diff --git a/vendor/github.com/go-acme/lego/challenge/dns01/dns_challenge.go b/vendor/github.com/go-acme/lego/challenge/dns01/dns_challenge.go deleted file mode 100644 index 9500305f8..000000000 --- a/vendor/github.com/go-acme/lego/challenge/dns01/dns_challenge.go +++ /dev/null @@ -1,188 +0,0 @@ -package dns01 - -import ( - "crypto/sha256" - "encoding/base64" - "fmt" - "os" - "strconv" - "time" - - "github.com/go-acme/lego/acme" - "github.com/go-acme/lego/acme/api" - "github.com/go-acme/lego/challenge" - "github.com/go-acme/lego/log" - "github.com/go-acme/lego/platform/wait" - "github.com/miekg/dns" -) - -const ( - // DefaultPropagationTimeout default propagation timeout - DefaultPropagationTimeout = 60 * time.Second - - // DefaultPollingInterval default polling interval - DefaultPollingInterval = 2 * time.Second - - // DefaultTTL default TTL - DefaultTTL = 120 -) - -type ValidateFunc func(core *api.Core, domain string, chlng acme.Challenge) error - -type ChallengeOption func(*Challenge) error - -// CondOption Conditional challenge option. 
-func CondOption(condition bool, opt ChallengeOption) ChallengeOption { - if !condition { - // NoOp options - return func(*Challenge) error { - return nil - } - } - return opt -} - -// Challenge implements the dns-01 challenge -type Challenge struct { - core *api.Core - validate ValidateFunc - provider challenge.Provider - preCheck preCheck - dnsTimeout time.Duration -} - -func NewChallenge(core *api.Core, validate ValidateFunc, provider challenge.Provider, opts ...ChallengeOption) *Challenge { - chlg := &Challenge{ - core: core, - validate: validate, - provider: provider, - preCheck: newPreCheck(), - dnsTimeout: 10 * time.Second, - } - - for _, opt := range opts { - err := opt(chlg) - if err != nil { - log.Infof("challenge option error: %v", err) - } - } - - return chlg -} - -// PreSolve just submits the txt record to the dns provider. -// It does not validate record propagation, or do anything at all with the acme server. -func (c *Challenge) PreSolve(authz acme.Authorization) error { - domain := challenge.GetTargetedDomain(authz) - log.Infof("[%s] acme: Preparing to solve DNS-01", domain) - - chlng, err := challenge.FindChallenge(challenge.DNS01, authz) - if err != nil { - return err - } - - if c.provider == nil { - return fmt.Errorf("[%s] acme: no DNS Provider configured", domain) - } - - // Generate the Key Authorization for the challenge - keyAuth, err := c.core.GetKeyAuthorization(chlng.Token) - if err != nil { - return err - } - - err = c.provider.Present(authz.Identifier.Value, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("[%s] acme: error presenting token: %s", domain, err) - } - - return nil -} - -func (c *Challenge) Solve(authz acme.Authorization) error { - domain := challenge.GetTargetedDomain(authz) - log.Infof("[%s] acme: Trying to solve DNS-01", domain) - - chlng, err := challenge.FindChallenge(challenge.DNS01, authz) - if err != nil { - return err - } - - // Generate the Key Authorization for the challenge - keyAuth, err := 
c.core.GetKeyAuthorization(chlng.Token) - if err != nil { - return err - } - - fqdn, value := GetRecord(authz.Identifier.Value, keyAuth) - - var timeout, interval time.Duration - switch provider := c.provider.(type) { - case challenge.ProviderTimeout: - timeout, interval = provider.Timeout() - default: - timeout, interval = DefaultPropagationTimeout, DefaultPollingInterval - } - - log.Infof("[%s] acme: Checking DNS record propagation using %+v", domain, recursiveNameservers) - - err = wait.For("propagation", timeout, interval, func() (bool, error) { - stop, errP := c.preCheck.call(domain, fqdn, value) - if !stop || errP != nil { - log.Infof("[%s] acme: Waiting for DNS record propagation.", domain) - } - return stop, errP - }) - if err != nil { - return err - } - - chlng.KeyAuthorization = keyAuth - return c.validate(c.core, domain, chlng) -} - -// CleanUp cleans the challenge. -func (c *Challenge) CleanUp(authz acme.Authorization) error { - log.Infof("[%s] acme: Cleaning DNS-01 challenge", challenge.GetTargetedDomain(authz)) - - chlng, err := challenge.FindChallenge(challenge.DNS01, authz) - if err != nil { - return err - } - - keyAuth, err := c.core.GetKeyAuthorization(chlng.Token) - if err != nil { - return err - } - - return c.provider.CleanUp(authz.Identifier.Value, chlng.Token, keyAuth) -} - -func (c *Challenge) Sequential() (bool, time.Duration) { - if p, ok := c.provider.(sequential); ok { - return ok, p.Sequential() - } - return false, 0 -} - -type sequential interface { - Sequential() time.Duration -} - -// GetRecord returns a DNS record which will fulfill the `dns-01` challenge -func GetRecord(domain, keyAuth string) (fqdn string, value string) { - keyAuthShaBytes := sha256.Sum256([]byte(keyAuth)) - // base64URL encoding without padding - value = base64.RawURLEncoding.EncodeToString(keyAuthShaBytes[:sha256.Size]) - fqdn = fmt.Sprintf("_acme-challenge.%s.", domain) - - if ok, _ := strconv.ParseBool(os.Getenv("LEGO_EXPERIMENTAL_CNAME_SUPPORT")); ok { - r, 
err := dnsQuery(fqdn, dns.TypeCNAME, recursiveNameservers, true) - // Check if the domain has CNAME then return that - if err == nil && r.Rcode == dns.RcodeSuccess { - fqdn = updateDomainWithCName(r, fqdn) - } - } - - return -} diff --git a/vendor/github.com/go-acme/lego/challenge/dns01/dns_challenge_manual.go b/vendor/github.com/go-acme/lego/challenge/dns01/dns_challenge_manual.go deleted file mode 100644 index 490108dd1..000000000 --- a/vendor/github.com/go-acme/lego/challenge/dns01/dns_challenge_manual.go +++ /dev/null @@ -1,52 +0,0 @@ -package dns01 - -import ( - "bufio" - "fmt" - "os" -) - -const ( - dnsTemplate = `%s %d IN TXT "%s"` -) - -// DNSProviderManual is an implementation of the ChallengeProvider interface -type DNSProviderManual struct{} - -// NewDNSProviderManual returns a DNSProviderManual instance. -func NewDNSProviderManual() (*DNSProviderManual, error) { - return &DNSProviderManual{}, nil -} - -// Present prints instructions for manually creating the TXT record -func (*DNSProviderManual) Present(domain, token, keyAuth string) error { - fqdn, value := GetRecord(domain, keyAuth) - - authZone, err := FindZoneByFqdn(fqdn) - if err != nil { - return err - } - - fmt.Printf("lego: Please create the following TXT record in your %s zone:\n", authZone) - fmt.Printf(dnsTemplate+"\n", fqdn, DefaultTTL, value) - fmt.Printf("lego: Press 'Enter' when you are done\n") - - _, err = bufio.NewReader(os.Stdin).ReadBytes('\n') - - return err -} - -// CleanUp prints instructions for manually removing the TXT record -func (*DNSProviderManual) CleanUp(domain, token, keyAuth string) error { - fqdn, _ := GetRecord(domain, keyAuth) - - authZone, err := FindZoneByFqdn(fqdn) - if err != nil { - return err - } - - fmt.Printf("lego: You can now remove this TXT record from your %s zone:\n", authZone) - fmt.Printf(dnsTemplate+"\n", fqdn, DefaultTTL, "...") - - return nil -} diff --git a/vendor/github.com/go-acme/lego/challenge/dns01/fqdn.go 
b/vendor/github.com/go-acme/lego/challenge/dns01/fqdn.go deleted file mode 100644 index c238c8cf5..000000000 --- a/vendor/github.com/go-acme/lego/challenge/dns01/fqdn.go +++ /dev/null @@ -1,19 +0,0 @@ -package dns01 - -// ToFqdn converts the name into a fqdn appending a trailing dot. -func ToFqdn(name string) string { - n := len(name) - if n == 0 || name[n-1] == '.' { - return name - } - return name + "." -} - -// UnFqdn converts the fqdn into a name removing the trailing dot. -func UnFqdn(name string) string { - n := len(name) - if n != 0 && name[n-1] == '.' { - return name[:n-1] - } - return name -} diff --git a/vendor/github.com/go-acme/lego/challenge/dns01/nameserver.go b/vendor/github.com/go-acme/lego/challenge/dns01/nameserver.go deleted file mode 100644 index 03f1a8d12..000000000 --- a/vendor/github.com/go-acme/lego/challenge/dns01/nameserver.go +++ /dev/null @@ -1,232 +0,0 @@ -package dns01 - -import ( - "fmt" - "net" - "strings" - "sync" - "time" - - "github.com/miekg/dns" -) - -const defaultResolvConf = "/etc/resolv.conf" - -// dnsTimeout is used to override the default DNS timeout of 10 seconds. -var dnsTimeout = 10 * time.Second - -var ( - fqdnToZone = map[string]string{} - muFqdnToZone sync.Mutex -) - -var defaultNameservers = []string{ - "google-public-dns-a.google.com:53", - "google-public-dns-b.google.com:53", -} - -// recursiveNameservers are used to pre-check DNS propagation -var recursiveNameservers = getNameservers(defaultResolvConf, defaultNameservers) - -// ClearFqdnCache clears the cache of fqdn to zone mappings. Primarily used in testing. 
-func ClearFqdnCache() { - muFqdnToZone.Lock() - fqdnToZone = map[string]string{} - muFqdnToZone.Unlock() -} - -func AddDNSTimeout(timeout time.Duration) ChallengeOption { - return func(_ *Challenge) error { - dnsTimeout = timeout - return nil - } -} - -func AddRecursiveNameservers(nameservers []string) ChallengeOption { - return func(_ *Challenge) error { - recursiveNameservers = ParseNameservers(nameservers) - return nil - } -} - -// getNameservers attempts to get systems nameservers before falling back to the defaults -func getNameservers(path string, defaults []string) []string { - config, err := dns.ClientConfigFromFile(path) - if err != nil || len(config.Servers) == 0 { - return defaults - } - - return ParseNameservers(config.Servers) -} - -func ParseNameservers(servers []string) []string { - var resolvers []string - for _, resolver := range servers { - // ensure all servers have a port number - if _, _, err := net.SplitHostPort(resolver); err != nil { - resolvers = append(resolvers, net.JoinHostPort(resolver, "53")) - } else { - resolvers = append(resolvers, resolver) - } - } - return resolvers -} - -// lookupNameservers returns the authoritative nameservers for the given fqdn. -func lookupNameservers(fqdn string) ([]string, error) { - var authoritativeNss []string - - zone, err := FindZoneByFqdn(fqdn) - if err != nil { - return nil, fmt.Errorf("could not determine the zone: %v", err) - } - - r, err := dnsQuery(zone, dns.TypeNS, recursiveNameservers, true) - if err != nil { - return nil, err - } - - for _, rr := range r.Answer { - if ns, ok := rr.(*dns.NS); ok { - authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns)) - } - } - - if len(authoritativeNss) > 0 { - return authoritativeNss, nil - } - return nil, fmt.Errorf("could not determine authoritative nameservers") -} - -// FindZoneByFqdn determines the zone apex for the given fqdn -// by recursing up the domain labels until the nameserver returns a SOA record in the answer section. 
-func FindZoneByFqdn(fqdn string) (string, error) { - return FindZoneByFqdnCustom(fqdn, recursiveNameservers) -} - -// FindZoneByFqdnCustom determines the zone apex for the given fqdn -// by recursing up the domain labels until the nameserver returns a SOA record in the answer section. -func FindZoneByFqdnCustom(fqdn string, nameservers []string) (string, error) { - muFqdnToZone.Lock() - defer muFqdnToZone.Unlock() - - // Do we have it cached? - if zone, ok := fqdnToZone[fqdn]; ok { - return zone, nil - } - - var err error - var in *dns.Msg - - labelIndexes := dns.Split(fqdn) - for _, index := range labelIndexes { - domain := fqdn[index:] - - in, err = dnsQuery(domain, dns.TypeSOA, nameservers, true) - if err != nil { - continue - } - - if in == nil { - continue - } - - switch in.Rcode { - case dns.RcodeSuccess: - // Check if we got a SOA RR in the answer section - - if len(in.Answer) == 0 { - continue - } - - // CNAME records cannot/should not exist at the root of a zone. - // So we skip a domain when a CNAME is found. 
- if dnsMsgContainsCNAME(in) { - continue - } - - for _, ans := range in.Answer { - if soa, ok := ans.(*dns.SOA); ok { - zone := soa.Hdr.Name - fqdnToZone[fqdn] = zone - return zone, nil - } - } - case dns.RcodeNameError: - // NXDOMAIN - default: - // Any response code other than NOERROR and NXDOMAIN is treated as error - return "", fmt.Errorf("unexpected response code '%s' for %s", dns.RcodeToString[in.Rcode], domain) - } - } - - return "", fmt.Errorf("could not find the start of authority for %s%s", fqdn, formatDNSError(in, err)) -} - -// dnsMsgContainsCNAME checks for a CNAME answer in msg -func dnsMsgContainsCNAME(msg *dns.Msg) bool { - for _, ans := range msg.Answer { - if _, ok := ans.(*dns.CNAME); ok { - return true - } - } - return false -} - -func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (*dns.Msg, error) { - m := createDNSMsg(fqdn, rtype, recursive) - - var in *dns.Msg - var err error - - for _, ns := range nameservers { - in, err = sendDNSQuery(m, ns) - if err == nil && len(in.Answer) > 0 { - break - } - } - return in, err -} - -func createDNSMsg(fqdn string, rtype uint16, recursive bool) *dns.Msg { - m := new(dns.Msg) - m.SetQuestion(fqdn, rtype) - m.SetEdns0(4096, false) - - if !recursive { - m.RecursionDesired = false - } - - return m -} - -func sendDNSQuery(m *dns.Msg, ns string) (*dns.Msg, error) { - udp := &dns.Client{Net: "udp", Timeout: dnsTimeout} - in, _, err := udp.Exchange(m, ns) - - if in != nil && in.Truncated { - tcp := &dns.Client{Net: "tcp", Timeout: dnsTimeout} - // If the TCP request succeeds, the err will reset to nil - in, _, err = tcp.Exchange(m, ns) - } - - return in, err -} - -func formatDNSError(msg *dns.Msg, err error) string { - var parts []string - - if msg != nil { - parts = append(parts, dns.RcodeToString[msg.Rcode]) - } - - if err != nil { - parts = append(parts, fmt.Sprintf("%v", err)) - } - - if len(parts) > 0 { - return ": " + strings.Join(parts, " ") - } - - return "" -} diff --git 
a/vendor/github.com/go-acme/lego/challenge/dns01/precheck.go b/vendor/github.com/go-acme/lego/challenge/dns01/precheck.go deleted file mode 100644 index 00e09854d..000000000 --- a/vendor/github.com/go-acme/lego/challenge/dns01/precheck.go +++ /dev/null @@ -1,127 +0,0 @@ -package dns01 - -import ( - "errors" - "fmt" - "net" - "strings" - - "github.com/miekg/dns" -) - -// PreCheckFunc checks DNS propagation before notifying ACME that the DNS challenge is ready. -type PreCheckFunc func(fqdn, value string) (bool, error) - -// WrapPreCheckFunc wraps a PreCheckFunc in order to do extra operations before or after -// the main check, put it in a loop, etc. -type WrapPreCheckFunc func(domain, fqdn, value string, check PreCheckFunc) (bool, error) - -// WrapPreCheck Allow to define checks before notifying ACME that the DNS challenge is ready. -func WrapPreCheck(wrap WrapPreCheckFunc) ChallengeOption { - return func(chlg *Challenge) error { - chlg.preCheck.checkFunc = wrap - return nil - } -} - -// AddPreCheck Allow to define checks before notifying ACME that the DNS challenge is ready. -// Deprecated: use WrapPreCheck instead. -func AddPreCheck(preCheck PreCheckFunc) ChallengeOption { - // Prevent race condition - check := preCheck - return func(chlg *Challenge) error { - chlg.preCheck.checkFunc = func(_, fqdn, value string, _ PreCheckFunc) (bool, error) { - if check == nil { - return false, errors.New("invalid preCheck: preCheck is nil") - } - return check(fqdn, value) - } - return nil - } -} - -func DisableCompletePropagationRequirement() ChallengeOption { - return func(chlg *Challenge) error { - chlg.preCheck.requireCompletePropagation = false - return nil - } -} - -type preCheck struct { - // checks DNS propagation before notifying ACME that the DNS challenge is ready. 
- checkFunc WrapPreCheckFunc - // require the TXT record to be propagated to all authoritative name servers - requireCompletePropagation bool -} - -func newPreCheck() preCheck { - return preCheck{ - requireCompletePropagation: true, - } -} - -func (p preCheck) call(domain, fqdn, value string) (bool, error) { - if p.checkFunc == nil { - return p.checkDNSPropagation(fqdn, value) - } - - return p.checkFunc(domain, fqdn, value, p.checkDNSPropagation) -} - -// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers. -func (p preCheck) checkDNSPropagation(fqdn, value string) (bool, error) { - // Initial attempt to resolve at the recursive NS - r, err := dnsQuery(fqdn, dns.TypeTXT, recursiveNameservers, true) - if err != nil { - return false, err - } - - if !p.requireCompletePropagation { - return true, nil - } - - if r.Rcode == dns.RcodeSuccess { - fqdn = updateDomainWithCName(r, fqdn) - } - - authoritativeNss, err := lookupNameservers(fqdn) - if err != nil { - return false, err - } - - return checkAuthoritativeNss(fqdn, value, authoritativeNss) -} - -// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record. 
-func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) { - for _, ns := range nameservers { - r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false) - if err != nil { - return false, err - } - - if r.Rcode != dns.RcodeSuccess { - return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn) - } - - var records []string - - var found bool - for _, rr := range r.Answer { - if txt, ok := rr.(*dns.TXT); ok { - record := strings.Join(txt.Txt, "") - records = append(records, record) - if record == value { - found = true - break - } - } - } - - if !found { - return false, fmt.Errorf("NS %s did not return the expected TXT record [fqdn: %s, value: %s]: %s", ns, fqdn, value, strings.Join(records, " ,")) - } - } - - return true, nil -} diff --git a/vendor/github.com/go-acme/lego/challenge/http01/http_challenge.go b/vendor/github.com/go-acme/lego/challenge/http01/http_challenge.go deleted file mode 100644 index c1cf3d45e..000000000 --- a/vendor/github.com/go-acme/lego/challenge/http01/http_challenge.go +++ /dev/null @@ -1,65 +0,0 @@ -package http01 - -import ( - "fmt" - - "github.com/go-acme/lego/acme" - "github.com/go-acme/lego/acme/api" - "github.com/go-acme/lego/challenge" - "github.com/go-acme/lego/log" -) - -type ValidateFunc func(core *api.Core, domain string, chlng acme.Challenge) error - -// ChallengePath returns the URL path for the `http-01` challenge -func ChallengePath(token string) string { - return "/.well-known/acme-challenge/" + token -} - -type Challenge struct { - core *api.Core - validate ValidateFunc - provider challenge.Provider -} - -func NewChallenge(core *api.Core, validate ValidateFunc, provider challenge.Provider) *Challenge { - return &Challenge{ - core: core, - validate: validate, - provider: provider, - } -} - -func (c *Challenge) SetProvider(provider challenge.Provider) { - c.provider = provider -} - -func (c *Challenge) Solve(authz acme.Authorization) error { - 
domain := challenge.GetTargetedDomain(authz) - log.Infof("[%s] acme: Trying to solve HTTP-01", domain) - - chlng, err := challenge.FindChallenge(challenge.HTTP01, authz) - if err != nil { - return err - } - - // Generate the Key Authorization for the challenge - keyAuth, err := c.core.GetKeyAuthorization(chlng.Token) - if err != nil { - return err - } - - err = c.provider.Present(authz.Identifier.Value, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("[%s] acme: error presenting token: %v", domain, err) - } - defer func() { - err := c.provider.CleanUp(authz.Identifier.Value, chlng.Token, keyAuth) - if err != nil { - log.Warnf("[%s] acme: error cleaning up: %v", domain, err) - } - }() - - chlng.KeyAuthorization = keyAuth - return c.validate(c.core, domain, chlng) -} diff --git a/vendor/github.com/go-acme/lego/challenge/http01/http_challenge_server.go b/vendor/github.com/go-acme/lego/challenge/http01/http_challenge_server.go deleted file mode 100644 index 9f4429111..000000000 --- a/vendor/github.com/go-acme/lego/challenge/http01/http_challenge_server.go +++ /dev/null @@ -1,96 +0,0 @@ -package http01 - -import ( - "fmt" - "net" - "net/http" - "strings" - - "github.com/go-acme/lego/log" -) - -// ProviderServer implements ChallengeProvider for `http-01` challenge -// It may be instantiated without using the NewProviderServer function if -// you want only to use the default values. -type ProviderServer struct { - iface string - port string - done chan bool - listener net.Listener -} - -// NewProviderServer creates a new ProviderServer on the selected interface and port. -// Setting iface and / or port to an empty string will make the server fall back to -// the "any" interface and port 80 respectively. -func NewProviderServer(iface, port string) *ProviderServer { - return &ProviderServer{iface: iface, port: port} -} - -// Present starts a web server and makes the token available at `ChallengePath(token)` for web requests. 
-func (s *ProviderServer) Present(domain, token, keyAuth string) error { - if s.port == "" { - s.port = "80" - } - - var err error - s.listener, err = net.Listen("tcp", s.GetAddress()) - if err != nil { - return fmt.Errorf("could not start HTTP server for challenge -> %v", err) - } - - s.done = make(chan bool) - go s.serve(domain, token, keyAuth) - return nil -} - -func (s *ProviderServer) GetAddress() string { - return net.JoinHostPort(s.iface, s.port) -} - -// CleanUp closes the HTTP server and removes the token from `ChallengePath(token)` -func (s *ProviderServer) CleanUp(domain, token, keyAuth string) error { - if s.listener == nil { - return nil - } - s.listener.Close() - <-s.done - return nil -} - -func (s *ProviderServer) serve(domain, token, keyAuth string) { - path := ChallengePath(token) - - // The handler validates the HOST header and request type. - // For validation it then writes the token the server returned with the challenge - mux := http.NewServeMux() - mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { - if strings.HasPrefix(r.Host, domain) && r.Method == http.MethodGet { - w.Header().Add("Content-Type", "text/plain") - _, err := w.Write([]byte(keyAuth)) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - log.Infof("[%s] Served key authentication", domain) - } else { - log.Warnf("Received request for domain %s with method %s but the domain did not match any challenge. Please ensure your are passing the HOST header properly.", r.Host, r.Method) - _, err := w.Write([]byte("TEST")) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - } - }) - - httpServer := &http.Server{Handler: mux} - - // Once httpServer is shut down - // we don't want any lingering connections, so disable KeepAlives. 
- httpServer.SetKeepAlivesEnabled(false) - - err := httpServer.Serve(s.listener) - if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - log.Println(err) - } - s.done <- true -} diff --git a/vendor/github.com/go-acme/lego/challenge/provider.go b/vendor/github.com/go-acme/lego/challenge/provider.go deleted file mode 100644 index d7cc213f7..000000000 --- a/vendor/github.com/go-acme/lego/challenge/provider.go +++ /dev/null @@ -1,28 +0,0 @@ -package challenge - -import "time" - -// Provider enables implementing a custom challenge -// provider. Present presents the solution to a challenge available to -// be solved. CleanUp will be called by the challenge if Present ends -// in a non-error state. -type Provider interface { - Present(domain, token, keyAuth string) error - CleanUp(domain, token, keyAuth string) error -} - -// ProviderTimeout allows for implementing a -// Provider where an unusually long timeout is required when -// waiting for an ACME challenge to be satisfied, such as when -// checking for DNS record propagation. If an implementor of a -// Provider provides a Timeout method, then the return values -// of the Timeout method will be used when appropriate by the acme -// package. The interval value is the time between checks. -// -// The default values used for timeout and interval are 60 seconds and -// 2 seconds respectively. These are used when no Timeout method is -// defined for the Provider. -type ProviderTimeout interface { - Provider - Timeout() (timeout, interval time.Duration) -} diff --git a/vendor/github.com/go-acme/lego/challenge/resolver/errors.go b/vendor/github.com/go-acme/lego/challenge/resolver/errors.go deleted file mode 100644 index 9d6091432..000000000 --- a/vendor/github.com/go-acme/lego/challenge/resolver/errors.go +++ /dev/null @@ -1,25 +0,0 @@ -package resolver - -import ( - "bytes" - "fmt" - "sort" -) - -// obtainError is returned when there are specific errors available per domain. 
-type obtainError map[string]error - -func (e obtainError) Error() string { - buffer := bytes.NewBufferString("acme: Error -> One or more domains had a problem:\n") - - var domains []string - for domain := range e { - domains = append(domains, domain) - } - sort.Strings(domains) - - for _, domain := range domains { - buffer.WriteString(fmt.Sprintf("[%s] %s\n", domain, e[domain])) - } - return buffer.String() -} diff --git a/vendor/github.com/go-acme/lego/challenge/resolver/prober.go b/vendor/github.com/go-acme/lego/challenge/resolver/prober.go deleted file mode 100644 index b787caf13..000000000 --- a/vendor/github.com/go-acme/lego/challenge/resolver/prober.go +++ /dev/null @@ -1,173 +0,0 @@ -package resolver - -import ( - "fmt" - "time" - - "github.com/go-acme/lego/acme" - "github.com/go-acme/lego/challenge" - "github.com/go-acme/lego/log" -) - -// Interface for all challenge solvers to implement. -type solver interface { - Solve(authorization acme.Authorization) error -} - -// Interface for challenges like dns, where we can set a record in advance for ALL challenges. -// This saves quite a bit of time vs creating the records and solving them serially. -type preSolver interface { - PreSolve(authorization acme.Authorization) error -} - -// Interface for challenges like dns, where we can solve all the challenges before to delete them. -type cleanup interface { - CleanUp(authorization acme.Authorization) error -} - -type sequential interface { - Sequential() (bool, time.Duration) -} - -// an authz with the solver we have chosen and the index of the challenge associated with it -type selectedAuthSolver struct { - authz acme.Authorization - solver solver -} - -type Prober struct { - solverManager *SolverManager -} - -func NewProber(solverManager *SolverManager) *Prober { - return &Prober{ - solverManager: solverManager, - } -} - -// Solve Looks through the challenge combinations to find a solvable match. -// Then solves the challenges in series and returns. 
-func (p *Prober) Solve(authorizations []acme.Authorization) error { - failures := make(obtainError) - - var authSolvers []*selectedAuthSolver - var authSolversSequential []*selectedAuthSolver - - // Loop through the resources, basically through the domains. - // First pass just selects a solver for each authz. - for _, authz := range authorizations { - domain := challenge.GetTargetedDomain(authz) - if authz.Status == acme.StatusValid { - // Boulder might recycle recent validated authz (see issue #267) - log.Infof("[%s] acme: authorization already valid; skipping challenge", domain) - continue - } - - if solvr := p.solverManager.chooseSolver(authz); solvr != nil { - authSolver := &selectedAuthSolver{authz: authz, solver: solvr} - - switch s := solvr.(type) { - case sequential: - if ok, _ := s.Sequential(); ok { - authSolversSequential = append(authSolversSequential, authSolver) - } else { - authSolvers = append(authSolvers, authSolver) - } - default: - authSolvers = append(authSolvers, authSolver) - } - } else { - failures[domain] = fmt.Errorf("[%s] acme: could not determine solvers", domain) - } - } - - parallelSolve(authSolvers, failures) - - sequentialSolve(authSolversSequential, failures) - - // Be careful not to return an empty failures map, - // for even an empty obtainError is a non-nil error value - if len(failures) > 0 { - return failures - } - return nil -} - -func sequentialSolve(authSolvers []*selectedAuthSolver, failures obtainError) { - for i, authSolver := range authSolvers { - // Submit the challenge - domain := challenge.GetTargetedDomain(authSolver.authz) - - if solvr, ok := authSolver.solver.(preSolver); ok { - err := solvr.PreSolve(authSolver.authz) - if err != nil { - failures[domain] = err - cleanUp(authSolver.solver, authSolver.authz) - continue - } - } - - // Solve challenge - err := authSolver.solver.Solve(authSolver.authz) - if err != nil { - failures[domain] = err - cleanUp(authSolver.solver, authSolver.authz) - continue - } - - // Clean 
challenge - cleanUp(authSolver.solver, authSolver.authz) - - if len(authSolvers)-1 > i { - solvr := authSolver.solver.(sequential) - _, interval := solvr.Sequential() - log.Infof("sequence: wait for %s", interval) - time.Sleep(interval) - } - } -} - -func parallelSolve(authSolvers []*selectedAuthSolver, failures obtainError) { - // For all valid preSolvers, first submit the challenges so they have max time to propagate - for _, authSolver := range authSolvers { - authz := authSolver.authz - if solvr, ok := authSolver.solver.(preSolver); ok { - err := solvr.PreSolve(authz) - if err != nil { - failures[challenge.GetTargetedDomain(authz)] = err - } - } - } - - defer func() { - // Clean all created TXT records - for _, authSolver := range authSolvers { - cleanUp(authSolver.solver, authSolver.authz) - } - }() - - // Finally solve all challenges for real - for _, authSolver := range authSolvers { - authz := authSolver.authz - domain := challenge.GetTargetedDomain(authz) - if failures[domain] != nil { - // already failed in previous loop - continue - } - - err := authSolver.solver.Solve(authz) - if err != nil { - failures[domain] = err - } - } -} - -func cleanUp(solvr solver, authz acme.Authorization) { - if solvr, ok := solvr.(cleanup); ok { - domain := challenge.GetTargetedDomain(authz) - err := solvr.CleanUp(authz) - if err != nil { - log.Warnf("[%s] acme: error cleaning up: %v ", domain, err) - } - } -} diff --git a/vendor/github.com/go-acme/lego/challenge/resolver/solver_manager.go b/vendor/github.com/go-acme/lego/challenge/resolver/solver_manager.go deleted file mode 100644 index de6f02db8..000000000 --- a/vendor/github.com/go-acme/lego/challenge/resolver/solver_manager.go +++ /dev/null @@ -1,169 +0,0 @@ -package resolver - -import ( - "context" - "errors" - "fmt" - "sort" - "strconv" - "time" - - "github.com/cenkalti/backoff" - "github.com/go-acme/lego/acme" - "github.com/go-acme/lego/acme/api" - "github.com/go-acme/lego/challenge" - 
"github.com/go-acme/lego/challenge/dns01" - "github.com/go-acme/lego/challenge/http01" - "github.com/go-acme/lego/challenge/tlsalpn01" - "github.com/go-acme/lego/log" -) - -type byType []acme.Challenge - -func (a byType) Len() int { return len(a) } -func (a byType) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byType) Less(i, j int) bool { return a[i].Type > a[j].Type } - -type SolverManager struct { - core *api.Core - solvers map[challenge.Type]solver -} - -func NewSolversManager(core *api.Core) *SolverManager { - return &SolverManager{ - solvers: map[challenge.Type]solver{}, - core: core, - } -} - -// SetHTTP01Provider specifies a custom provider p that can solve the given HTTP-01 challenge. -func (c *SolverManager) SetHTTP01Provider(p challenge.Provider) error { - c.solvers[challenge.HTTP01] = http01.NewChallenge(c.core, validate, p) - return nil -} - -// SetTLSALPN01Provider specifies a custom provider p that can solve the given TLS-ALPN-01 challenge. -func (c *SolverManager) SetTLSALPN01Provider(p challenge.Provider) error { - c.solvers[challenge.TLSALPN01] = tlsalpn01.NewChallenge(c.core, validate, p) - return nil -} - -// SetDNS01Provider specifies a custom provider p that can solve the given DNS-01 challenge. -func (c *SolverManager) SetDNS01Provider(p challenge.Provider, opts ...dns01.ChallengeOption) error { - c.solvers[challenge.DNS01] = dns01.NewChallenge(c.core, validate, p, opts...) - return nil -} - -// Remove Remove a challenge type from the available solvers. -func (c *SolverManager) Remove(chlgType challenge.Type) { - delete(c.solvers, chlgType) -} - -// Checks all challenges from the server in order and returns the first matching solver. 
-func (c *SolverManager) chooseSolver(authz acme.Authorization) solver { - // Allow to have a deterministic challenge order - sort.Sort(byType(authz.Challenges)) - - domain := challenge.GetTargetedDomain(authz) - for _, chlg := range authz.Challenges { - if solvr, ok := c.solvers[challenge.Type(chlg.Type)]; ok { - log.Infof("[%s] acme: use %s solver", domain, chlg.Type) - return solvr - } - log.Infof("[%s] acme: Could not find solver for: %s", domain, chlg.Type) - } - - return nil -} - -func validate(core *api.Core, domain string, chlg acme.Challenge) error { - chlng, err := core.Challenges.New(chlg.URL) - if err != nil { - return fmt.Errorf("failed to initiate challenge: %v", err) - } - - valid, err := checkChallengeStatus(chlng) - if err != nil { - return err - } - - if valid { - log.Infof("[%s] The server validated our request", domain) - return nil - } - - ra, err := strconv.Atoi(chlng.RetryAfter) - if err != nil { - // The ACME server MUST return a Retry-After. - // If it doesn't, we'll just poll hard. - // Boulder does not implement the ability to retry challenges or the Retry-After header. - // https://github.com/letsencrypt/boulder/blob/master/docs/acme-divergences.md#section-82 - ra = 5 - } - initialInterval := time.Duration(ra) * time.Second - - bo := backoff.NewExponentialBackOff() - bo.InitialInterval = initialInterval - bo.MaxInterval = 10 * initialInterval - bo.MaxElapsedTime = 100 * initialInterval - - ctx, cancel := context.WithCancel(context.Background()) - - // After the path is sent, the ACME server will access our server. - // Repeatedly check the server for an updated status on our request. 
- operation := func() error { - authz, err := core.Authorizations.Get(chlng.AuthorizationURL) - if err != nil { - cancel() - return err - } - - valid, err := checkAuthorizationStatus(authz) - if err != nil { - cancel() - return err - } - - if valid { - log.Infof("[%s] The server validated our request", domain) - return nil - } - - return errors.New("the server didn't respond to our request") - } - - return backoff.Retry(operation, backoff.WithContext(bo, ctx)) -} - -func checkChallengeStatus(chlng acme.ExtendedChallenge) (bool, error) { - switch chlng.Status { - case acme.StatusValid: - return true, nil - case acme.StatusPending, acme.StatusProcessing: - return false, nil - case acme.StatusInvalid: - return false, chlng.Error - default: - return false, errors.New("the server returned an unexpected state") - } -} - -func checkAuthorizationStatus(authz acme.Authorization) (bool, error) { - switch authz.Status { - case acme.StatusValid: - return true, nil - case acme.StatusPending, acme.StatusProcessing: - return false, nil - case acme.StatusDeactivated, acme.StatusExpired, acme.StatusRevoked: - return false, fmt.Errorf("the authorization state %s", authz.Status) - case acme.StatusInvalid: - for _, chlg := range authz.Challenges { - if chlg.Status == acme.StatusInvalid && chlg.Error != nil { - return false, chlg.Error - } - } - return false, fmt.Errorf("the authorization state %s", authz.Status) - default: - return false, errors.New("the server returned an unexpected state") - } -} diff --git a/vendor/github.com/go-acme/lego/challenge/tlsalpn01/tls_alpn_challenge.go b/vendor/github.com/go-acme/lego/challenge/tlsalpn01/tls_alpn_challenge.go deleted file mode 100644 index a3fa7ef46..000000000 --- a/vendor/github.com/go-acme/lego/challenge/tlsalpn01/tls_alpn_challenge.go +++ /dev/null @@ -1,129 +0,0 @@ -package tlsalpn01 - -import ( - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "crypto/x509/pkix" - "encoding/asn1" - "fmt" - - "github.com/go-acme/lego/acme" - 
"github.com/go-acme/lego/acme/api" - "github.com/go-acme/lego/certcrypto" - "github.com/go-acme/lego/challenge" - "github.com/go-acme/lego/log" -) - -// idPeAcmeIdentifierV1 is the SMI Security for PKIX Certification Extension OID referencing the ACME extension. -// Reference: https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05#section-5.1 -var idPeAcmeIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31} - -type ValidateFunc func(core *api.Core, domain string, chlng acme.Challenge) error - -type Challenge struct { - core *api.Core - validate ValidateFunc - provider challenge.Provider -} - -func NewChallenge(core *api.Core, validate ValidateFunc, provider challenge.Provider) *Challenge { - return &Challenge{ - core: core, - validate: validate, - provider: provider, - } -} - -func (c *Challenge) SetProvider(provider challenge.Provider) { - c.provider = provider -} - -// Solve manages the provider to validate and solve the challenge. -func (c *Challenge) Solve(authz acme.Authorization) error { - domain := authz.Identifier.Value - log.Infof("[%s] acme: Trying to solve TLS-ALPN-01", challenge.GetTargetedDomain(authz)) - - chlng, err := challenge.FindChallenge(challenge.TLSALPN01, authz) - if err != nil { - return err - } - - // Generate the Key Authorization for the challenge - keyAuth, err := c.core.GetKeyAuthorization(chlng.Token) - if err != nil { - return err - } - - err = c.provider.Present(domain, chlng.Token, keyAuth) - if err != nil { - return fmt.Errorf("[%s] acme: error presenting token: %v", challenge.GetTargetedDomain(authz), err) - } - defer func() { - err := c.provider.CleanUp(domain, chlng.Token, keyAuth) - if err != nil { - log.Warnf("[%s] acme: error cleaning up: %v", challenge.GetTargetedDomain(authz), err) - } - }() - - chlng.KeyAuthorization = keyAuth - return c.validate(c.core, domain, chlng) -} - -// ChallengeBlocks returns PEM blocks (certPEMBlock, keyPEMBlock) with the acmeValidation-v1 extension -// and domain name for the 
`tls-alpn-01` challenge. -func ChallengeBlocks(domain, keyAuth string) ([]byte, []byte, error) { - // Compute the SHA-256 digest of the key authorization. - zBytes := sha256.Sum256([]byte(keyAuth)) - - value, err := asn1.Marshal(zBytes[:sha256.Size]) - if err != nil { - return nil, nil, err - } - - // Add the keyAuth digest as the acmeValidation-v1 extension - // (marked as critical such that it won't be used by non-ACME software). - // Reference: https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05#section-3 - extensions := []pkix.Extension{ - { - Id: idPeAcmeIdentifierV1, - Critical: true, - Value: value, - }, - } - - // Generate a new RSA key for the certificates. - tempPrivateKey, err := certcrypto.GeneratePrivateKey(certcrypto.RSA2048) - if err != nil { - return nil, nil, err - } - - rsaPrivateKey := tempPrivateKey.(*rsa.PrivateKey) - - // Generate the PEM certificate using the provided private key, domain, and extra extensions. - tempCertPEM, err := certcrypto.GeneratePemCert(rsaPrivateKey, domain, extensions) - if err != nil { - return nil, nil, err - } - - // Encode the private key into a PEM format. We'll need to use it to generate the x509 keypair. - rsaPrivatePEM := certcrypto.PEMEncode(rsaPrivateKey) - - return tempCertPEM, rsaPrivatePEM, nil -} - -// ChallengeCert returns a certificate with the acmeValidation-v1 extension -// and domain name for the `tls-alpn-01` challenge. 
-func ChallengeCert(domain, keyAuth string) (*tls.Certificate, error) { - tempCertPEM, rsaPrivatePEM, err := ChallengeBlocks(domain, keyAuth) - if err != nil { - return nil, err - } - - cert, err := tls.X509KeyPair(tempCertPEM, rsaPrivatePEM) - if err != nil { - return nil, err - } - - return &cert, nil -} diff --git a/vendor/github.com/go-acme/lego/challenge/tlsalpn01/tls_alpn_challenge_server.go b/vendor/github.com/go-acme/lego/challenge/tlsalpn01/tls_alpn_challenge_server.go deleted file mode 100644 index 61e353bef..000000000 --- a/vendor/github.com/go-acme/lego/challenge/tlsalpn01/tls_alpn_challenge_server.go +++ /dev/null @@ -1,95 +0,0 @@ -package tlsalpn01 - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" - "strings" - - "github.com/go-acme/lego/log" -) - -const ( - // ACMETLS1Protocol is the ALPN Protocol ID for the ACME-TLS/1 Protocol. - ACMETLS1Protocol = "acme-tls/1" - - // defaultTLSPort is the port that the ProviderServer will default to - // when no other port is provided. - defaultTLSPort = "443" -) - -// ProviderServer implements ChallengeProvider for `TLS-ALPN-01` challenge. -// It may be instantiated without using the NewProviderServer -// if you want only to use the default values. -type ProviderServer struct { - iface string - port string - listener net.Listener -} - -// NewProviderServer creates a new ProviderServer on the selected interface and port. -// Setting iface and / or port to an empty string will make the server fall back to -// the "any" interface and port 443 respectively. -func NewProviderServer(iface, port string) *ProviderServer { - return &ProviderServer{iface: iface, port: port} -} - -func (s *ProviderServer) GetAddress() string { - return net.JoinHostPort(s.iface, s.port) -} - -// Present generates a certificate with a SHA-256 digest of the keyAuth provided -// as the acmeValidation-v1 extension value to conform to the ACME-TLS-ALPN spec. 
-func (s *ProviderServer) Present(domain, token, keyAuth string) error { - if s.port == "" { - // Fallback to port 443 if the port was not provided. - s.port = defaultTLSPort - } - - // Generate the challenge certificate using the provided keyAuth and domain. - cert, err := ChallengeCert(domain, keyAuth) - if err != nil { - return err - } - - // Place the generated certificate with the extension into the TLS config - // so that it can serve the correct details. - tlsConf := new(tls.Config) - tlsConf.Certificates = []tls.Certificate{*cert} - - // We must set that the `acme-tls/1` application level protocol is supported - // so that the protocol negotiation can succeed. Reference: - // https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-01#section-5.2 - tlsConf.NextProtos = []string{ACMETLS1Protocol} - - // Create the listener with the created tls.Config. - s.listener, err = tls.Listen("tcp", s.GetAddress(), tlsConf) - if err != nil { - return fmt.Errorf("could not start HTTPS server for challenge -> %v", err) - } - - // Shut the server down when we're finished. - go func() { - err := http.Serve(s.listener, nil) - if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - log.Println(err) - } - }() - - return nil -} - -// CleanUp closes the HTTPS server. -func (s *ProviderServer) CleanUp(domain, token, keyAuth string) error { - if s.listener == nil { - return nil - } - - // Server was created, close it. 
- if err := s.listener.Close(); err != nil && err != http.ErrServerClosed { - return err - } - - return nil -} diff --git a/vendor/github.com/go-acme/lego/lego/client.go b/vendor/github.com/go-acme/lego/lego/client.go deleted file mode 100644 index c55dd25d3..000000000 --- a/vendor/github.com/go-acme/lego/lego/client.go +++ /dev/null @@ -1,74 +0,0 @@ -package lego - -import ( - "errors" - "net/url" - - "github.com/go-acme/lego/acme/api" - "github.com/go-acme/lego/certificate" - "github.com/go-acme/lego/challenge/resolver" - "github.com/go-acme/lego/registration" -) - -// Client is the user-friendly way to ACME -type Client struct { - Certificate *certificate.Certifier - Challenge *resolver.SolverManager - Registration *registration.Registrar - core *api.Core -} - -// NewClient creates a new ACME client on behalf of the user. -// The client will depend on the ACME directory located at CADirURL for the rest of its actions. -// A private key of type keyType (see KeyType constants) will be generated when requesting a new certificate if one isn't provided. 
-func NewClient(config *Config) (*Client, error) { - if config == nil { - return nil, errors.New("a configuration must be provided") - } - - _, err := url.Parse(config.CADirURL) - if err != nil { - return nil, err - } - - if config.HTTPClient == nil { - return nil, errors.New("the HTTP client cannot be nil") - } - - privateKey := config.User.GetPrivateKey() - if privateKey == nil { - return nil, errors.New("private key was nil") - } - - var kid string - if reg := config.User.GetRegistration(); reg != nil { - kid = reg.URI - } - - core, err := api.New(config.HTTPClient, config.UserAgent, config.CADirURL, kid, privateKey) - if err != nil { - return nil, err - } - - solversManager := resolver.NewSolversManager(core) - - prober := resolver.NewProber(solversManager) - certifier := certificate.NewCertifier(core, prober, certificate.CertifierOptions{KeyType: config.Certificate.KeyType, Timeout: config.Certificate.Timeout}) - - return &Client{ - Certificate: certifier, - Challenge: solversManager, - Registration: registration.NewRegistrar(core, config.User), - core: core, - }, nil -} - -// GetToSURL returns the current ToS URL from the Directory -func (c *Client) GetToSURL() string { - return c.core.GetDirectory().Meta.TermsOfService -} - -// GetExternalAccountRequired returns the External Account Binding requirement of the Directory -func (c *Client) GetExternalAccountRequired() bool { - return c.core.GetDirectory().Meta.ExternalAccountRequired -} diff --git a/vendor/github.com/go-acme/lego/lego/client_config.go b/vendor/github.com/go-acme/lego/lego/client_config.go deleted file mode 100644 index 2421d7fa3..000000000 --- a/vendor/github.com/go-acme/lego/lego/client_config.go +++ /dev/null @@ -1,104 +0,0 @@ -package lego - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "time" - - "github.com/go-acme/lego/certcrypto" - "github.com/go-acme/lego/registration" -) - -const ( - // caCertificatesEnvVar is the environment variable name 
that can be used to - // specify the path to PEM encoded CA Certificates that can be used to - // authenticate an ACME server with a HTTPS certificate not issued by a CA in - // the system-wide trusted root list. - caCertificatesEnvVar = "LEGO_CA_CERTIFICATES" - - // caServerNameEnvVar is the environment variable name that can be used to - // specify the CA server name that can be used to - // authenticate an ACME server with a HTTPS certificate not issued by a CA in - // the system-wide trusted root list. - caServerNameEnvVar = "LEGO_CA_SERVER_NAME" - - // LEDirectoryProduction URL to the Let's Encrypt production - LEDirectoryProduction = "https://acme-v02.api.letsencrypt.org/directory" - - // LEDirectoryStaging URL to the Let's Encrypt staging - LEDirectoryStaging = "https://acme-staging-v02.api.letsencrypt.org/directory" -) - -type Config struct { - CADirURL string - User registration.User - UserAgent string - HTTPClient *http.Client - Certificate CertificateConfig -} - -func NewConfig(user registration.User) *Config { - return &Config{ - CADirURL: LEDirectoryProduction, - User: user, - HTTPClient: createDefaultHTTPClient(), - Certificate: CertificateConfig{ - KeyType: certcrypto.RSA2048, - Timeout: 30 * time.Second, - }, - } -} - -type CertificateConfig struct { - KeyType certcrypto.KeyType - Timeout time.Duration -} - -// createDefaultHTTPClient Creates an HTTP client with a reasonable timeout value -// and potentially a custom *x509.CertPool -// based on the caCertificatesEnvVar environment variable (see the `initCertPool` function) -func createDefaultHTTPClient() *http.Client { - return &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - TLSHandshakeTimeout: 15 * time.Second, - ResponseHeaderTimeout: 15 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{ - ServerName: 
os.Getenv(caServerNameEnvVar), - RootCAs: initCertPool(), - }, - }, - } -} - -// initCertPool creates a *x509.CertPool populated with the PEM certificates -// found in the filepath specified in the caCertificatesEnvVar OS environment -// variable. If the caCertificatesEnvVar is not set then initCertPool will -// return nil. If there is an error creating a *x509.CertPool from the provided -// caCertificatesEnvVar value then initCertPool will panic. -func initCertPool() *x509.CertPool { - if customCACertsPath := os.Getenv(caCertificatesEnvVar); customCACertsPath != "" { - customCAs, err := ioutil.ReadFile(customCACertsPath) - if err != nil { - panic(fmt.Sprintf("error reading %s=%q: %v", - caCertificatesEnvVar, customCACertsPath, err)) - } - certPool := x509.NewCertPool() - if ok := certPool.AppendCertsFromPEM(customCAs); !ok { - panic(fmt.Sprintf("error creating x509 cert pool from %s=%q: %v", - caCertificatesEnvVar, customCACertsPath, err)) - } - return certPool - } - return nil -} diff --git a/vendor/github.com/go-acme/lego/log/logger.go b/vendor/github.com/go-acme/lego/log/logger.go deleted file mode 100644 index 22ec98f0a..000000000 --- a/vendor/github.com/go-acme/lego/log/logger.go +++ /dev/null @@ -1,59 +0,0 @@ -package log - -import ( - "log" - "os" -) - -// Logger is an optional custom logger. -var Logger StdLogger = log.New(os.Stdout, "", log.LstdFlags) - -// StdLogger interface for Standard Logger. -type StdLogger interface { - Fatal(args ...interface{}) - Fatalln(args ...interface{}) - Fatalf(format string, args ...interface{}) - Print(args ...interface{}) - Println(args ...interface{}) - Printf(format string, args ...interface{}) -} - -// Fatal writes a log entry. -// It uses Logger if not nil, otherwise it uses the default log.Logger. -func Fatal(args ...interface{}) { - Logger.Fatal(args...) -} - -// Fatalf writes a log entry. -// It uses Logger if not nil, otherwise it uses the default log.Logger. 
-func Fatalf(format string, args ...interface{}) { - Logger.Fatalf(format, args...) -} - -// Print writes a log entry. -// It uses Logger if not nil, otherwise it uses the default log.Logger. -func Print(args ...interface{}) { - Logger.Print(args...) -} - -// Println writes a log entry. -// It uses Logger if not nil, otherwise it uses the default log.Logger. -func Println(args ...interface{}) { - Logger.Println(args...) -} - -// Printf writes a log entry. -// It uses Logger if not nil, otherwise it uses the default log.Logger. -func Printf(format string, args ...interface{}) { - Logger.Printf(format, args...) -} - -// Warnf writes a log entry. -func Warnf(format string, args ...interface{}) { - Printf("[WARN] "+format, args...) -} - -// Infof writes a log entry. -func Infof(format string, args ...interface{}) { - Printf("[INFO] "+format, args...) -} diff --git a/vendor/github.com/go-acme/lego/platform/wait/wait.go b/vendor/github.com/go-acme/lego/platform/wait/wait.go deleted file mode 100644 index 97af5dceb..000000000 --- a/vendor/github.com/go-acme/lego/platform/wait/wait.go +++ /dev/null @@ -1,33 +0,0 @@ -package wait - -import ( - "fmt" - "time" - - "github.com/go-acme/lego/log" -) - -// For polls the given function 'f', once every 'interval', up to 'timeout'. 
-func For(msg string, timeout, interval time.Duration, f func() (bool, error)) error { - log.Infof("Wait for %s [timeout: %s, interval: %s]", msg, timeout, interval) - - var lastErr string - timeUp := time.After(timeout) - for { - select { - case <-timeUp: - return fmt.Errorf("time limit exceeded: last error: %s", lastErr) - default: - } - - stop, err := f() - if stop { - return nil - } - if err != nil { - lastErr = err.Error() - } - - time.Sleep(interval) - } -} diff --git a/vendor/github.com/go-acme/lego/registration/registar.go b/vendor/github.com/go-acme/lego/registration/registar.go deleted file mode 100644 index 09e866574..000000000 --- a/vendor/github.com/go-acme/lego/registration/registar.go +++ /dev/null @@ -1,146 +0,0 @@ -package registration - -import ( - "errors" - "net/http" - - "github.com/go-acme/lego/acme" - "github.com/go-acme/lego/acme/api" - "github.com/go-acme/lego/log" -) - -// Resource represents all important information about a registration -// of which the client needs to keep track itself. -// Deprecated: will be remove in the future (acme.ExtendedAccount). -type Resource struct { - Body acme.Account `json:"body,omitempty"` - URI string `json:"uri,omitempty"` -} - -type RegisterOptions struct { - TermsOfServiceAgreed bool -} - -type RegisterEABOptions struct { - TermsOfServiceAgreed bool - Kid string - HmacEncoded string -} - -type Registrar struct { - core *api.Core - user User -} - -func NewRegistrar(core *api.Core, user User) *Registrar { - return &Registrar{ - core: core, - user: user, - } -} - -// Register the current account to the ACME server. 
-func (r *Registrar) Register(options RegisterOptions) (*Resource, error) { - if r == nil || r.user == nil { - return nil, errors.New("acme: cannot register a nil client or user") - } - - accMsg := acme.Account{ - TermsOfServiceAgreed: options.TermsOfServiceAgreed, - Contact: []string{}, - } - - if r.user.GetEmail() != "" { - log.Infof("acme: Registering account for %s", r.user.GetEmail()) - accMsg.Contact = []string{"mailto:" + r.user.GetEmail()} - } - - account, err := r.core.Accounts.New(accMsg) - if err != nil { - // FIXME seems impossible - errorDetails, ok := err.(acme.ProblemDetails) - if !ok || errorDetails.HTTPStatus != http.StatusConflict { - return nil, err - } - } - - return &Resource{URI: account.Location, Body: account.Account}, nil -} - -// RegisterWithExternalAccountBinding Register the current account to the ACME server. -func (r *Registrar) RegisterWithExternalAccountBinding(options RegisterEABOptions) (*Resource, error) { - accMsg := acme.Account{ - TermsOfServiceAgreed: options.TermsOfServiceAgreed, - Contact: []string{}, - } - - if r.user.GetEmail() != "" { - log.Infof("acme: Registering account for %s", r.user.GetEmail()) - accMsg.Contact = []string{"mailto:" + r.user.GetEmail()} - } - - account, err := r.core.Accounts.NewEAB(accMsg, options.Kid, options.HmacEncoded) - if err != nil { - errorDetails, ok := err.(acme.ProblemDetails) - // FIXME seems impossible - if !ok || errorDetails.HTTPStatus != http.StatusConflict { - return nil, err - } - } - - return &Resource{URI: account.Location, Body: account.Account}, nil -} - -// QueryRegistration runs a POST request on the client's registration and returns the result. -// -// This is similar to the Register function, -// but acting on an existing registration link and resource. 
-func (r *Registrar) QueryRegistration() (*Resource, error) { - if r == nil || r.user == nil { - return nil, errors.New("acme: cannot query the registration of a nil client or user") - } - - // Log the URL here instead of the email as the email may not be set - log.Infof("acme: Querying account for %s", r.user.GetRegistration().URI) - - account, err := r.core.Accounts.Get(r.user.GetRegistration().URI) - if err != nil { - return nil, err - } - - return &Resource{ - Body: account, - // Location: header is not returned so this needs to be populated off of existing URI - URI: r.user.GetRegistration().URI, - }, nil -} - -// DeleteRegistration deletes the client's user registration from the ACME server. -func (r *Registrar) DeleteRegistration() error { - if r == nil || r.user == nil { - return errors.New("acme: cannot unregister a nil client or user") - } - - log.Infof("acme: Deleting account for %s", r.user.GetEmail()) - - return r.core.Accounts.Deactivate(r.user.GetRegistration().URI) -} - -// ResolveAccountByKey will attempt to look up an account using the given account key -// and return its registration resource. -func (r *Registrar) ResolveAccountByKey() (*Resource, error) { - log.Infof("acme: Trying to resolve account by key") - - accMsg := acme.Account{OnlyReturnExisting: true} - accountTransit, err := r.core.Accounts.New(accMsg) - if err != nil { - return nil, err - } - - account, err := r.core.Accounts.Get(accountTransit.Location) - if err != nil { - return nil, err - } - - return &Resource{URI: accountTransit.Location, Body: account}, nil -} diff --git a/vendor/github.com/go-acme/lego/registration/user.go b/vendor/github.com/go-acme/lego/registration/user.go deleted file mode 100644 index 1e29300e2..000000000 --- a/vendor/github.com/go-acme/lego/registration/user.go +++ /dev/null @@ -1,13 +0,0 @@ -package registration - -import ( - "crypto" -) - -// User interface is to be implemented by users of this library. 
-// It is used by the client type to get user specific information. -type User interface { - GetEmail() string - GetRegistration() *Resource - GetPrivateKey() crypto.PrivateKey -} diff --git a/vendor/github.com/hashicorp/go-syslog/.gitignore b/vendor/github.com/hashicorp/go-syslog/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/hashicorp/go-syslog/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/hashicorp/go-syslog/LICENSE b/vendor/github.com/hashicorp/go-syslog/LICENSE deleted file mode 100644 index a5df10e67..000000000 --- a/vendor/github.com/hashicorp/go-syslog/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-syslog/README.md b/vendor/github.com/hashicorp/go-syslog/README.md deleted file mode 100644 index bbfae8f9b..000000000 --- a/vendor/github.com/hashicorp/go-syslog/README.md +++ /dev/null @@ -1,11 +0,0 @@ -go-syslog -========= - -This repository provides a very simple `gsyslog` package. The point of this -package is to allow safe importing of syslog without introducing cross-compilation -issues. The stdlib `log/syslog` cannot be imported on Windows systems, and without -conditional compilation this adds complications. - -Instead, `gsyslog` provides a very simple wrapper around `log/syslog` but returns -a runtime error if attempting to initialize on a non Linux or OSX system. - diff --git a/vendor/github.com/hashicorp/go-syslog/builtin.go b/vendor/github.com/hashicorp/go-syslog/builtin.go deleted file mode 100644 index 72bdd61c9..000000000 --- a/vendor/github.com/hashicorp/go-syslog/builtin.go +++ /dev/null @@ -1,214 +0,0 @@ -// This file is taken from the log/syslog in the standard lib. -// However, there is a bug with overwhelming syslog that causes writes -// to block indefinitely. This is fixed by adding a write deadline. -// -// +build !windows,!nacl,!plan9 - -package gsyslog - -import ( - "errors" - "fmt" - "log/syslog" - "net" - "os" - "strings" - "sync" - "time" -) - -const severityMask = 0x07 -const facilityMask = 0xf8 -const localDeadline = 20 * time.Millisecond -const remoteDeadline = 50 * time.Millisecond - -// A builtinWriter is a connection to a syslog server. 
-type builtinWriter struct { - priority syslog.Priority - tag string - hostname string - network string - raddr string - - mu sync.Mutex // guards conn - conn serverConn -} - -// This interface and the separate syslog_unix.go file exist for -// Solaris support as implemented by gccgo. On Solaris you can not -// simply open a TCP connection to the syslog daemon. The gccgo -// sources have a syslog_solaris.go file that implements unixSyslog to -// return a type that satisfies this interface and simply calls the C -// library syslog function. -type serverConn interface { - writeString(p syslog.Priority, hostname, tag, s, nl string) error - close() error -} - -type netConn struct { - local bool - conn net.Conn -} - -// New establishes a new connection to the system log daemon. Each -// write to the returned writer sends a log message with the given -// priority and prefix. -func newBuiltin(priority syslog.Priority, tag string) (w *builtinWriter, err error) { - return dialBuiltin("", "", priority, tag) -} - -// Dial establishes a connection to a log daemon by connecting to -// address raddr on the specified network. Each write to the returned -// writer sends a log message with the given facility, severity and -// tag. -// If network is empty, Dial will connect to the local syslog server. -func dialBuiltin(network, raddr string, priority syslog.Priority, tag string) (*builtinWriter, error) { - if priority < 0 || priority > syslog.LOG_LOCAL7|syslog.LOG_DEBUG { - return nil, errors.New("log/syslog: invalid priority") - } - - if tag == "" { - tag = os.Args[0] - } - hostname, _ := os.Hostname() - - w := &builtinWriter{ - priority: priority, - tag: tag, - hostname: hostname, - network: network, - raddr: raddr, - } - - w.mu.Lock() - defer w.mu.Unlock() - - err := w.connect() - if err != nil { - return nil, err - } - return w, err -} - -// connect makes a connection to the syslog server. -// It must be called with w.mu held. 
-func (w *builtinWriter) connect() (err error) { - if w.conn != nil { - // ignore err from close, it makes sense to continue anyway - w.conn.close() - w.conn = nil - } - - if w.network == "" { - w.conn, err = unixSyslog() - if w.hostname == "" { - w.hostname = "localhost" - } - } else { - var c net.Conn - c, err = net.DialTimeout(w.network, w.raddr, remoteDeadline) - if err == nil { - w.conn = &netConn{conn: c} - if w.hostname == "" { - w.hostname = c.LocalAddr().String() - } - } - } - return -} - -// Write sends a log message to the syslog daemon. -func (w *builtinWriter) Write(b []byte) (int, error) { - return w.writeAndRetry(w.priority, string(b)) -} - -// Close closes a connection to the syslog daemon. -func (w *builtinWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - - if w.conn != nil { - err := w.conn.close() - w.conn = nil - return err - } - return nil -} - -func (w *builtinWriter) writeAndRetry(p syslog.Priority, s string) (int, error) { - pr := (w.priority & facilityMask) | (p & severityMask) - - w.mu.Lock() - defer w.mu.Unlock() - - if w.conn != nil { - if n, err := w.write(pr, s); err == nil { - return n, err - } - } - if err := w.connect(); err != nil { - return 0, err - } - return w.write(pr, s) -} - -// write generates and writes a syslog formatted string. The -// format is as follows: TIMESTAMP HOSTNAME TAG[PID]: MSG -func (w *builtinWriter) write(p syslog.Priority, msg string) (int, error) { - // ensure it ends in a \n - nl := "" - if !strings.HasSuffix(msg, "\n") { - nl = "\n" - } - - err := w.conn.writeString(p, w.hostname, w.tag, msg, nl) - if err != nil { - return 0, err - } - // Note: return the length of the input, not the number of - // bytes printed by Fprintf, because this must behave like - // an io.Writer. - return len(msg), nil -} - -func (n *netConn) writeString(p syslog.Priority, hostname, tag, msg, nl string) error { - if n.local { - // Compared to the network form below, the changes are: - // 1. 
Use time.Stamp instead of time.RFC3339. - // 2. Drop the hostname field from the Fprintf. - timestamp := time.Now().Format(time.Stamp) - n.conn.SetWriteDeadline(time.Now().Add(localDeadline)) - _, err := fmt.Fprintf(n.conn, "<%d>%s %s[%d]: %s%s", - p, timestamp, - tag, os.Getpid(), msg, nl) - return err - } - timestamp := time.Now().Format(time.RFC3339) - n.conn.SetWriteDeadline(time.Now().Add(remoteDeadline)) - _, err := fmt.Fprintf(n.conn, "<%d>%s %s %s[%d]: %s%s", - p, timestamp, hostname, - tag, os.Getpid(), msg, nl) - return err -} - -func (n *netConn) close() error { - return n.conn.Close() -} - -// unixSyslog opens a connection to the syslog daemon running on the -// local machine using a Unix domain socket. -func unixSyslog() (conn serverConn, err error) { - logTypes := []string{"unixgram", "unix"} - logPaths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"} - for _, network := range logTypes { - for _, path := range logPaths { - conn, err := net.DialTimeout(network, path, localDeadline) - if err != nil { - continue - } else { - return &netConn{conn: conn, local: true}, nil - } - } - } - return nil, errors.New("Unix syslog delivery error") -} diff --git a/vendor/github.com/hashicorp/go-syslog/go.mod b/vendor/github.com/hashicorp/go-syslog/go.mod deleted file mode 100644 index 0e4c2d0dc..000000000 --- a/vendor/github.com/hashicorp/go-syslog/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/hashicorp/go-syslog diff --git a/vendor/github.com/hashicorp/go-syslog/syslog.go b/vendor/github.com/hashicorp/go-syslog/syslog.go deleted file mode 100644 index 3f5a6f3fb..000000000 --- a/vendor/github.com/hashicorp/go-syslog/syslog.go +++ /dev/null @@ -1,27 +0,0 @@ -package gsyslog - -// Priority maps to the syslog priority levels -type Priority int - -const ( - LOG_EMERG Priority = iota - LOG_ALERT - LOG_CRIT - LOG_ERR - LOG_WARNING - LOG_NOTICE - LOG_INFO - LOG_DEBUG -) - -// Syslogger interface is used to write log messages to syslog -type Syslogger 
interface { - // WriteLevel is used to write a message at a given level - WriteLevel(Priority, []byte) error - - // Write is used to write a message at the default level - Write([]byte) (int, error) - - // Close is used to close the connection to the logger - Close() error -} diff --git a/vendor/github.com/hashicorp/go-syslog/unix.go b/vendor/github.com/hashicorp/go-syslog/unix.go deleted file mode 100644 index 70b71802e..000000000 --- a/vendor/github.com/hashicorp/go-syslog/unix.go +++ /dev/null @@ -1,123 +0,0 @@ -// +build linux darwin dragonfly freebsd netbsd openbsd solaris - -package gsyslog - -import ( - "fmt" - "log/syslog" - "strings" -) - -// builtinLogger wraps the Golang implementation of a -// syslog.Writer to provide the Syslogger interface -type builtinLogger struct { - *builtinWriter -} - -// NewLogger is used to construct a new Syslogger -func NewLogger(p Priority, facility, tag string) (Syslogger, error) { - fPriority, err := facilityPriority(facility) - if err != nil { - return nil, err - } - priority := syslog.Priority(p) | fPriority - l, err := newBuiltin(priority, tag) - if err != nil { - return nil, err - } - return &builtinLogger{l}, nil -} - -// DialLogger is used to construct a new Syslogger that establishes connection to remote syslog server -func DialLogger(network, raddr string, p Priority, facility, tag string) (Syslogger, error) { - fPriority, err := facilityPriority(facility) - if err != nil { - return nil, err - } - - priority := syslog.Priority(p) | fPriority - - l, err := dialBuiltin(network, raddr, priority, tag) - if err != nil { - return nil, err - } - - return &builtinLogger{l}, nil -} - -// WriteLevel writes out a message at the given priority -func (b *builtinLogger) WriteLevel(p Priority, buf []byte) error { - var err error - m := string(buf) - switch p { - case LOG_EMERG: - _, err = b.writeAndRetry(syslog.LOG_EMERG, m) - case LOG_ALERT: - _, err = b.writeAndRetry(syslog.LOG_ALERT, m) - case LOG_CRIT: - _, err = 
b.writeAndRetry(syslog.LOG_CRIT, m) - case LOG_ERR: - _, err = b.writeAndRetry(syslog.LOG_ERR, m) - case LOG_WARNING: - _, err = b.writeAndRetry(syslog.LOG_WARNING, m) - case LOG_NOTICE: - _, err = b.writeAndRetry(syslog.LOG_NOTICE, m) - case LOG_INFO: - _, err = b.writeAndRetry(syslog.LOG_INFO, m) - case LOG_DEBUG: - _, err = b.writeAndRetry(syslog.LOG_DEBUG, m) - default: - err = fmt.Errorf("Unknown priority: %v", p) - } - return err -} - -// facilityPriority converts a facility string into -// an appropriate priority level or returns an error -func facilityPriority(facility string) (syslog.Priority, error) { - facility = strings.ToUpper(facility) - switch facility { - case "KERN": - return syslog.LOG_KERN, nil - case "USER": - return syslog.LOG_USER, nil - case "MAIL": - return syslog.LOG_MAIL, nil - case "DAEMON": - return syslog.LOG_DAEMON, nil - case "AUTH": - return syslog.LOG_AUTH, nil - case "SYSLOG": - return syslog.LOG_SYSLOG, nil - case "LPR": - return syslog.LOG_LPR, nil - case "NEWS": - return syslog.LOG_NEWS, nil - case "UUCP": - return syslog.LOG_UUCP, nil - case "CRON": - return syslog.LOG_CRON, nil - case "AUTHPRIV": - return syslog.LOG_AUTHPRIV, nil - case "FTP": - return syslog.LOG_FTP, nil - case "LOCAL0": - return syslog.LOG_LOCAL0, nil - case "LOCAL1": - return syslog.LOG_LOCAL1, nil - case "LOCAL2": - return syslog.LOG_LOCAL2, nil - case "LOCAL3": - return syslog.LOG_LOCAL3, nil - case "LOCAL4": - return syslog.LOG_LOCAL4, nil - case "LOCAL5": - return syslog.LOG_LOCAL5, nil - case "LOCAL6": - return syslog.LOG_LOCAL6, nil - case "LOCAL7": - return syslog.LOG_LOCAL7, nil - default: - return 0, fmt.Errorf("invalid syslog facility: %s", facility) - } -} diff --git a/vendor/github.com/hashicorp/go-syslog/unsupported.go b/vendor/github.com/hashicorp/go-syslog/unsupported.go deleted file mode 100644 index b8ca3a5c7..000000000 --- a/vendor/github.com/hashicorp/go-syslog/unsupported.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build windows plan9 nacl - 
-package gsyslog - -import ( - "fmt" -) - -// NewLogger is used to construct a new Syslogger -func NewLogger(p Priority, facility, tag string) (Syslogger, error) { - return nil, fmt.Errorf("Platform does not support syslog") -} - -// DialLogger is used to construct a new Syslogger that establishes connection to remote syslog server -func DialLogger(network, raddr string, p Priority, facility, tag string) (Syslogger, error) { - return nil, fmt.Errorf("Platform does not support syslog") -} diff --git a/vendor/github.com/jimstudt/http-authentication/LICENSE b/vendor/github.com/jimstudt/http-authentication/LICENSE deleted file mode 100644 index 70def10f9..000000000 --- a/vendor/github.com/jimstudt/http-authentication/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Jim Studt - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/jimstudt/http-authentication/basic/README.md b/vendor/github.com/jimstudt/http-authentication/basic/README.md deleted file mode 100644 index 96a2b0cf0..000000000 --- a/vendor/github.com/jimstudt/http-authentication/basic/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# auth/htpasswd [![GoDoc](https://godoc.org/github.com/jimstudt/http-authentication/basic?status.png)](http://godoc.org/github.com/jimstudt/http-authentication/basic) - -Authenticate using Apache-style htpasswd files and HTTP Basic Authentication. - -`htpasswd` has supported a number of different password hashing schemes over the -decades. Most common and sane ones are supported directly. For some you will need -to add another package. - -| Style | Status | -|-------|--------| -| plain | yes+ | -| md5 | yes | -| sha | yes | -| crypt | no (conflicts with plain) | -| bcrypt | no (can add with another package) | - -The standard set of systems will use *Plain*, *Sha*, and *MD5* systems while filtering out *bcrypt*. -Because of its complexity, *bcrypt* will be in another package which you can import and -add if you need it. *Plain* accepts both Apache style plain text and nginx style where the -password is preceded by {PLAIN}. - -## Usage - -~~~ go -import ( - "github.com/codegangsta/martini" - "github.com/jimstudt/http-authentication/basic" - "log" -) - -func main() { - m := martini.Classic() - - pw,err := basic.New("My Realm", "./my-htpasswd-file", htpasswd.DefaultSystems, nil) - if ( err != nil) { - log.Fatalf("Unable to read my htpassword file: %s", err.Error()) - } - - // authenticate every request - m.Use( pw.ServeHTTP) - - // You will also want to call pw.Reload(nil) to reprocess the password file when it changes. - - // You can use pw.ReloadOn( syscall.SIGHUP, nil ) to make it automatically - // reload on a HUP signal. 
- - // And those 'nil' arguments are where you pass a function to be notified about illegally - // formatted entries, or unsupported hash systems. See the API documents. - - // If you only want to authenticate some requests, then it goes like this... - // m.Get("/secure/thing", pw.ServeHTTP, myRealHandler) - // ... if pw.ServeHTTP does the 401 then your handler will not be called - - m.Run() - -} -~~~ - -## API Documentation - -The API is documented using godoc and also available at [godoc.org](http://godoc.org/github.com/jimstudt/http-authentication/basic) -~~~ - - - diff --git a/vendor/github.com/jimstudt/http-authentication/basic/bcrypt.go b/vendor/github.com/jimstudt/http-authentication/basic/bcrypt.go deleted file mode 100644 index 4fc32b4b5..000000000 --- a/vendor/github.com/jimstudt/http-authentication/basic/bcrypt.go +++ /dev/null @@ -1,15 +0,0 @@ -package basic - -import ( - "fmt" - "strings" -) - -// Reject any password encoded using bcrypt. -func RejectBcrypt(src string) (EncodedPasswd, error) { - if strings.HasPrefix(src, "$2y$") { - return nil, fmt.Errorf("bcrypt passwords are not accepted: %s", src) - } - - return nil, nil -} diff --git a/vendor/github.com/jimstudt/http-authentication/basic/htpasswd.go b/vendor/github.com/jimstudt/http-authentication/basic/htpasswd.go deleted file mode 100644 index bc4bf8e8b..000000000 --- a/vendor/github.com/jimstudt/http-authentication/basic/htpasswd.go +++ /dev/null @@ -1,208 +0,0 @@ -// Package htpasswd provides HTTP Basic Authentication using Apache-style htpasswd files -// for the user and password data. -// -// It supports most common hashing systems used over the decades and can be easily extended -// by the programmer to support others. (See the sha.go source file as a guide.) -// -// You will want to use something like... 
-// myauth := htpasswd.New("My Realm", "./my-htpasswd-file", htpasswd.DefaultSystems, nil) -// m.Use(myauth.Handler) -// ...to configure your authentication and then use the myauth.Handler as a middleware handler in your Martini stack. -// You should read about that nil, as well as Reread() too. -package basic - -import ( - "bufio" - "encoding/base64" - "fmt" - "net/http" - "os" - "os/signal" - "strings" - "sync" -) - -// An EncodedPasswd is created from the encoded password in a password file by a PasswdParser. -// -// The password files consist of lines like "user:passwd-encoding". The user part is stripped off and -// the passwd-encoding part is captured in an EncodedPasswd. -type EncodedPasswd interface { - // Return true if the string matches the password. - // This may cache the result in the case of expensive comparison functions. - MatchesPassword(pw string) bool -} - -// Examine an encoded password, and if it is formatted correctly and sane, return an -// EncodedPasswd which will recognize it. -// -// If the format is not understood, then return nil -// so that another parser may have a chance. If the format is understood but not sane, -// return an error to prevent other formats from possibly claiming it -// -// You may write and supply one of these functions to support a format (e.g. bcrypt) not -// already included in this package. Use sha.c as a template, it is simple but not too simple. -type PasswdParser func(pw string) (EncodedPasswd, error) - -type passwdTable map[string]EncodedPasswd - -// A BadLineHandler is used to notice bad lines in a password file. If not nil, it will be -// called for each bad line with a descriptive error. Think about what you do with these, they -// will sometimes contain hashed passwords. 
-type BadLineHandler func(err error) - -// An HtpasswdFile encompasses an Apache-style htpasswd file for HTTP Basic authentication -type HtpasswdFile struct { - realm string - filePath string - mutex sync.Mutex - passwds passwdTable - parsers []PasswdParser -} - -// An array of PasswdParser including all builtin parsers. Notice that Plain is last, since it accepts anything -var DefaultSystems []PasswdParser = []PasswdParser{AcceptMd5, AcceptSha, RejectBcrypt, AcceptPlain} - -// New creates an HtpasswdFile from an Apache-style htpasswd file for HTTP Basic Authentication. -// -// The realm is presented to the user in the login dialog. -// -// The filename must exist and be accessible to the process, as well as being a valid htpasswd file. -// -// parsers is a list of functions to handle various hashing systems. In practice you will probably -// just pass htpasswd.DefaultSystems, but you could make your own to explicitly reject some formats or -// implement your own. -// -// bad is a function, which if not nil will be called for each malformed or rejected entry in -// the password file. -func New(realm string, filename string, parsers []PasswdParser, bad BadLineHandler) (*HtpasswdFile, error) { - bf := HtpasswdFile{ - realm: realm, - filePath: filename, - parsers: parsers, - } - - if err := bf.Reload(bad); err != nil { - return nil, err - } - - return &bf, nil -} - -// A Martini middleware handler to enforce HTTP Basic Auth using the policy read from the htpasswd file. 
-func (bf *HtpasswdFile) ServeHTTP(res http.ResponseWriter, req *http.Request) { - // if everything works, we return, otherwise we get to the - // end where we do an http.Error to stop the request - auth := req.Header.Get("Authorization") - - if auth != "" { - userPassword, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) - if err == nil { - parts := strings.SplitN(string(userPassword), ":", 2) - if len(parts) == 2 { - user := parts[0] - pw := parts[1] - - bf.mutex.Lock() - matcher, ok := bf.passwds[user] - bf.mutex.Unlock() - - if ok && matcher.MatchesPassword(pw) { - // we are good - return - } - } - } - } - - res.Header().Set("WWW-Authenticate", "Basic realm=\""+bf.realm+"\"") - http.Error(res, "Not Authorized", http.StatusUnauthorized) -} - -// Reread the password file for this HtpasswdFile. -// You will need to call this to notice any changes to the password file. -// This function is thread safe. Someone versed in fsnotify might make it -// happen automatically. Likewise you might also connect a SIGHUP handler to -// this function. -func (bf *HtpasswdFile) Reload(bad BadLineHandler) error { - // with the file... - f, err := os.Open(bf.filePath) - if err != nil { - return err - } - defer f.Close() - - // ... and a new map ... - newPasswdMap := passwdTable{} - - // ... for each line ... - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - - // ... add it to the map, noting errors along the way - if perr := bf.addHtpasswdUser(&newPasswdMap, line); perr != nil && bad != nil { - bad(perr) - } - } - if err := scanner.Err(); err != nil { - return fmt.Errorf("Error scanning htpasswd file: %s", err.Error()) - } - - // .. finally, safely swap in the new map - bf.mutex.Lock() - bf.passwds = newPasswdMap - bf.mutex.Unlock() - - return nil -} - -// Reload the htpasswd file on a signal. If there is an error, the old data will be kept instead. 
-// Typically you would use syscall.SIGHUP for the value of "when" -func (bf *HtpasswdFile) ReloadOn(when os.Signal, onbad BadLineHandler) { - // this is rather common with code in digest, but I don't have a common area... - c := make(chan os.Signal, 1) - signal.Notify(c, when) - - go func() { - for { - _ = <-c - bf.Reload(onbad) - } - }() -} - -// Process a line from an htpasswd file and add it to the user/password map. We may -// encounter some malformed lines, this will not be an error, but we will log them if -// the caller has given us a logger. -func (bf *HtpasswdFile) addHtpasswdUser(pwmap *passwdTable, rawLine string) error { - // ignore white space lines - line := strings.TrimSpace(rawLine) - if line == "" { - return nil - } - - // split "user:encoding" at colon - parts := strings.SplitN(line, ":", 2) - if len(parts) != 2 { - return fmt.Errorf("malformed line, no colon: %s", line) - } - - user := parts[0] - encoding := parts[1] - - // give each parser a shot. The first one to produce a matcher wins. 
- // If one produces an error then stop (to prevent Plain from catching it) - for _, p := range bf.parsers { - matcher, err := p(encoding) - if err != nil { - return err - } - if matcher != nil { - (*pwmap)[user] = matcher - return nil // we are done, we took to first match - } - } - - // No one liked this line - return fmt.Errorf("unable to recognize password for %s in %s", user, encoding) -} diff --git a/vendor/github.com/jimstudt/http-authentication/basic/md5.go b/vendor/github.com/jimstudt/http-authentication/basic/md5.go deleted file mode 100644 index 0ac793a97..000000000 --- a/vendor/github.com/jimstudt/http-authentication/basic/md5.go +++ /dev/null @@ -1,143 +0,0 @@ -package basic - -import ( - "bytes" - "crypto/md5" - "fmt" - "strings" -) - -type md5Password struct { - salt string - hashed string -} - -// Accept valid MD5 encoded passwords -func AcceptMd5(src string) (EncodedPasswd, error) { - if !strings.HasPrefix(src, "$apr1$") { - return nil, nil - } - - rest := strings.TrimPrefix(src, "$apr1$") - mparts := strings.SplitN(rest, "$", 2) - if len(mparts) != 2 { - return nil, fmt.Errorf("malformed md5 password: %s", src) - } - - salt, hashed := mparts[0], mparts[1] - return &md5Password{salt, hashed}, nil -} - -// Reject any MD5 encoded password -func RejectMd5(src string) (EncodedPasswd, error) { - if !strings.HasPrefix(src, "$apr1$") { - return nil, nil - } - return nil, fmt.Errorf("md5 password rejected: %s", src) -} - -// This is the MD5 hashing function out of Apache's htpasswd program. The algorithm -// is insane, but we have to match it. Mercifully I found a PHP variant of it at -// http://stackoverflow.com/questions/2994637/how-to-edit-htpasswd-using-php -// in an answer. That reads better than the original C, and is easy to instrument. -// We will eventually go back to the original apr_md5.c for inspiration when the -// PHP gets too weird. 
-// The algorithm makes more sense if you imagine the original authors in a pub, -// drinking beer and rolling dice as the fundamental design process. -func apr1Md5(password string, salt string) string { - - // start with a hash of password and salt - initBin := md5.Sum([]byte(password + salt + password)) - - // begin an initial string with hash and salt - initText := bytes.NewBufferString(password + "$apr1$" + salt) - - // add crap to the string willy-nilly - for i := len(password); i > 0; i -= 16 { - lim := i - if lim > 16 { - lim = 16 - } - initText.Write(initBin[0:lim]) - } - - // add more crap to the string willy-nilly - for i := len(password); i > 0; i >>= 1 { - if (i & 1) == 1 { - initText.WriteByte(byte(0)) - } else { - initText.WriteByte(password[0]) - } - } - - // Begin our hashing in earnest using our initial string - bin := md5.Sum(initText.Bytes()) - - n := bytes.NewBuffer([]byte{}) - - for i := 0; i < 1000; i++ { - // prepare to make a new muddle - n.Reset() - - // alternate password+crap+bin with bin+crap+password - if (i & 1) == 1 { - n.WriteString(password) - } else { - n.Write(bin[:]) - } - - // usually add the salt, but not always - if i%3 != 0 { - n.WriteString(salt) - } - - // usually add the password but not always - if i%7 != 0 { - n.WriteString(password) - } - - // the back half of that alternation - if (i & 1) == 1 { - n.Write(bin[:]) - } else { - n.WriteString(password) - } - - // replace bin with the md5 of this muddle - bin = md5.Sum(n.Bytes()) - } - - // At this point we stop transliterating the PHP code and flip back to - // reading the Apache source. The PHP uses their base64 library, but that - // uses the wrong character set so needs to be repaired afterwards and reversed - // and it is just really weird to read. 
- - result := bytes.NewBuffer([]byte{}) - - // This is our own little similar-to-base64-but-not-quite filler - fill := func(a byte, b byte, c byte) { - v := (uint(a) << 16) + (uint(b) << 8) + uint(c) // take our 24 input bits - - for i := 0; i < 4; i++ { // and pump out a character for each 6 bits - result.WriteByte("./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"[v&0x3f]) - v >>= 6 - } - } - - // The order of these indices is strange, be careful - fill(bin[0], bin[6], bin[12]) - fill(bin[1], bin[7], bin[13]) - fill(bin[2], bin[8], bin[14]) - fill(bin[3], bin[9], bin[15]) - fill(bin[4], bin[10], bin[5]) // 5? Yes. - fill(0, 0, bin[11]) - - resultString := string(result.Bytes()[0:22]) // we wrote two extras since we only need 22. - - return resultString -} - -func (m *md5Password) MatchesPassword(pw string) bool { - hashed := apr1Md5(pw, m.salt) - return constantTimeEquals(hashed, m.hashed) -} diff --git a/vendor/github.com/jimstudt/http-authentication/basic/plain.go b/vendor/github.com/jimstudt/http-authentication/basic/plain.go deleted file mode 100644 index 5435e954b..000000000 --- a/vendor/github.com/jimstudt/http-authentication/basic/plain.go +++ /dev/null @@ -1,28 +0,0 @@ -package basic - -import ( - "fmt" -) - -type plainPassword struct { - password string -} - -// Accept any password in the plain text encoding. -// Be careful: This matches any line, so it *must* be the last parser in you list. -func AcceptPlain(pw string) (EncodedPasswd, error) { - return &plainPassword{pw}, nil -} - -// Reject any plain text encoded passoword. -// Be careful: This matches any line, so it *must* be the last parser in you list. -func RejectPlain(pw string) (EncodedPasswd, error) { - return nil, fmt.Errorf("plain password rejected: %s", pw) -} - -func (p *plainPassword) MatchesPassword(pw string) bool { - // Notice: nginx prefixes plain passwords with {PLAIN}, so we see if that would - // let us match too. 
I'd split {PLAIN} off, but someone probably uses that - // in their password. It's a big planet. - return constantTimeEquals(pw, p.password) || constantTimeEquals("{PLAIN}"+pw, p.password) -} diff --git a/vendor/github.com/jimstudt/http-authentication/basic/sha.go b/vendor/github.com/jimstudt/http-authentication/basic/sha.go deleted file mode 100644 index dbe9f9a5a..000000000 --- a/vendor/github.com/jimstudt/http-authentication/basic/sha.go +++ /dev/null @@ -1,43 +0,0 @@ -package basic - -import ( - "crypto/sha1" - "crypto/subtle" - "encoding/base64" - "fmt" - "strings" -) - -type shaPassword struct { - hashed []byte -} - -// Accept valid SHA encoded passwords. -func AcceptSha(src string) (EncodedPasswd, error) { - if !strings.HasPrefix(src, "{SHA}") { - return nil, nil - } - - b64 := strings.TrimPrefix(src, "{SHA}") - hashed, err := base64.StdEncoding.DecodeString(b64) - if err != nil { - return nil, fmt.Errorf("Malformed sha1(%s): %s", src, err.Error()) - } - if len(hashed) != sha1.Size { - return nil, fmt.Errorf("Malformed sha1(%s): wrong length", src) - } - return &shaPassword{hashed}, nil -} - -// Reject any password encoded as SHA. 
-func RejectSha(src string) (EncodedPasswd, error) { - if !strings.HasPrefix(src, "{SHA}") { - return nil, nil - } - return nil, fmt.Errorf("sha password rejected: %s", src) -} - -func (s *shaPassword) MatchesPassword(pw string) bool { - h := sha1.Sum([]byte(pw)) - return subtle.ConstantTimeCompare(h[:], s.hashed) == 1 -} diff --git a/vendor/github.com/jimstudt/http-authentication/basic/util.go b/vendor/github.com/jimstudt/http-authentication/basic/util.go deleted file mode 100644 index daeb30cf3..000000000 --- a/vendor/github.com/jimstudt/http-authentication/basic/util.go +++ /dev/null @@ -1,18 +0,0 @@ -package basic - -import ( - "crypto/sha1" - "crypto/subtle" -) - -func constantTimeEquals(a string, b string) bool { - // compare SHA-1 as a gatekeeper in constant time - // then check that we didn't get by because of a collision - aSha := sha1.Sum([]byte(a)) - bSha := sha1.Sum([]byte(b)) - if subtle.ConstantTimeCompare(aSha[:], bSha[:]) == 1 { - // yes, this bit isn't constant, but you had to make a Sha1 collision to get here - return a == b - } - return false -} diff --git a/vendor/github.com/klauspost/cpuid/.gitignore b/vendor/github.com/klauspost/cpuid/.gitignore deleted file mode 100644 index daf913b1b..000000000 --- a/vendor/github.com/klauspost/cpuid/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/klauspost/cpuid/.travis.yml b/vendor/github.com/klauspost/cpuid/.travis.yml deleted file mode 100644 index 630192d59..000000000 --- a/vendor/github.com/klauspost/cpuid/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go - -sudo: false - -os: - - linux - - osx -go: - - 1.8.x - - 1.9.x - - 1.10.x - - master - -script: - - go vet ./... 
- - go test -v ./... - - go test -race ./... - - diff <(gofmt -d .) <("") - -matrix: - allow_failures: - - go: 'master' - fast_finish: true diff --git a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt b/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt deleted file mode 100644 index 2ef4714f7..000000000 --- a/vendor/github.com/klauspost/cpuid/CONTRIBUTING.txt +++ /dev/null @@ -1,35 +0,0 @@ -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2015- Klaus Post & Contributors. -Email: klauspost@gmail.com - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. - - -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. 
diff --git a/vendor/github.com/klauspost/cpuid/LICENSE b/vendor/github.com/klauspost/cpuid/LICENSE deleted file mode 100644 index 5cec7ee94..000000000 --- a/vendor/github.com/klauspost/cpuid/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/klauspost/cpuid/README.md b/vendor/github.com/klauspost/cpuid/README.md deleted file mode 100644 index a7fb41fbe..000000000 --- a/vendor/github.com/klauspost/cpuid/README.md +++ /dev/null @@ -1,147 +0,0 @@ -# cpuid -Package cpuid provides information about the CPU running the current program. - -CPU features are detected on startup, and kept for fast access through the life of the application. -Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. - -You can access the CPU information by accessing the shared CPU variable of the cpuid library. 
- -Package home: https://github.com/klauspost/cpuid - -[![GoDoc][1]][2] [![Build Status][3]][4] - -[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg -[2]: https://godoc.org/github.com/klauspost/cpuid -[3]: https://travis-ci.org/klauspost/cpuid.svg -[4]: https://travis-ci.org/klauspost/cpuid - -# features -## CPU Instructions -* **CMOV** (i686 CMOV) -* **NX** (NX (No-Execute) bit) -* **AMD3DNOW** (AMD 3DNOW) -* **AMD3DNOWEXT** (AMD 3DNowExt) -* **MMX** (standard MMX) -* **MMXEXT** (SSE integer functions or AMD MMX ext) -* **SSE** (SSE functions) -* **SSE2** (P4 SSE functions) -* **SSE3** (Prescott SSE3 functions) -* **SSSE3** (Conroe SSSE3 functions) -* **SSE4** (Penryn SSE4.1 functions) -* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions) -* **SSE42** (Nehalem SSE4.2 functions) -* **AVX** (AVX functions) -* **AVX2** (AVX2 functions) -* **FMA3** (Intel FMA 3) -* **FMA4** (Bulldozer FMA4 functions) -* **XOP** (Bulldozer XOP functions) -* **F16C** (Half-precision floating-point conversion) -* **BMI1** (Bit Manipulation Instruction Set 1) -* **BMI2** (Bit Manipulation Instruction Set 2) -* **TBM** (AMD Trailing Bit Manipulation) -* **LZCNT** (LZCNT instruction) -* **POPCNT** (POPCNT instruction) -* **AESNI** (Advanced Encryption Standard New Instructions) -* **CLMUL** (Carry-less Multiplication) -* **HTT** (Hyperthreading (enabled)) -* **HLE** (Hardware Lock Elision) -* **RTM** (Restricted Transactional Memory) -* **RDRAND** (RDRAND instruction is available) -* **RDSEED** (RDSEED instruction is available) -* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) -* **SHA** (Intel SHA Extensions) -* **AVX512F** (AVX-512 Foundation) -* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions) -* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions) -* **AVX512PF** (AVX-512 Prefetch Instructions) -* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions) -* **AVX512CD** (AVX-512 Conflict Detection Instructions) 
-* **AVX512BW** (AVX-512 Byte and Word Instructions) -* **AVX512VL** (AVX-512 Vector Length Extensions) -* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions) -* **MPX** (Intel MPX (Memory Protection Extensions)) -* **ERMS** (Enhanced REP MOVSB/STOSB) -* **RDTSCP** (RDTSCP Instruction) -* **CX16** (CMPXCHG16B Instruction) -* **SGX** (Software Guard Extensions, with activation details) - -## Performance -* **RDTSCP()** Returns current cycle count. Can be used for benchmarking. -* **SSE2SLOW** (SSE2 is supported, but usually not faster) -* **SSE3SLOW** (SSE3 is supported, but usually not faster) -* **ATOM** (Atom processor, some SSSE3 instructions are slower) -* **Cache line** (Probable size of a cache line). -* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs. - -## Cpu Vendor/VM -* **Intel** -* **AMD** -* **VIA** -* **Transmeta** -* **NSC** -* **KVM** (Kernel-based Virtual Machine) -* **MSVM** (Microsoft Hyper-V or Windows Virtual PC) -* **VMware** -* **XenHVM** -* **Bhyve** -* **Hygon** - -# installing - -```go get github.com/klauspost/cpuid``` - -# example - -```Go -package main - -import ( - "fmt" - "github.com/klauspost/cpuid" -) - -func main() { - // Print basic CPU information: - fmt.Println("Name:", cpuid.CPU.BrandName) - fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores) - fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore) - fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores) - fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model) - fmt.Println("Features:", cpuid.CPU.Features) - fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine) - fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes") - fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes") - fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes") - fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes") - - // Test if we have a specific feature: - if cpuid.CPU.SSE() { - fmt.Println("We have Streaming SIMD Extensions") - } -} -``` - -Sample output: -``` ->go 
run main.go -Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz -PhysicalCores: 2 -ThreadsPerCore: 2 -LogicalCores: 4 -Family 6 Model: 42 -Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL -Cacheline bytes: 64 -We have Streaming SIMD Extensions -``` - -# private package - -In the "private" folder you can find an autogenerated version of the library you can include in your own packages. - -For this purpose all exports are removed, and functions and constants are lowercased. - -This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages. - -# license - -This code is published under an MIT license. See LICENSE file for more information. diff --git a/vendor/github.com/klauspost/cpuid/cpuid.go b/vendor/github.com/klauspost/cpuid/cpuid.go deleted file mode 100644 index db9591321..000000000 --- a/vendor/github.com/klauspost/cpuid/cpuid.go +++ /dev/null @@ -1,1049 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -// Package cpuid provides information about the CPU running the current program. -// -// CPU features are detected on startup, and kept for fast access through the life of the application. -// Currently x86 / x64 (AMD64) is supported. -// -// You can access the CPU information by accessing the shared CPU variable of the cpuid library. -// -// Package home: https://github.com/klauspost/cpuid -package cpuid - -import "strings" - -// Vendor is a representation of a CPU vendor. 
-type Vendor int - -const ( - Other Vendor = iota - Intel - AMD - VIA - Transmeta - NSC - KVM // Kernel-based Virtual Machine - MSVM // Microsoft Hyper-V or Windows Virtual PC - VMware - XenHVM - Bhyve - Hygon -) - -const ( - CMOV = 1 << iota // i686 CMOV - NX // NX (No-Execute) bit - AMD3DNOW // AMD 3DNOW - AMD3DNOWEXT // AMD 3DNowExt - MMX // standard MMX - MMXEXT // SSE integer functions or AMD MMX ext - SSE // SSE functions - SSE2 // P4 SSE functions - SSE3 // Prescott SSE3 functions - SSSE3 // Conroe SSSE3 functions - SSE4 // Penryn SSE4.1 functions - SSE4A // AMD Barcelona microarchitecture SSE4a instructions - SSE42 // Nehalem SSE4.2 functions - AVX // AVX functions - AVX2 // AVX2 functions - FMA3 // Intel FMA 3 - FMA4 // Bulldozer FMA4 functions - XOP // Bulldozer XOP functions - F16C // Half-precision floating-point conversion - BMI1 // Bit Manipulation Instruction Set 1 - BMI2 // Bit Manipulation Instruction Set 2 - TBM // AMD Trailing Bit Manipulation - LZCNT // LZCNT instruction - POPCNT // POPCNT instruction - AESNI // Advanced Encryption Standard New Instructions - CLMUL // Carry-less Multiplication - HTT // Hyperthreading (enabled) - HLE // Hardware Lock Elision - RTM // Restricted Transactional Memory - RDRAND // RDRAND instruction is available - RDSEED // RDSEED instruction is available - ADX // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) - SHA // Intel SHA Extensions - AVX512F // AVX-512 Foundation - AVX512DQ // AVX-512 Doubleword and Quadword Instructions - AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions - AVX512PF // AVX-512 Prefetch Instructions - AVX512ER // AVX-512 Exponential and Reciprocal Instructions - AVX512CD // AVX-512 Conflict Detection Instructions - AVX512BW // AVX-512 Byte and Word Instructions - AVX512VL // AVX-512 Vector Length Extensions - AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions - MPX // Intel MPX (Memory Protection Extensions) - ERMS // Enhanced REP MOVSB/STOSB - RDTSCP // 
RDTSCP Instruction - CX16 // CMPXCHG16B Instruction - SGX // Software Guard Extensions - IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) - STIBP // Single Thread Indirect Branch Predictors - - // Performance indicators - SSE2SLOW // SSE2 is supported, but usually not faster - SSE3SLOW // SSE3 is supported, but usually not faster - ATOM // Atom processor, some SSSE3 instructions are slower -) - -var flagNames = map[Flags]string{ - CMOV: "CMOV", // i686 CMOV - NX: "NX", // NX (No-Execute) bit - AMD3DNOW: "AMD3DNOW", // AMD 3DNOW - AMD3DNOWEXT: "AMD3DNOWEXT", // AMD 3DNowExt - MMX: "MMX", // Standard MMX - MMXEXT: "MMXEXT", // SSE integer functions or AMD MMX ext - SSE: "SSE", // SSE functions - SSE2: "SSE2", // P4 SSE2 functions - SSE3: "SSE3", // Prescott SSE3 functions - SSSE3: "SSSE3", // Conroe SSSE3 functions - SSE4: "SSE4.1", // Penryn SSE4.1 functions - SSE4A: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions - SSE42: "SSE4.2", // Nehalem SSE4.2 functions - AVX: "AVX", // AVX functions - AVX2: "AVX2", // AVX functions - FMA3: "FMA3", // Intel FMA 3 - FMA4: "FMA4", // Bulldozer FMA4 functions - XOP: "XOP", // Bulldozer XOP functions - F16C: "F16C", // Half-precision floating-point conversion - BMI1: "BMI1", // Bit Manipulation Instruction Set 1 - BMI2: "BMI2", // Bit Manipulation Instruction Set 2 - TBM: "TBM", // AMD Trailing Bit Manipulation - LZCNT: "LZCNT", // LZCNT instruction - POPCNT: "POPCNT", // POPCNT instruction - AESNI: "AESNI", // Advanced Encryption Standard New Instructions - CLMUL: "CLMUL", // Carry-less Multiplication - HTT: "HTT", // Hyperthreading (enabled) - HLE: "HLE", // Hardware Lock Elision - RTM: "RTM", // Restricted Transactional Memory - RDRAND: "RDRAND", // RDRAND instruction is available - RDSEED: "RDSEED", // RDSEED instruction is available - ADX: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) - SHA: "SHA", // Intel SHA Extensions - AVX512F: 
"AVX512F", // AVX-512 Foundation - AVX512DQ: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions - AVX512IFMA: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions - AVX512PF: "AVX512PF", // AVX-512 Prefetch Instructions - AVX512ER: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions - AVX512CD: "AVX512CD", // AVX-512 Conflict Detection Instructions - AVX512BW: "AVX512BW", // AVX-512 Byte and Word Instructions - AVX512VL: "AVX512VL", // AVX-512 Vector Length Extensions - AVX512VBMI: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions - MPX: "MPX", // Intel MPX (Memory Protection Extensions) - ERMS: "ERMS", // Enhanced REP MOVSB/STOSB - RDTSCP: "RDTSCP", // RDTSCP Instruction - CX16: "CX16", // CMPXCHG16B Instruction - SGX: "SGX", // Software Guard Extensions - IBPB: "IBPB", // Indirect Branch Restricted Speculation and Indirect Branch Predictor Barrier - STIBP: "STIBP", // Single Thread Indirect Branch Predictors - - // Performance indicators - SSE2SLOW: "SSE2SLOW", // SSE2 supported, but usually not faster - SSE3SLOW: "SSE3SLOW", // SSE3 supported, but usually not faster - ATOM: "ATOM", // Atom processor, some SSSE3 instructions are slower - -} - -// CPUInfo contains information about the detected system CPU. -type CPUInfo struct { - BrandName string // Brand name reported by the CPU - VendorID Vendor // Comparable CPU vendor ID - Features Flags // Features of the CPU - PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. - ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. - LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. - Family int // CPU family number - Model int // CPU model number - CacheLine int // Cache line size in bytes. Will be 0 if undetectable. - Cache struct { - L1I int // L1 Instruction Cache (per core or shared). 
Will be -1 if undetected - L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected - L2 int // L2 Cache (per core or shared). Will be -1 if undetected - L3 int // L3 Instruction Cache (per core or shared). Will be -1 if undetected - } - SGX SGXSupport - maxFunc uint32 - maxExFunc uint32 -} - -var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) -var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) -var xgetbv func(index uint32) (eax, edx uint32) -var rdtscpAsm func() (eax, ebx, ecx, edx uint32) - -// CPU contains information about the CPU as detected on startup, -// or when Detect last was called. -// -// Use this as the primary entry point to you data, -// this way queries are -var CPU CPUInfo - -func init() { - initCPU() - Detect() -} - -// Detect will re-detect current CPU info. -// This will replace the content of the exported CPU variable. -// -// Unless you expect the CPU to change while you are running your program -// you should not need to call this function. -// If you call this, you must ensure that no other goroutine is accessing the -// exported CPU variable. -func Detect() { - CPU.maxFunc = maxFunctionID() - CPU.maxExFunc = maxExtendedFunction() - CPU.BrandName = brandName() - CPU.CacheLine = cacheLine() - CPU.Family, CPU.Model = familyModel() - CPU.Features = support() - CPU.SGX = hasSGX(CPU.Features&SGX != 0) - CPU.ThreadsPerCore = threadsPerCore() - CPU.LogicalCores = logicalCores() - CPU.PhysicalCores = physicalCores() - CPU.VendorID = vendorID() - CPU.cacheSize() -} - -// Generated here: http://play.golang.org/p/BxFH2Gdc0G - -// Cmov indicates support of CMOV instructions -func (c CPUInfo) Cmov() bool { - return c.Features&CMOV != 0 -} - -// Amd3dnow indicates support of AMD 3DNOW! instructions -func (c CPUInfo) Amd3dnow() bool { - return c.Features&AMD3DNOW != 0 -} - -// Amd3dnowExt indicates support of AMD 3DNOW! 
Extended instructions -func (c CPUInfo) Amd3dnowExt() bool { - return c.Features&AMD3DNOWEXT != 0 -} - -// MMX indicates support of MMX instructions -func (c CPUInfo) MMX() bool { - return c.Features&MMX != 0 -} - -// MMXExt indicates support of MMXEXT instructions -// (SSE integer functions or AMD MMX ext) -func (c CPUInfo) MMXExt() bool { - return c.Features&MMXEXT != 0 -} - -// SSE indicates support of SSE instructions -func (c CPUInfo) SSE() bool { - return c.Features&SSE != 0 -} - -// SSE2 indicates support of SSE 2 instructions -func (c CPUInfo) SSE2() bool { - return c.Features&SSE2 != 0 -} - -// SSE3 indicates support of SSE 3 instructions -func (c CPUInfo) SSE3() bool { - return c.Features&SSE3 != 0 -} - -// SSSE3 indicates support of SSSE 3 instructions -func (c CPUInfo) SSSE3() bool { - return c.Features&SSSE3 != 0 -} - -// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions -func (c CPUInfo) SSE4() bool { - return c.Features&SSE4 != 0 -} - -// SSE42 indicates support of SSE4.2 instructions -func (c CPUInfo) SSE42() bool { - return c.Features&SSE42 != 0 -} - -// AVX indicates support of AVX instructions -// and operating system support of AVX instructions -func (c CPUInfo) AVX() bool { - return c.Features&AVX != 0 -} - -// AVX2 indicates support of AVX2 instructions -func (c CPUInfo) AVX2() bool { - return c.Features&AVX2 != 0 -} - -// FMA3 indicates support of FMA3 instructions -func (c CPUInfo) FMA3() bool { - return c.Features&FMA3 != 0 -} - -// FMA4 indicates support of FMA4 instructions -func (c CPUInfo) FMA4() bool { - return c.Features&FMA4 != 0 -} - -// XOP indicates support of XOP instructions -func (c CPUInfo) XOP() bool { - return c.Features&XOP != 0 -} - -// F16C indicates support of F16C instructions -func (c CPUInfo) F16C() bool { - return c.Features&F16C != 0 -} - -// BMI1 indicates support of BMI1 instructions -func (c CPUInfo) BMI1() bool { - return c.Features&BMI1 != 0 -} - -// BMI2 indicates support of BMI2 instructions 
-func (c CPUInfo) BMI2() bool { - return c.Features&BMI2 != 0 -} - -// TBM indicates support of TBM instructions -// (AMD Trailing Bit Manipulation) -func (c CPUInfo) TBM() bool { - return c.Features&TBM != 0 -} - -// Lzcnt indicates support of LZCNT instruction -func (c CPUInfo) Lzcnt() bool { - return c.Features&LZCNT != 0 -} - -// Popcnt indicates support of POPCNT instruction -func (c CPUInfo) Popcnt() bool { - return c.Features&POPCNT != 0 -} - -// HTT indicates the processor has Hyperthreading enabled -func (c CPUInfo) HTT() bool { - return c.Features&HTT != 0 -} - -// SSE2Slow indicates that SSE2 may be slow on this processor -func (c CPUInfo) SSE2Slow() bool { - return c.Features&SSE2SLOW != 0 -} - -// SSE3Slow indicates that SSE3 may be slow on this processor -func (c CPUInfo) SSE3Slow() bool { - return c.Features&SSE3SLOW != 0 -} - -// AesNi indicates support of AES-NI instructions -// (Advanced Encryption Standard New Instructions) -func (c CPUInfo) AesNi() bool { - return c.Features&AESNI != 0 -} - -// Clmul indicates support of CLMUL instructions -// (Carry-less Multiplication) -func (c CPUInfo) Clmul() bool { - return c.Features&CLMUL != 0 -} - -// NX indicates support of NX (No-Execute) bit -func (c CPUInfo) NX() bool { - return c.Features&NX != 0 -} - -// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions -func (c CPUInfo) SSE4A() bool { - return c.Features&SSE4A != 0 -} - -// HLE indicates support of Hardware Lock Elision -func (c CPUInfo) HLE() bool { - return c.Features&HLE != 0 -} - -// RTM indicates support of Restricted Transactional Memory -func (c CPUInfo) RTM() bool { - return c.Features&RTM != 0 -} - -// Rdrand indicates support of RDRAND instruction is available -func (c CPUInfo) Rdrand() bool { - return c.Features&RDRAND != 0 -} - -// Rdseed indicates support of RDSEED instruction is available -func (c CPUInfo) Rdseed() bool { - return c.Features&RDSEED != 0 -} - -// ADX indicates support of Intel ADX 
(Multi-Precision Add-Carry Instruction Extensions) -func (c CPUInfo) ADX() bool { - return c.Features&ADX != 0 -} - -// SHA indicates support of Intel SHA Extensions -func (c CPUInfo) SHA() bool { - return c.Features&SHA != 0 -} - -// AVX512F indicates support of AVX-512 Foundation -func (c CPUInfo) AVX512F() bool { - return c.Features&AVX512F != 0 -} - -// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions -func (c CPUInfo) AVX512DQ() bool { - return c.Features&AVX512DQ != 0 -} - -// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions -func (c CPUInfo) AVX512IFMA() bool { - return c.Features&AVX512IFMA != 0 -} - -// AVX512PF indicates support of AVX-512 Prefetch Instructions -func (c CPUInfo) AVX512PF() bool { - return c.Features&AVX512PF != 0 -} - -// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions -func (c CPUInfo) AVX512ER() bool { - return c.Features&AVX512ER != 0 -} - -// AVX512CD indicates support of AVX-512 Conflict Detection Instructions -func (c CPUInfo) AVX512CD() bool { - return c.Features&AVX512CD != 0 -} - -// AVX512BW indicates support of AVX-512 Byte and Word Instructions -func (c CPUInfo) AVX512BW() bool { - return c.Features&AVX512BW != 0 -} - -// AVX512VL indicates support of AVX-512 Vector Length Extensions -func (c CPUInfo) AVX512VL() bool { - return c.Features&AVX512VL != 0 -} - -// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions -func (c CPUInfo) AVX512VBMI() bool { - return c.Features&AVX512VBMI != 0 -} - -// MPX indicates support of Intel MPX (Memory Protection Extensions) -func (c CPUInfo) MPX() bool { - return c.Features&MPX != 0 -} - -// ERMS indicates support of Enhanced REP MOVSB/STOSB -func (c CPUInfo) ERMS() bool { - return c.Features&ERMS != 0 -} - -// RDTSCP Instruction is available. -func (c CPUInfo) RDTSCP() bool { - return c.Features&RDTSCP != 0 -} - -// CX16 indicates if CMPXCHG16B instruction is available. 
-func (c CPUInfo) CX16() bool { - return c.Features&CX16 != 0 -} - -// TSX is split into HLE (Hardware Lock Elision) and RTM (Restricted Transactional Memory) detection. -// So TSX simply checks that. -func (c CPUInfo) TSX() bool { - return c.Features&(HLE|RTM) == HLE|RTM -} - -// Atom indicates an Atom processor -func (c CPUInfo) Atom() bool { - return c.Features&ATOM != 0 -} - -// Intel returns true if vendor is recognized as Intel -func (c CPUInfo) Intel() bool { - return c.VendorID == Intel -} - -// AMD returns true if vendor is recognized as AMD -func (c CPUInfo) AMD() bool { - return c.VendorID == AMD -} - -// Hygon returns true if vendor is recognized as Hygon -func (c CPUInfo) Hygon() bool { - return c.VendorID == Hygon -} - -// Transmeta returns true if vendor is recognized as Transmeta -func (c CPUInfo) Transmeta() bool { - return c.VendorID == Transmeta -} - -// NSC returns true if vendor is recognized as National Semiconductor -func (c CPUInfo) NSC() bool { - return c.VendorID == NSC -} - -// VIA returns true if vendor is recognized as VIA -func (c CPUInfo) VIA() bool { - return c.VendorID == VIA -} - -// RTCounter returns the 64-bit time-stamp counter -// Uses the RDTSCP instruction. The value 0 is returned -// if the CPU does not support the instruction. -func (c CPUInfo) RTCounter() uint64 { - if !c.RDTSCP() { - return 0 - } - a, _, _, d := rdtscpAsm() - return uint64(a) | (uint64(d) << 32) -} - -// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. -// This variable is OS dependent, but on Linux contains information -// about the current cpu/core the code is running on. -// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. -func (c CPUInfo) Ia32TscAux() uint32 { - if !c.RDTSCP() { - return 0 - } - _, _, ecx, _ := rdtscpAsm() - return ecx -} - -// LogicalCPU will return the Logical CPU the code is currently executing on. -// This is likely to change when the OS re-schedules the running thread -// to another CPU. 
-// If the current core cannot be detected, -1 will be returned. -func (c CPUInfo) LogicalCPU() int { - if c.maxFunc < 1 { - return -1 - } - _, ebx, _, _ := cpuid(1) - return int(ebx >> 24) -} - -// VM Will return true if the cpu id indicates we are in -// a virtual machine. This is only a hint, and will very likely -// have many false negatives. -func (c CPUInfo) VM() bool { - switch c.VendorID { - case MSVM, KVM, VMware, XenHVM, Bhyve: - return true - } - return false -} - -// Flags contains detected cpu features and caracteristics -type Flags uint64 - -// String returns a string representation of the detected -// CPU features. -func (f Flags) String() string { - return strings.Join(f.Strings(), ",") -} - -// Strings returns and array of the detected features. -func (f Flags) Strings() []string { - s := support() - r := make([]string, 0, 20) - for i := uint(0); i < 64; i++ { - key := Flags(1 << i) - val := flagNames[key] - if s&key != 0 { - r = append(r, val) - } - } - return r -} - -func maxExtendedFunction() uint32 { - eax, _, _, _ := cpuid(0x80000000) - return eax -} - -func maxFunctionID() uint32 { - a, _, _, _ := cpuid(0) - return a -} - -func brandName() string { - if maxExtendedFunction() >= 0x80000004 { - v := make([]uint32, 0, 48) - for i := uint32(0); i < 3; i++ { - a, b, c, d := cpuid(0x80000002 + i) - v = append(v, a, b, c, d) - } - return strings.Trim(string(valAsString(v...)), " ") - } - return "unknown" -} - -func threadsPerCore() int { - mfi := maxFunctionID() - if mfi < 0x4 || vendorID() != Intel { - return 1 - } - - if mfi < 0xb { - _, b, _, d := cpuid(1) - if (d & (1 << 28)) != 0 { - // v will contain logical core count - v := (b >> 16) & 255 - if v > 1 { - a4, _, _, _ := cpuid(4) - // physical cores - v2 := (a4 >> 26) + 1 - if v2 > 0 { - return int(v) / int(v2) - } - } - } - return 1 - } - _, b, _, _ := cpuidex(0xb, 0) - if b&0xffff == 0 { - return 1 - } - return int(b & 0xffff) -} - -func logicalCores() int { - mfi := maxFunctionID() - switch 
vendorID() { - case Intel: - // Use this on old Intel processors - if mfi < 0xb { - if mfi < 1 { - return 0 - } - // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) - // that can be assigned to logical processors in a physical package. - // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. - _, ebx, _, _ := cpuid(1) - logical := (ebx >> 16) & 0xff - return int(logical) - } - _, b, _, _ := cpuidex(0xb, 1) - return int(b & 0xffff) - case AMD, Hygon: - _, b, _, _ := cpuid(1) - return int((b >> 16) & 0xff) - default: - return 0 - } -} - -func familyModel() (int, int) { - if maxFunctionID() < 0x1 { - return 0, 0 - } - eax, _, _, _ := cpuid(1) - family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) - model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) - return int(family), int(model) -} - -func physicalCores() int { - switch vendorID() { - case Intel: - return logicalCores() / threadsPerCore() - case AMD, Hygon: - if maxExtendedFunction() >= 0x80000008 { - _, _, c, _ := cpuid(0x80000008) - return int(c&0xff) + 1 - } - } - return 0 -} - -// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID -var vendorMapping = map[string]Vendor{ - "AMDisbetter!": AMD, - "AuthenticAMD": AMD, - "CentaurHauls": VIA, - "GenuineIntel": Intel, - "TransmetaCPU": Transmeta, - "GenuineTMx86": Transmeta, - "Geode by NSC": NSC, - "VIA VIA VIA ": VIA, - "KVMKVMKVMKVM": KVM, - "Microsoft Hv": MSVM, - "VMwareVMware": VMware, - "XenVMMXenVMM": XenHVM, - "bhyve bhyve ": Bhyve, - "HygonGenuine": Hygon, -} - -func vendorID() Vendor { - _, b, c, d := cpuid(0) - v := valAsString(b, d, c) - vend, ok := vendorMapping[string(v)] - if !ok { - return Other - } - return vend -} - -func cacheLine() int { - if maxFunctionID() < 0x1 { - return 0 - } - - _, ebx, _, _ := cpuid(1) - cache := (ebx & 0xff00) >> 5 // cflush size - if cache == 0 && maxExtendedFunction() >= 0x80000006 { - _, _, ecx, 
_ := cpuid(0x80000006) - cache = ecx & 0xff // cacheline size - } - // TODO: Read from Cache and TLB Information - return int(cache) -} - -func (c *CPUInfo) cacheSize() { - c.Cache.L1D = -1 - c.Cache.L1I = -1 - c.Cache.L2 = -1 - c.Cache.L3 = -1 - vendor := vendorID() - switch vendor { - case Intel: - if maxFunctionID() < 4 { - return - } - for i := uint32(0); ; i++ { - eax, ebx, ecx, _ := cpuidex(4, i) - cacheType := eax & 15 - if cacheType == 0 { - break - } - cacheLevel := (eax >> 5) & 7 - coherency := int(ebx&0xfff) + 1 - partitions := int((ebx>>12)&0x3ff) + 1 - associativity := int((ebx>>22)&0x3ff) + 1 - sets := int(ecx) + 1 - size := associativity * partitions * coherency * sets - switch cacheLevel { - case 1: - if cacheType == 1 { - // 1 = Data Cache - c.Cache.L1D = size - } else if cacheType == 2 { - // 2 = Instruction Cache - c.Cache.L1I = size - } else { - if c.Cache.L1D < 0 { - c.Cache.L1I = size - } - if c.Cache.L1I < 0 { - c.Cache.L1I = size - } - } - case 2: - c.Cache.L2 = size - case 3: - c.Cache.L3 = size - } - } - case AMD, Hygon: - // Untested. 
- if maxExtendedFunction() < 0x80000005 { - return - } - _, _, ecx, edx := cpuid(0x80000005) - c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) - c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) - - if maxExtendedFunction() < 0x80000006 { - return - } - _, _, ecx, _ = cpuid(0x80000006) - c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) - } - - return -} - -type SGXSupport struct { - Available bool - SGX1Supported bool - SGX2Supported bool - MaxEnclaveSizeNot64 int64 - MaxEnclaveSize64 int64 -} - -func hasSGX(available bool) (rval SGXSupport) { - rval.Available = available - - if !available { - return - } - - a, _, _, d := cpuidex(0x12, 0) - rval.SGX1Supported = a&0x01 != 0 - rval.SGX2Supported = a&0x02 != 0 - rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 - rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 - - return -} - -func support() Flags { - mfi := maxFunctionID() - vend := vendorID() - if mfi < 0x1 { - return 0 - } - rval := uint64(0) - _, _, c, d := cpuid(1) - if (d & (1 << 15)) != 0 { - rval |= CMOV - } - if (d & (1 << 23)) != 0 { - rval |= MMX - } - if (d & (1 << 25)) != 0 { - rval |= MMXEXT - } - if (d & (1 << 25)) != 0 { - rval |= SSE - } - if (d & (1 << 26)) != 0 { - rval |= SSE2 - } - if (c & 1) != 0 { - rval |= SSE3 - } - if (c & 0x00000200) != 0 { - rval |= SSSE3 - } - if (c & 0x00080000) != 0 { - rval |= SSE4 - } - if (c & 0x00100000) != 0 { - rval |= SSE42 - } - if (c & (1 << 25)) != 0 { - rval |= AESNI - } - if (c & (1 << 1)) != 0 { - rval |= CLMUL - } - if c&(1<<23) != 0 { - rval |= POPCNT - } - if c&(1<<30) != 0 { - rval |= RDRAND - } - if c&(1<<29) != 0 { - rval |= F16C - } - if c&(1<<13) != 0 { - rval |= CX16 - } - if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { - if threadsPerCore() > 1 { - rval |= HTT - } - } - - // Check XGETBV, OXSAVE and AVX bits - if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { - // Check for OS support - eax, _ := xgetbv(0) - if (eax & 0x6) == 0x6 { - rval |= AVX - if (c & 0x00001000) != 0 { - rval |= 
FMA3 - } - } - } - - // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. - if mfi >= 7 { - _, ebx, ecx, edx := cpuidex(7, 0) - if (rval&AVX) != 0 && (ebx&0x00000020) != 0 { - rval |= AVX2 - } - if (ebx & 0x00000008) != 0 { - rval |= BMI1 - if (ebx & 0x00000100) != 0 { - rval |= BMI2 - } - } - if ebx&(1<<2) != 0 { - rval |= SGX - } - if ebx&(1<<4) != 0 { - rval |= HLE - } - if ebx&(1<<9) != 0 { - rval |= ERMS - } - if ebx&(1<<11) != 0 { - rval |= RTM - } - if ebx&(1<<14) != 0 { - rval |= MPX - } - if ebx&(1<<18) != 0 { - rval |= RDSEED - } - if ebx&(1<<19) != 0 { - rval |= ADX - } - if ebx&(1<<29) != 0 { - rval |= SHA - } - if edx&(1<<26) != 0 { - rval |= IBPB - } - if edx&(1<<27) != 0 { - rval |= STIBP - } - - // Only detect AVX-512 features if XGETBV is supported - if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { - // Check for OS support - eax, _ := xgetbv(0) - - // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and - // ZMM16-ZMM31 state are enabled by OS) - /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). 
- if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { - if ebx&(1<<16) != 0 { - rval |= AVX512F - } - if ebx&(1<<17) != 0 { - rval |= AVX512DQ - } - if ebx&(1<<21) != 0 { - rval |= AVX512IFMA - } - if ebx&(1<<26) != 0 { - rval |= AVX512PF - } - if ebx&(1<<27) != 0 { - rval |= AVX512ER - } - if ebx&(1<<28) != 0 { - rval |= AVX512CD - } - if ebx&(1<<30) != 0 { - rval |= AVX512BW - } - if ebx&(1<<31) != 0 { - rval |= AVX512VL - } - // ecx - if ecx&(1<<1) != 0 { - rval |= AVX512VBMI - } - } - } - } - - if maxExtendedFunction() >= 0x80000001 { - _, _, c, d := cpuid(0x80000001) - if (c & (1 << 5)) != 0 { - rval |= LZCNT - rval |= POPCNT - } - if (d & (1 << 31)) != 0 { - rval |= AMD3DNOW - } - if (d & (1 << 30)) != 0 { - rval |= AMD3DNOWEXT - } - if (d & (1 << 23)) != 0 { - rval |= MMX - } - if (d & (1 << 22)) != 0 { - rval |= MMXEXT - } - if (c & (1 << 6)) != 0 { - rval |= SSE4A - } - if d&(1<<20) != 0 { - rval |= NX - } - if d&(1<<27) != 0 { - rval |= RDTSCP - } - - /* Allow for selectively disabling SSE2 functions on AMD processors - with SSE2 support but not SSE4a. This includes Athlon64, some - Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster - than SSE2 often enough to utilize this special-case flag. - AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case - so that SSE2 is used unless explicitly disabled by checking - AV_CPU_FLAG_SSE2SLOW. */ - if vendorID() != Intel && - rval&SSE2 != 0 && (c&0x00000040) == 0 { - rval |= SSE2SLOW - } - - /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be - * used unless the OS has AVX support. 
*/ - if (rval & AVX) != 0 { - if (c & 0x00000800) != 0 { - rval |= XOP - } - if (c & 0x00010000) != 0 { - rval |= FMA4 - } - } - - if vendorID() == Intel { - family, model := familyModel() - if family == 6 && (model == 9 || model == 13 || model == 14) { - /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and - * 6/14 (core1 "yonah") theoretically support sse2, but it's - * usually slower than mmx. */ - if (rval & SSE2) != 0 { - rval |= SSE2SLOW - } - if (rval & SSE3) != 0 { - rval |= SSE3SLOW - } - } - /* The Atom processor has SSSE3 support, which is useful in many cases, - * but sometimes the SSSE3 version is slower than the SSE2 equivalent - * on the Atom, but is generally faster on other processors supporting - * SSSE3. This flag allows for selectively disabling certain SSSE3 - * functions on the Atom. */ - if family == 6 && model == 28 { - rval |= ATOM - } - } - } - return Flags(rval) -} - -func valAsString(values ...uint32) []byte { - r := make([]byte, 4*len(values)) - for i, v := range values { - dst := r[i*4:] - dst[0] = byte(v & 0xff) - dst[1] = byte((v >> 8) & 0xff) - dst[2] = byte((v >> 16) & 0xff) - dst[3] = byte((v >> 24) & 0xff) - switch { - case dst[0] == 0: - return r[:i*4] - case dst[1] == 0: - return r[:i*4+1] - case dst[2] == 0: - return r[:i*4+2] - case dst[3] == 0: - return r[:i*4+3] - } - } - return r -} diff --git a/vendor/github.com/klauspost/cpuid/cpuid_386.s b/vendor/github.com/klauspost/cpuid/cpuid_386.s deleted file mode 100644 index 4d731711e..000000000 --- a/vendor/github.com/klauspost/cpuid/cpuid_386.s +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
- -// +build 386,!gccgo - -// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuid(SB), 7, $0 - XORL CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+4(FP) - MOVL BX, ebx+8(FP) - MOVL CX, ecx+12(FP) - MOVL DX, edx+16(FP) - RET - -// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func xgetbv(index uint32) (eax, edx uint32) -TEXT ·asmXgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+4(FP) - MOVL DX, edx+8(FP) - RET - -// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) -TEXT ·asmRdtscpAsm(SB), 7, $0 - BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP - MOVL AX, eax+0(FP) - MOVL BX, ebx+4(FP) - MOVL CX, ecx+8(FP) - MOVL DX, edx+12(FP) - RET diff --git a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s b/vendor/github.com/klauspost/cpuid/cpuid_amd64.s deleted file mode 100644 index 3c1d60e42..000000000 --- a/vendor/github.com/klauspost/cpuid/cpuid_amd64.s +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
- -//+build amd64,!gccgo - -// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuid(SB), 7, $0 - XORQ CX, CX - MOVL op+0(FP), AX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -TEXT ·asmCpuidex(SB), 7, $0 - MOVL op+0(FP), AX - MOVL op2+4(FP), CX - CPUID - MOVL AX, eax+8(FP) - MOVL BX, ebx+12(FP) - MOVL CX, ecx+16(FP) - MOVL DX, edx+20(FP) - RET - -// func asmXgetbv(index uint32) (eax, edx uint32) -TEXT ·asmXgetbv(SB), 7, $0 - MOVL index+0(FP), CX - BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV - MOVL AX, eax+8(FP) - MOVL DX, edx+12(FP) - RET - -// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) -TEXT ·asmRdtscpAsm(SB), 7, $0 - BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP - MOVL AX, eax+0(FP) - MOVL BX, ebx+4(FP) - MOVL CX, ecx+8(FP) - MOVL DX, edx+12(FP) - RET diff --git a/vendor/github.com/klauspost/cpuid/detect_intel.go b/vendor/github.com/klauspost/cpuid/detect_intel.go deleted file mode 100644 index a5f04dd6d..000000000 --- a/vendor/github.com/klauspost/cpuid/detect_intel.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. - -// +build 386,!gccgo amd64,!gccgo - -package cpuid - -func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) -func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) -func asmXgetbv(index uint32) (eax, edx uint32) -func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) - -func initCPU() { - cpuid = asmCpuid - cpuidex = asmCpuidex - xgetbv = asmXgetbv - rdtscpAsm = asmRdtscpAsm -} diff --git a/vendor/github.com/klauspost/cpuid/detect_ref.go b/vendor/github.com/klauspost/cpuid/detect_ref.go deleted file mode 100644 index 909c5d9a7..000000000 --- a/vendor/github.com/klauspost/cpuid/detect_ref.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
- -// +build !amd64,!386 gccgo - -package cpuid - -func initCPU() { - cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 - } - - cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 - } - - xgetbv = func(index uint32) (eax, edx uint32) { - return 0, 0 - } - - rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { - return 0, 0, 0, 0 - } -} diff --git a/vendor/github.com/klauspost/cpuid/generate.go b/vendor/github.com/klauspost/cpuid/generate.go deleted file mode 100644 index 90e7a98d2..000000000 --- a/vendor/github.com/klauspost/cpuid/generate.go +++ /dev/null @@ -1,4 +0,0 @@ -package cpuid - -//go:generate go run private-gen.go -//go:generate gofmt -w ./private diff --git a/vendor/github.com/klauspost/cpuid/private-gen.go b/vendor/github.com/klauspost/cpuid/private-gen.go deleted file mode 100644 index 437333d29..000000000 --- a/vendor/github.com/klauspost/cpuid/private-gen.go +++ /dev/null @@ -1,476 +0,0 @@ -// +build ignore - -package main - -import ( - "bytes" - "fmt" - "go/ast" - "go/parser" - "go/printer" - "go/token" - "io" - "io/ioutil" - "log" - "os" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -var inFiles = []string{"cpuid.go", "cpuid_test.go"} -var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"} -var fileSet = token.NewFileSet() -var reWrites = []rewrite{ - initRewrite("CPUInfo -> cpuInfo"), - initRewrite("Vendor -> vendor"), - initRewrite("Flags -> flags"), - initRewrite("Detect -> detect"), - initRewrite("CPU -> cpu"), -} -var excludeNames = map[string]bool{"string": true, "join": true, "trim": true, - // cpuid_test.go - "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true, -} - -var excludePrefixes = []string{"test", "benchmark"} - -func main() { - Package := "private" - parserMode := parser.ParseComments - exported := make(map[string]rewrite) - for _, file := range inFiles { - in, err := os.Open(file) - if err != nil { - 
log.Fatalf("opening input", err) - } - - src, err := ioutil.ReadAll(in) - if err != nil { - log.Fatalf("reading input", err) - } - - astfile, err := parser.ParseFile(fileSet, file, src, parserMode) - if err != nil { - log.Fatalf("parsing input", err) - } - - for _, rw := range reWrites { - astfile = rw(astfile) - } - - // Inspect the AST and print all identifiers and literals. - var startDecl token.Pos - var endDecl token.Pos - ast.Inspect(astfile, func(n ast.Node) bool { - var s string - switch x := n.(type) { - case *ast.Ident: - if x.IsExported() { - t := strings.ToLower(x.Name) - for _, pre := range excludePrefixes { - if strings.HasPrefix(t, pre) { - return true - } - } - if excludeNames[t] != true { - //if x.Pos() > startDecl && x.Pos() < endDecl { - exported[x.Name] = initRewrite(x.Name + " -> " + t) - } - } - - case *ast.GenDecl: - if x.Tok == token.CONST && x.Lparen > 0 { - startDecl = x.Lparen - endDecl = x.Rparen - // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl)) - } - } - if s != "" { - fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s) - } - return true - }) - - for _, rw := range exported { - astfile = rw(astfile) - } - - var buf bytes.Buffer - - printer.Fprint(&buf, fileSet, astfile) - - // Remove package documentation and insert information - s := buf.String() - ind := strings.Index(buf.String(), "\npackage cpuid") - s = s[ind:] - s = "// Generated, DO NOT EDIT,\n" + - "// but copy it to your own project and rename the package.\n" + - "// See more at http://github.com/klauspost/cpuid\n" + - s - - outputName := Package + string(os.PathSeparator) + file - - err = ioutil.WriteFile(outputName, []byte(s), 0644) - if err != nil { - log.Fatalf("writing output: %s", err) - } - log.Println("Generated", outputName) - } - - for _, file := range copyFiles { - dst := "" - if strings.HasPrefix(file, "cpuid") { - dst = Package + string(os.PathSeparator) + file - } else { - dst = Package + string(os.PathSeparator) + 
"cpuid_" + file - } - err := copyFile(file, dst) - if err != nil { - log.Fatalf("copying file: %s", err) - } - log.Println("Copied", dst) - } -} - -// CopyFile copies a file from src to dst. If src and dst files exist, and are -// the same, then return success. Copy the file contents from src to dst. -func copyFile(src, dst string) (err error) { - sfi, err := os.Stat(src) - if err != nil { - return - } - if !sfi.Mode().IsRegular() { - // cannot copy non-regular files (e.g., directories, - // symlinks, devices, etc.) - return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String()) - } - dfi, err := os.Stat(dst) - if err != nil { - if !os.IsNotExist(err) { - return - } - } else { - if !(dfi.Mode().IsRegular()) { - return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String()) - } - if os.SameFile(sfi, dfi) { - return - } - } - err = copyFileContents(src, dst) - return -} - -// copyFileContents copies the contents of the file named src to the file named -// by dst. The file will be created if it does not already exist. If the -// destination file exists, all it's contents will be replaced by the contents -// of the source file. 
-func copyFileContents(src, dst string) (err error) { - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - out, err := os.Create(dst) - if err != nil { - return - } - defer func() { - cerr := out.Close() - if err == nil { - err = cerr - } - }() - if _, err = io.Copy(out, in); err != nil { - return - } - err = out.Sync() - return -} - -type rewrite func(*ast.File) *ast.File - -// Mostly copied from gofmt -func initRewrite(rewriteRule string) rewrite { - f := strings.Split(rewriteRule, "->") - if len(f) != 2 { - fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") - os.Exit(2) - } - pattern := parseExpr(f[0], "pattern") - replace := parseExpr(f[1], "replacement") - return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) } -} - -// parseExpr parses s as an expression. -// It might make sense to expand this to allow statement patterns, -// but there are problems with preserving formatting and also -// with what a wildcard for a statement looks like. -func parseExpr(s, what string) ast.Expr { - x, err := parser.ParseExpr(s) - if err != nil { - fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) - os.Exit(2) - } - return x -} - -// Keep this function for debugging. -/* -func dump(msg string, val reflect.Value) { - fmt.Printf("%s:\n", msg) - ast.Print(fileSet, val.Interface()) - fmt.Println() -} -*/ - -// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. 
-func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File { - cmap := ast.NewCommentMap(fileSet, p, p.Comments) - m := make(map[string]reflect.Value) - pat := reflect.ValueOf(pattern) - repl := reflect.ValueOf(replace) - - var rewriteVal func(val reflect.Value) reflect.Value - rewriteVal = func(val reflect.Value) reflect.Value { - // don't bother if val is invalid to start with - if !val.IsValid() { - return reflect.Value{} - } - for k := range m { - delete(m, k) - } - val = apply(rewriteVal, val) - if match(m, pat, val) { - val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) - } - return val - } - - r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) - r.Comments = cmap.Filter(r).Comments() // recreate comments list - return r -} - -// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. -func set(x, y reflect.Value) { - // don't bother if x cannot be set or y is invalid - if !x.CanSet() || !y.IsValid() { - return - } - defer func() { - if x := recover(); x != nil { - if s, ok := x.(string); ok && - (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { - // x cannot be set to y - ignore this rewrite - return - } - panic(x) - } - }() - x.Set(y) -} - -// Values/types for special cases. -var ( - objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) - scopePtrNil = reflect.ValueOf((*ast.Scope)(nil)) - - identType = reflect.TypeOf((*ast.Ident)(nil)) - objectPtrType = reflect.TypeOf((*ast.Object)(nil)) - positionType = reflect.TypeOf(token.NoPos) - callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) - scopePtrType = reflect.TypeOf((*ast.Scope)(nil)) -) - -// apply replaces each AST field x in val with f(x), returning val. -// To avoid extra conversions, f operates on the reflect.Value form. 
-func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { - if !val.IsValid() { - return reflect.Value{} - } - - // *ast.Objects introduce cycles and are likely incorrect after - // rewrite; don't follow them but replace with nil instead - if val.Type() == objectPtrType { - return objectPtrNil - } - - // similarly for scopes: they are likely incorrect after a rewrite; - // replace them with nil - if val.Type() == scopePtrType { - return scopePtrNil - } - - switch v := reflect.Indirect(val); v.Kind() { - case reflect.Slice: - for i := 0; i < v.Len(); i++ { - e := v.Index(i) - set(e, f(e)) - } - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - e := v.Field(i) - set(e, f(e)) - } - case reflect.Interface: - e := v.Elem() - set(v, f(e)) - } - return val -} - -func isWildcard(s string) bool { - rune, size := utf8.DecodeRuneInString(s) - return size == len(s) && unicode.IsLower(rune) -} - -// match returns true if pattern matches val, -// recording wildcard submatches in m. -// If m == nil, match checks whether pattern == val. -func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { - // Wildcard matches any expression. If it appears multiple - // times in the pattern, it must match the same expression - // each time. - if m != nil && pattern.IsValid() && pattern.Type() == identType { - name := pattern.Interface().(*ast.Ident).Name - if isWildcard(name) && val.IsValid() { - // wildcards only match valid (non-nil) expressions. - if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { - if old, ok := m[name]; ok { - return match(nil, old, val) - } - m[name] = val - return true - } - } - } - - // Otherwise, pattern and val must match recursively. - if !pattern.IsValid() || !val.IsValid() { - return !pattern.IsValid() && !val.IsValid() - } - if pattern.Type() != val.Type() { - return false - } - - // Special cases. 
- switch pattern.Type() { - case identType: - // For identifiers, only the names need to match - // (and none of the other *ast.Object information). - // This is a common case, handle it all here instead - // of recursing down any further via reflection. - p := pattern.Interface().(*ast.Ident) - v := val.Interface().(*ast.Ident) - return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name - case objectPtrType, positionType: - // object pointers and token positions always match - return true - case callExprType: - // For calls, the Ellipsis fields (token.Position) must - // match since that is how f(x) and f(x...) are different. - // Check them here but fall through for the remaining fields. - p := pattern.Interface().(*ast.CallExpr) - v := val.Interface().(*ast.CallExpr) - if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { - return false - } - } - - p := reflect.Indirect(pattern) - v := reflect.Indirect(val) - if !p.IsValid() || !v.IsValid() { - return !p.IsValid() && !v.IsValid() - } - - switch p.Kind() { - case reflect.Slice: - if p.Len() != v.Len() { - return false - } - for i := 0; i < p.Len(); i++ { - if !match(m, p.Index(i), v.Index(i)) { - return false - } - } - return true - - case reflect.Struct: - for i := 0; i < p.NumField(); i++ { - if !match(m, p.Field(i), v.Field(i)) { - return false - } - } - return true - - case reflect.Interface: - return match(m, p.Elem(), v.Elem()) - } - - // Handle token integers, etc. - return p.Interface() == v.Interface() -} - -// subst returns a copy of pattern with values from m substituted in place -// of wildcards and pos used as the position of tokens from the pattern. -// if m == nil, subst returns a copy of pattern and doesn't change the line -// number information. -func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { - if !pattern.IsValid() { - return reflect.Value{} - } - - // Wildcard gets replaced with map value. 
- if m != nil && pattern.Type() == identType { - name := pattern.Interface().(*ast.Ident).Name - if isWildcard(name) { - if old, ok := m[name]; ok { - return subst(nil, old, reflect.Value{}) - } - } - } - - if pos.IsValid() && pattern.Type() == positionType { - // use new position only if old position was valid in the first place - if old := pattern.Interface().(token.Pos); !old.IsValid() { - return pattern - } - return pos - } - - // Otherwise copy. - switch p := pattern; p.Kind() { - case reflect.Slice: - v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) - for i := 0; i < p.Len(); i++ { - v.Index(i).Set(subst(m, p.Index(i), pos)) - } - return v - - case reflect.Struct: - v := reflect.New(p.Type()).Elem() - for i := 0; i < p.NumField(); i++ { - v.Field(i).Set(subst(m, p.Field(i), pos)) - } - return v - - case reflect.Ptr: - v := reflect.New(p.Type()).Elem() - if elem := p.Elem(); elem.IsValid() { - v.Set(subst(m, elem, pos).Addr()) - } - return v - - case reflect.Interface: - v := reflect.New(p.Type()).Elem() - if elem := p.Elem(); elem.IsValid() { - v.Set(subst(m, elem, pos)) - } - return v - } - - return pattern -} diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake/clientset_generated.go new file mode 100644 index 000000000..3b341cde9 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -0,0 +1,82 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package fake + +import ( + clientset "github.com/kubernetes-sigs/application/pkg/client/clientset/versioned" + appv1beta1 "github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1" + fakeappv1beta1 "github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. 
+func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. +type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var _ clientset.Interface = &Clientset{} + +// AppV1beta1 retrieves the AppV1beta1Client +func (c *Clientset) AppV1beta1() appv1beta1.AppV1beta1Interface { + return &fakeappv1beta1.FakeAppV1beta1{Fake: &c.Fake} +} diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake/doc.go b/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake/doc.go new file mode 100644 index 000000000..5bf835605 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake/register.go b/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake/register.go new file mode 100644 index 000000000..3c2e65914 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/fake/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. 
+ +package fake + +import ( + appv1beta1 "github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) +var parameterCodec = runtime.NewParameterCodec(scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + appv1beta1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake/doc.go b/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake/doc.go new file mode 100644 index 000000000..2f80c73bd --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake/fake_app_client.go b/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake/fake_app_client.go new file mode 100644 index 000000000..70909506c --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake/fake_app_client.go @@ -0,0 +1,40 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. 
+ +package fake + +import ( + v1beta1 "github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeAppV1beta1 struct { + *testing.Fake +} + +func (c *FakeAppV1beta1) Applications(namespace string) v1beta1.ApplicationInterface { + return &FakeApplications{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeAppV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake/fake_application.go b/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake/fake_application.go new file mode 100644 index 000000000..b5c74ce67 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/application/pkg/client/clientset/versioned/typed/app/v1beta1/fake/fake_application.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. 
+ +package fake + +import ( + v1beta1 "github.com/kubernetes-sigs/application/pkg/apis/app/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeApplications implements ApplicationInterface +type FakeApplications struct { + Fake *FakeAppV1beta1 + ns string +} + +var applicationsResource = schema.GroupVersionResource{Group: "app.k8s.io", Version: "v1beta1", Resource: "applications"} + +var applicationsKind = schema.GroupVersionKind{Group: "app.k8s.io", Version: "v1beta1", Kind: "Application"} + +// Get takes name of the application, and returns the corresponding application object, and an error if there is any. +func (c *FakeApplications) Get(name string, options v1.GetOptions) (result *v1beta1.Application, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(applicationsResource, c.ns, name), &v1beta1.Application{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Application), err +} + +// List takes label and field selectors, and returns the list of Applications that match those selectors. +func (c *FakeApplications) List(opts v1.ListOptions) (result *v1beta1.ApplicationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(applicationsResource, applicationsKind, c.ns, opts), &v1beta1.ApplicationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.ApplicationList{ListMeta: obj.(*v1beta1.ApplicationList).ListMeta} + for _, item := range obj.(*v1beta1.ApplicationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested applications. 
+func (c *FakeApplications) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(applicationsResource, c.ns, opts)) + +} + +// Create takes the representation of a application and creates it. Returns the server's representation of the application, and an error, if there is any. +func (c *FakeApplications) Create(application *v1beta1.Application) (result *v1beta1.Application, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(applicationsResource, c.ns, application), &v1beta1.Application{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Application), err +} + +// Update takes the representation of a application and updates it. Returns the server's representation of the application, and an error, if there is any. +func (c *FakeApplications) Update(application *v1beta1.Application) (result *v1beta1.Application, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(applicationsResource, c.ns, application), &v1beta1.Application{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Application), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeApplications) UpdateStatus(application *v1beta1.Application) (*v1beta1.Application, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(applicationsResource, "status", c.ns, application), &v1beta1.Application{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Application), err +} + +// Delete takes name of the application and deletes it. Returns an error if one occurs. +func (c *FakeApplications) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(applicationsResource, c.ns, name), &v1beta1.Application{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeApplications) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(applicationsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &v1beta1.ApplicationList{}) + return err +} + +// Patch applies the patch and returns the patched application. +func (c *FakeApplications) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Application, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(applicationsResource, c.ns, name, pt, data, subresources...), &v1beta1.Application{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.Application), err +} diff --git a/vendor/github.com/lucas-clemente/quic-go/.editorconfig b/vendor/github.com/lucas-clemente/quic-go/.editorconfig deleted file mode 100644 index 538ba2b24..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/.editorconfig +++ /dev/null @@ -1,5 +0,0 @@ -root = true - -[*] -indent_style = tab -indent_size = 2 diff --git a/vendor/github.com/lucas-clemente/quic-go/.gitignore b/vendor/github.com/lucas-clemente/quic-go/.gitignore deleted file mode 100644 index 040b55ac0..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -debug -debug.test -main diff --git a/vendor/github.com/lucas-clemente/quic-go/.golangci.yml b/vendor/github.com/lucas-clemente/quic-go/.golangci.yml deleted file mode 100644 index eb9de2f3b..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/.golangci.yml +++ /dev/null @@ -1,25 +0,0 @@ -run: - skip-files: - - h2quic/response_writer_closenotifier.go - - internal/handshake/unsafe_test.go - -linters-settings: - misspell: - ignore-words: - - ect - -linters: - disable-all: true - enable: - - deadcode - - goconst - - goimports - - gosimple - - ineffassign - - misspell - - staticcheck - - structcheck - - unconvert - - unused - - varcheck - - vet diff --git 
a/vendor/github.com/lucas-clemente/quic-go/.travis.yml b/vendor/github.com/lucas-clemente/quic-go/.travis.yml deleted file mode 100644 index a57287e66..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/.travis.yml +++ /dev/null @@ -1,39 +0,0 @@ -dist: trusty -group: travis_latest - -language: go - -go: - - "1.12.x" - -# first part of the GOARCH workaround -# setting the GOARCH directly doesn't work, since the value will be overwritten later -# so set it to a temporary environment variable first -env: - global: - - TIMESCALE_FACTOR=20 - - GO111MODULE=on - matrix: - - TRAVIS_GOARCH=amd64 TESTMODE=lint - - TRAVIS_GOARCH=amd64 TESTMODE=unit - - TRAVIS_GOARCH=amd64 TESTMODE=integration - - TRAVIS_GOARCH=386 TESTMODE=unit - - TRAVIS_GOARCH=386 TESTMODE=integration - - -# second part of the GOARCH workaround -# now actually set the GOARCH env variable to the value of the temporary variable set earlier -before_install: - - go get golang.org/x/tools/cmd/cover - - go get github.com/onsi/ginkgo/ginkgo - - go get github.com/onsi/gomega - - export GOARCH=$TRAVIS_GOARCH - - go env # for debugging - - "export DISPLAY=:99.0" - - "Xvfb $DISPLAY &> /dev/null &" - -script: - - .travis/script.sh - -after_success: - - .travis/after_success.sh diff --git a/vendor/github.com/lucas-clemente/quic-go/Changelog.md b/vendor/github.com/lucas-clemente/quic-go/Changelog.md deleted file mode 100644 index 47b115dcb..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/Changelog.md +++ /dev/null @@ -1,52 +0,0 @@ -# Changelog - -## v0.11.0 (2019-04-05) - -- Drop support for gQUIC. For qQUIC support, please switch to the *gquic* branch. -- Implement QUIC WG draft-19. -- Use [qtls](https://github.com/marten-seemann/qtls) for TLS 1.3. -- Return a `tls.ConnectionState` from `quic.Session.ConnectionState()`. -- Remove the error return values from `quic.Stream.CancelRead()` and `quic.Stream.CancelWrite()` - -## v0.10.0 (2018-08-28) - -- Add support for QUIC 44, drop support for QUIC 42. 
- -## v0.9.0 (2018-08-15) - -- Add a `quic.Config` option for the length of the connection ID (for IETF QUIC). -- Split Session.Close into one method for regular closing and one for closing with an error. - -## v0.8.0 (2018-06-26) - -- Add support for unidirectional streams (for IETF QUIC). -- Add a `quic.Config` option for the maximum number of incoming streams. -- Add support for QUIC 42 and 43. -- Add dial functions that use a context. -- Multiplex clients on a net.PacketConn, when using Dial(conn). - -## v0.7.0 (2018-02-03) - -- The lower boundary for packets included in ACKs is now derived, and the value sent in STOP_WAITING frames is ignored. -- Remove `DialNonFWSecure` and `DialAddrNonFWSecure`. -- Expose the `ConnectionState` in the `Session` (experimental API). -- Implement packet pacing. - -## v0.6.0 (2017-12-12) - -- Add support for QUIC 39, drop support for QUIC 35 - 37 -- Added `quic.Config` options for maximal flow control windows -- Add a `quic.Config` option for QUIC versions -- Add a `quic.Config` option to request omission of the connection ID from a server -- Add a `quic.Config` option to configure the source address validation -- Add a `quic.Config` option to configure the handshake timeout -- Add a `quic.Config` option to configure the idle timeout -- Add a `quic.Config` option to configure keep-alive -- Rename the STK to Cookie -- Implement `net.Conn`-style deadlines for streams -- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/lucas-clemente/quic-go) for details. -- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/lucas-clemente/quic-go/wiki/Logging) for more details. 
-- Rename the `h2quic.QuicRoundTripper` to `h2quic.RoundTripper` -- Changed `h2quic.Server.Serve()` to accept a `net.PacketConn` -- Drop support for Go 1.7 and 1.8. -- Various bugfixes diff --git a/vendor/github.com/lucas-clemente/quic-go/LICENSE b/vendor/github.com/lucas-clemente/quic-go/LICENSE deleted file mode 100644 index 51378befb..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 the quic-go authors & Google, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/lucas-clemente/quic-go/README.md b/vendor/github.com/lucas-clemente/quic-go/README.md deleted file mode 100644 index 19f987a4d..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/README.md +++ /dev/null @@ -1,68 +0,0 @@ -# A QUIC implementation in pure Go - - - -[![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/lucas-clemente/quic-go) -[![Travis Build Status](https://img.shields.io/travis/lucas-clemente/quic-go/master.svg?style=flat-square&label=Travis+build)](https://travis-ci.org/lucas-clemente/quic-go) -[![CircleCI Build Status](https://img.shields.io/circleci/project/github/lucas-clemente/quic-go.svg?style=flat-square&label=CircleCI+build)](https://circleci.com/gh/lucas-clemente/quic-go) -[![Windows Build Status](https://img.shields.io/appveyor/ci/lucas-clemente/quic-go/master.svg?style=flat-square&label=windows+build)](https://ci.appveyor.com/project/lucas-clemente/quic-go/branch/master) -[![Code Coverage](https://img.shields.io/codecov/c/github/lucas-clemente/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/lucas-clemente/quic-go/) - -quic-go is an implementation of the [QUIC](https://en.wikipedia.org/wiki/QUIC) protocol in Go. It roughly implements the [IETF QUIC draft](https://github.com/quicwg/base-drafts), although we don't fully support any of the draft versions at the moment. - -## Version compatibility - -Since quic-go is under active development, there's no guarantee that two builds of different commits are interoperable. The QUIC version used in the *master* branch is just a placeholder, and should not be considered stable. - -If you want to use quic-go as a library in other projects, please consider using a [tagged release](https://github.com/lucas-clemente/quic-go/releases). These releases expose [experimental QUIC versions](https://github.com/quicwg/base-drafts/wiki/QUIC-Versions), which are guaranteed to be stable. 
- -## Google QUIC - -quic-go used to support both the QUIC versions supported by Google Chrome and QUIC as deployed on Google's servers, as well as IETF QUIC. Due to the divergence of the two protocols, we decided to not support both versions any more. - -The *master* branch **only** supports IETF QUIC. For Google QUIC support, please refer to the [gquic branch](https://github.com/lucas-clemente/quic-go/tree/gquic). - -## Guides - -We currently support Go 1.12+. - -Installing and updating dependencies: - - go get -t -u ./... - -Running tests: - - go test ./... - -### HTTP mapping - -We're currently not implementing the HTTP mapping as described in the [QUIC over HTTP draft](https://quicwg.org/base-drafts/draft-ietf-quic-http.html). The HTTP mapping here is a leftover from Google QUIC. - -### QUIC without HTTP/2 - -Take a look at [this echo example](example/echo/echo.go). - -## Usage - -### As a server - -See the [example server](example/main.go). Starting a QUIC server is very similar to the standard lib http in go: - -```go -http.Handle("/", http.FileServer(http.Dir(wwwDir))) -h2quic.ListenAndServeQUIC("localhost:4242", "/path/to/cert/chain.pem", "/path/to/privkey.pem", nil) -``` - -### As a client - -See the [example client](example/client/main.go). Use a `h2quic.RoundTripper` as a `Transport` in a `http.Client`. - -```go -http.Client{ - Transport: &h2quic.RoundTripper{}, -} -``` - -## Contributing - -We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/lucas-clemente/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment. 
diff --git a/vendor/github.com/lucas-clemente/quic-go/appveyor.yml b/vendor/github.com/lucas-clemente/quic-go/appveyor.yml deleted file mode 100644 index 9ecd2d04b..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/appveyor.yml +++ /dev/null @@ -1,34 +0,0 @@ -version: "{build}" - -os: Windows Server 2012 R2 - -environment: - GOPATH: c:\gopath - CGO_ENABLED: 0 - TIMESCALE_FACTOR: 20 - matrix: - - GOARCH: 386 - - GOARCH: amd64 - -clone_folder: c:\gopath\src\github.com\lucas-clemente\quic-go - -install: - - rmdir c:\go /s /q - - appveyor DownloadFile https://storage.googleapis.com/golang/go1.12.windows-amd64.zip - - 7z x go1.12.windows-amd64.zip -y -oC:\ > NUL - - set PATH=%PATH%;%GOPATH%\bin\windows_%GOARCH%;%GOPATH%\bin - - set GO111MODULE=on - - echo %PATH% - - echo %GOPATH% - - go get github.com/onsi/ginkgo/ginkgo - - go get github.com/onsi/gomega - - go version - - go env - -build_script: - - ginkgo -r -v -randomizeAllSpecs -randomizeSuites -trace -skipPackage benchmark,integrationtests - - ginkgo -randomizeAllSpecs -randomizeSuites -trace benchmark -- -samples=1 - -test: off - -deploy: off diff --git a/vendor/github.com/lucas-clemente/quic-go/buffer_pool.go b/vendor/github.com/lucas-clemente/quic-go/buffer_pool.go deleted file mode 100644 index d6fb76730..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/buffer_pool.go +++ /dev/null @@ -1,75 +0,0 @@ -package quic - -import ( - "sync" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -type packetBuffer struct { - Slice []byte - - // refCount counts how many packets the Slice is used in. - // It doesn't support concurrent use. - // It is > 1 when used for coalesced packet. - refCount int -} - -// Split increases the refCount. -// It must be called when a packet buffer is used for more than one packet, -// e.g. when splitting coalesced packets. -func (b *packetBuffer) Split() { - b.refCount++ -} - -// Decrement decrements the reference counter. 
-// It doesn't put the buffer back into the pool. -func (b *packetBuffer) Decrement() { - b.refCount-- - if b.refCount < 0 { - panic("negative packetBuffer refCount") - } -} - -// MaybeRelease puts the packet buffer back into the pool, -// if the reference counter already reached 0. -func (b *packetBuffer) MaybeRelease() { - // only put the packetBuffer back if it's not used any more - if b.refCount == 0 { - b.putBack() - } -} - -// Release puts back the packet buffer into the pool. -// It should be called when processing is definitely finished. -func (b *packetBuffer) Release() { - b.Decrement() - if b.refCount != 0 { - panic("packetBuffer refCount not zero") - } - b.putBack() -} - -func (b *packetBuffer) putBack() { - if cap(b.Slice) != int(protocol.MaxReceivePacketSize) { - panic("putPacketBuffer called with packet of wrong size!") - } - bufferPool.Put(b) -} - -var bufferPool sync.Pool - -func getPacketBuffer() *packetBuffer { - buf := bufferPool.Get().(*packetBuffer) - buf.refCount = 1 - buf.Slice = buf.Slice[:protocol.MaxReceivePacketSize] - return buf -} - -func init() { - bufferPool.New = func() interface{} { - return &packetBuffer{ - Slice: make([]byte, 0, protocol.MaxReceivePacketSize), - } - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/client.go b/vendor/github.com/lucas-clemente/quic-go/client.go deleted file mode 100644 index 9ede391fe..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/client.go +++ /dev/null @@ -1,413 +0,0 @@ -package quic - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "sync" - - "github.com/lucas-clemente/quic-go/internal/handshake" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type client struct { - mutex sync.Mutex - - conn connection - // If the client is created with DialAddr, we create a packet conn. - // If it is started with Dial, we take a packet conn as a parameter. 
- createdPacketConn bool - - packetHandlers packetHandlerManager - - versionNegotiated utils.AtomicBool // has the server accepted our version - receivedVersionNegotiationPacket bool - negotiatedVersions []protocol.VersionNumber // the list of versions from the version negotiation packet - - tlsConf *tls.Config - config *Config - - srcConnID protocol.ConnectionID - destConnID protocol.ConnectionID - - initialPacketNumber protocol.PacketNumber - - initialVersion protocol.VersionNumber - version protocol.VersionNumber - - handshakeChan chan struct{} - - session quicSession - - logger utils.Logger -} - -var _ packetHandler = &client{} - -var ( - // make it possible to mock connection ID generation in the tests - generateConnectionID = protocol.GenerateConnectionID - generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial -) - -// DialAddr establishes a new QUIC connection to a server. -// It uses a new UDP connection and closes this connection when the QUIC session is closed. -// The hostname for SNI is taken from the given address. -func DialAddr( - addr string, - tlsConf *tls.Config, - config *Config, -) (Session, error) { - return DialAddrContext(context.Background(), addr, tlsConf, config) -} - -// DialAddrContext establishes a new QUIC connection to a server using the provided context. -// See DialAddr for details. -func DialAddrContext( - ctx context.Context, - addr string, - tlsConf *tls.Config, - config *Config, -) (Session, error) { - udpAddr, err := net.ResolveUDPAddr("udp", addr) - if err != nil { - return nil, err - } - udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0}) - if err != nil { - return nil, err - } - return dialContext(ctx, udpConn, udpAddr, addr, tlsConf, config, true) -} - -// Dial establishes a new QUIC connection to a server using a net.PacketConn. 
-// The same PacketConn can be used for multiple calls to Dial and Listen, -// QUIC connection IDs are used for demultiplexing the different connections. -// The host parameter is used for SNI. -func Dial( - pconn net.PacketConn, - remoteAddr net.Addr, - host string, - tlsConf *tls.Config, - config *Config, -) (Session, error) { - return DialContext(context.Background(), pconn, remoteAddr, host, tlsConf, config) -} - -// DialContext establishes a new QUIC connection to a server using a net.PacketConn using the provided context. -// See Dial for details. -func DialContext( - ctx context.Context, - pconn net.PacketConn, - remoteAddr net.Addr, - host string, - tlsConf *tls.Config, - config *Config, -) (Session, error) { - return dialContext(ctx, pconn, remoteAddr, host, tlsConf, config, false) -} - -func dialContext( - ctx context.Context, - pconn net.PacketConn, - remoteAddr net.Addr, - host string, - tlsConf *tls.Config, - config *Config, - createdPacketConn bool, -) (Session, error) { - config = populateClientConfig(config, createdPacketConn) - packetHandlers, err := getMultiplexer().AddConn(pconn, config.ConnectionIDLength, config.StatelessResetKey) - if err != nil { - return nil, err - } - c, err := newClient(pconn, remoteAddr, config, tlsConf, host, createdPacketConn) - if err != nil { - return nil, err - } - c.packetHandlers = packetHandlers - if err := c.dial(ctx); err != nil { - return nil, err - } - return c.session, nil -} - -func newClient( - pconn net.PacketConn, - remoteAddr net.Addr, - config *Config, - tlsConf *tls.Config, - host string, - createdPacketConn bool, -) (*client, error) { - if tlsConf == nil { - tlsConf = &tls.Config{} - } - if tlsConf.ServerName == "" { - var err error - tlsConf.ServerName, _, err = net.SplitHostPort(host) - if err != nil { - return nil, err - } - } - - // check that all versions are actually supported - if config != nil { - for _, v := range config.Versions { - if !protocol.IsValidVersion(v) { - return nil, 
fmt.Errorf("%s is not a valid QUIC version", v) - } - } - } - - srcConnID, err := generateConnectionID(config.ConnectionIDLength) - if err != nil { - return nil, err - } - destConnID, err := generateConnectionIDForInitial() - if err != nil { - return nil, err - } - c := &client{ - srcConnID: srcConnID, - destConnID: destConnID, - conn: &conn{pconn: pconn, currentAddr: remoteAddr}, - createdPacketConn: createdPacketConn, - tlsConf: tlsConf, - config: config, - version: config.Versions[0], - handshakeChan: make(chan struct{}), - logger: utils.DefaultLogger.WithPrefix("client"), - } - return c, nil -} - -// populateClientConfig populates fields in the quic.Config with their default values, if none are set -// it may be called with nil -func populateClientConfig(config *Config, createdPacketConn bool) *Config { - if config == nil { - config = &Config{} - } - versions := config.Versions - if len(versions) == 0 { - versions = protocol.SupportedVersions - } - - handshakeTimeout := protocol.DefaultHandshakeTimeout - if config.HandshakeTimeout != 0 { - handshakeTimeout = config.HandshakeTimeout - } - idleTimeout := protocol.DefaultIdleTimeout - if config.IdleTimeout != 0 { - idleTimeout = config.IdleTimeout - } - - maxReceiveStreamFlowControlWindow := config.MaxReceiveStreamFlowControlWindow - if maxReceiveStreamFlowControlWindow == 0 { - maxReceiveStreamFlowControlWindow = protocol.DefaultMaxReceiveStreamFlowControlWindow - } - maxReceiveConnectionFlowControlWindow := config.MaxReceiveConnectionFlowControlWindow - if maxReceiveConnectionFlowControlWindow == 0 { - maxReceiveConnectionFlowControlWindow = protocol.DefaultMaxReceiveConnectionFlowControlWindow - } - maxIncomingStreams := config.MaxIncomingStreams - if maxIncomingStreams == 0 { - maxIncomingStreams = protocol.DefaultMaxIncomingStreams - } else if maxIncomingStreams < 0 { - maxIncomingStreams = 0 - } - maxIncomingUniStreams := config.MaxIncomingUniStreams - if maxIncomingUniStreams == 0 { - maxIncomingUniStreams 
= protocol.DefaultMaxIncomingUniStreams - } else if maxIncomingUniStreams < 0 { - maxIncomingUniStreams = 0 - } - connIDLen := config.ConnectionIDLength - if connIDLen == 0 && !createdPacketConn { - connIDLen = protocol.DefaultConnectionIDLength - } - - return &Config{ - Versions: versions, - HandshakeTimeout: handshakeTimeout, - IdleTimeout: idleTimeout, - ConnectionIDLength: connIDLen, - MaxReceiveStreamFlowControlWindow: maxReceiveStreamFlowControlWindow, - MaxReceiveConnectionFlowControlWindow: maxReceiveConnectionFlowControlWindow, - MaxIncomingStreams: maxIncomingStreams, - MaxIncomingUniStreams: maxIncomingUniStreams, - KeepAlive: config.KeepAlive, - StatelessResetKey: config.StatelessResetKey, - } -} - -func (c *client) dial(ctx context.Context) error { - c.logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", c.tlsConf.ServerName, c.conn.LocalAddr(), c.conn.RemoteAddr(), c.srcConnID, c.destConnID, c.version) - - if err := c.createNewTLSSession(c.version); err != nil { - return err - } - err := c.establishSecureConnection(ctx) - if err == errCloseForRecreating { - return c.dial(ctx) - } - return err -} - -// establishSecureConnection runs the session, and tries to establish a secure connection -// It returns: -// - errCloseForRecreating when the server sends a version negotiation packet -// - any other error that might occur -// - when the connection is forward-secure -func (c *client) establishSecureConnection(ctx context.Context) error { - errorChan := make(chan error, 1) - - go func() { - err := c.session.run() // returns as soon as the session is closed - if err != errCloseForRecreating && c.createdPacketConn { - c.packetHandlers.Close() - } - errorChan <- err - }() - - select { - case <-ctx.Done(): - // The session will send a PeerGoingAway error to the server. 
- c.session.Close() - return ctx.Err() - case err := <-errorChan: - return err - case <-c.handshakeChan: - // handshake successfully completed - return nil - } -} - -func (c *client) handlePacket(p *receivedPacket) { - if wire.IsVersionNegotiationPacket(p.data) { - go c.handleVersionNegotiationPacket(p) - return - } - - // this is the first packet we are receiving - // since it is not a Version Negotiation Packet, this means the server supports the suggested version - if !c.versionNegotiated.Get() { - c.versionNegotiated.Set(true) - } - - c.session.handlePacket(p) -} - -func (c *client) handleVersionNegotiationPacket(p *receivedPacket) { - c.mutex.Lock() - defer c.mutex.Unlock() - - hdr, _, _, err := wire.ParsePacket(p.data, 0) - if err != nil { - c.logger.Debugf("Error parsing Version Negotiation packet: %s", err) - return - } - - // ignore delayed / duplicated version negotiation packets - if c.receivedVersionNegotiationPacket || c.versionNegotiated.Get() { - c.logger.Debugf("Received a delayed Version Negotiation packet.") - return - } - - for _, v := range hdr.SupportedVersions { - if v == c.version { - // The Version Negotiation packet contains the version that we offered. - // This might be a packet sent by an attacker (or by a terribly broken server implementation). - return - } - } - - c.logger.Infof("Received a Version Negotiation packet. Supported Versions: %s", hdr.SupportedVersions) - newVersion, ok := protocol.ChooseSupportedVersion(c.config.Versions, hdr.SupportedVersions) - if !ok { - c.session.destroy(fmt.Errorf("No compatible QUIC version found. We support %s, server offered %s", c.config.Versions, hdr.SupportedVersions)) - c.logger.Debugf("No compatible QUIC version found.") - return - } - c.receivedVersionNegotiationPacket = true - c.negotiatedVersions = hdr.SupportedVersions - - // switch to negotiated version - c.initialVersion = c.version - c.version = newVersion - - c.logger.Infof("Switching to QUIC version %s. 
New connection ID: %s", newVersion, c.destConnID) - c.initialPacketNumber = c.session.closeForRecreating() -} - -func (c *client) createNewTLSSession(version protocol.VersionNumber) error { - params := &handshake.TransportParameters{ - InitialMaxStreamDataBidiRemote: protocol.InitialMaxStreamData, - InitialMaxStreamDataBidiLocal: protocol.InitialMaxStreamData, - InitialMaxStreamDataUni: protocol.InitialMaxStreamData, - InitialMaxData: protocol.InitialMaxData, - IdleTimeout: c.config.IdleTimeout, - MaxBidiStreams: uint64(c.config.MaxIncomingStreams), - MaxUniStreams: uint64(c.config.MaxIncomingUniStreams), - AckDelayExponent: protocol.AckDelayExponent, - DisableMigration: true, - } - - c.mutex.Lock() - defer c.mutex.Unlock() - runner := &runner{ - packetHandlerManager: c.packetHandlers, - onHandshakeCompleteImpl: func(_ Session) { close(c.handshakeChan) }, - } - sess, err := newClientSession( - c.conn, - runner, - c.destConnID, - c.srcConnID, - c.config, - c.tlsConf, - c.initialPacketNumber, - params, - c.initialVersion, - c.logger, - c.version, - ) - if err != nil { - return err - } - c.session = sess - c.packetHandlers.Add(c.srcConnID, c) - return nil -} - -func (c *client) Close() error { - c.mutex.Lock() - defer c.mutex.Unlock() - if c.session == nil { - return nil - } - return c.session.Close() -} - -func (c *client) destroy(e error) { - c.mutex.Lock() - defer c.mutex.Unlock() - if c.session == nil { - return - } - c.session.destroy(e) -} - -func (c *client) GetVersion() protocol.VersionNumber { - c.mutex.Lock() - v := c.version - c.mutex.Unlock() - return v -} - -func (c *client) getPerspective() protocol.Perspective { - return protocol.PerspectiveClient -} diff --git a/vendor/github.com/lucas-clemente/quic-go/codecov.yml b/vendor/github.com/lucas-clemente/quic-go/codecov.yml deleted file mode 100644 index f077c1ad0..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/codecov.yml +++ /dev/null @@ -1,18 +0,0 @@ -coverage: - round: nearest - ignore: - - 
streams_map_incoming_bidi.go - - streams_map_incoming_uni.go - - streams_map_outgoing_bidi.go - - streams_map_outgoing_uni.go - - h2quic/gzipreader.go - - h2quic/response.go - - internal/ackhandler/packet_linkedlist.go - - internal/utils/byteinterval_linkedlist.go - - internal/utils/packetinterval_linkedlist.go - - internal/utils/linkedlist/linkedlist.go - status: - project: - default: - threshold: 0.5 - patch: false diff --git a/vendor/github.com/lucas-clemente/quic-go/conn.go b/vendor/github.com/lucas-clemente/quic-go/conn.go deleted file mode 100644 index 700c1471c..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/conn.go +++ /dev/null @@ -1,54 +0,0 @@ -package quic - -import ( - "net" - "sync" -) - -type connection interface { - Write([]byte) error - Read([]byte) (int, net.Addr, error) - Close() error - LocalAddr() net.Addr - RemoteAddr() net.Addr - SetCurrentRemoteAddr(net.Addr) -} - -type conn struct { - mutex sync.RWMutex - - pconn net.PacketConn - currentAddr net.Addr -} - -var _ connection = &conn{} - -func (c *conn) Write(p []byte) error { - _, err := c.pconn.WriteTo(p, c.currentAddr) - return err -} - -func (c *conn) Read(p []byte) (int, net.Addr, error) { - return c.pconn.ReadFrom(p) -} - -func (c *conn) SetCurrentRemoteAddr(addr net.Addr) { - c.mutex.Lock() - c.currentAddr = addr - c.mutex.Unlock() -} - -func (c *conn) LocalAddr() net.Addr { - return c.pconn.LocalAddr() -} - -func (c *conn) RemoteAddr() net.Addr { - c.mutex.RLock() - addr := c.currentAddr - c.mutex.RUnlock() - return addr -} - -func (c *conn) Close() error { - return c.pconn.Close() -} diff --git a/vendor/github.com/lucas-clemente/quic-go/crypto_stream.go b/vendor/github.com/lucas-clemente/quic-go/crypto_stream.go deleted file mode 100644 index 095feb1d1..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/crypto_stream.go +++ /dev/null @@ -1,135 +0,0 @@ -package quic - -import ( - "errors" - "fmt" - "io" - - "github.com/lucas-clemente/quic-go/internal/protocol" - 
"github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type cryptoStream interface { - // for receiving data - HandleCryptoFrame(*wire.CryptoFrame) error - GetCryptoData() []byte - Finish() error - // for sending data - io.Writer - HasData() bool - PopCryptoFrame(protocol.ByteCount) *wire.CryptoFrame -} - -type postHandshakeCryptoStream struct { - cryptoStream - - framer framer -} - -func newPostHandshakeCryptoStream(framer framer) cryptoStream { - return &postHandshakeCryptoStream{ - cryptoStream: newCryptoStream(), - framer: framer, - } -} - -// Write writes post-handshake messages. -// For simplicity, post-handshake crypto messages are treated as control frames. -// The framer functions as a stack (LIFO), so if there are multiple writes, -// they will be returned in the opposite order. -// This is acceptable, since post-handshake crypto messages are very rare. -func (s *postHandshakeCryptoStream) Write(p []byte) (int, error) { - n, err := s.cryptoStream.Write(p) - if err != nil { - return n, err - } - for s.cryptoStream.HasData() { - s.framer.QueueControlFrame(s.PopCryptoFrame(protocol.MaxPostHandshakeCryptoFrameSize)) - } - return n, nil -} - -type cryptoStreamImpl struct { - queue *frameSorter - msgBuf []byte - - highestOffset protocol.ByteCount - finished bool - - writeOffset protocol.ByteCount - writeBuf []byte -} - -func newCryptoStream() cryptoStream { - return &cryptoStreamImpl{queue: newFrameSorter()} -} - -func (s *cryptoStreamImpl) HandleCryptoFrame(f *wire.CryptoFrame) error { - highestOffset := f.Offset + protocol.ByteCount(len(f.Data)) - if maxOffset := highestOffset; maxOffset > protocol.MaxCryptoStreamOffset { - return fmt.Errorf("received invalid offset %d on crypto stream, maximum allowed %d", maxOffset, protocol.MaxCryptoStreamOffset) - } - if s.finished { - if highestOffset > s.highestOffset { - // reject crypto data received after this stream was already finished - return 
errors.New("received crypto data after change of encryption level") - } - // ignore data with a smaller offset than the highest received - // could e.g. be a retransmission - return nil - } - s.highestOffset = utils.MaxByteCount(s.highestOffset, highestOffset) - if err := s.queue.Push(f.Data, f.Offset); err != nil { - return err - } - for { - _, data := s.queue.Pop() - if data == nil { - return nil - } - s.msgBuf = append(s.msgBuf, data...) - } -} - -// GetCryptoData retrieves data that was received in CRYPTO frames -func (s *cryptoStreamImpl) GetCryptoData() []byte { - if len(s.msgBuf) < 4 { - return nil - } - msgLen := 4 + int(s.msgBuf[1])<<16 + int(s.msgBuf[2])<<8 + int(s.msgBuf[3]) - if len(s.msgBuf) < msgLen { - return nil - } - msg := make([]byte, msgLen) - copy(msg, s.msgBuf[:msgLen]) - s.msgBuf = s.msgBuf[msgLen:] - return msg -} - -func (s *cryptoStreamImpl) Finish() error { - if s.queue.HasMoreData() { - return errors.New("encryption level changed, but crypto stream has more data to read") - } - s.finished = true - return nil -} - -// Writes writes data that should be sent out in CRYPTO frames -func (s *cryptoStreamImpl) Write(p []byte) (int, error) { - s.writeBuf = append(s.writeBuf, p...) 
- return len(p), nil -} - -func (s *cryptoStreamImpl) HasData() bool { - return len(s.writeBuf) > 0 -} - -func (s *cryptoStreamImpl) PopCryptoFrame(maxLen protocol.ByteCount) *wire.CryptoFrame { - f := &wire.CryptoFrame{Offset: s.writeOffset} - n := utils.MinByteCount(f.MaxDataLen(maxLen), protocol.ByteCount(len(s.writeBuf))) - f.Data = s.writeBuf[:n] - s.writeBuf = s.writeBuf[n:] - s.writeOffset += n - return f -} diff --git a/vendor/github.com/lucas-clemente/quic-go/crypto_stream_manager.go b/vendor/github.com/lucas-clemente/quic-go/crypto_stream_manager.go deleted file mode 100644 index 489b306a7..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/crypto_stream_manager.go +++ /dev/null @@ -1,60 +0,0 @@ -package quic - -import ( - "fmt" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type cryptoDataHandler interface { - HandleMessage([]byte, protocol.EncryptionLevel) bool -} - -type cryptoStreamManager struct { - cryptoHandler cryptoDataHandler - - initialStream cryptoStream - handshakeStream cryptoStream - oneRTTStream cryptoStream -} - -func newCryptoStreamManager( - cryptoHandler cryptoDataHandler, - initialStream cryptoStream, - handshakeStream cryptoStream, - oneRTTStream cryptoStream, -) *cryptoStreamManager { - return &cryptoStreamManager{ - cryptoHandler: cryptoHandler, - initialStream: initialStream, - handshakeStream: handshakeStream, - oneRTTStream: oneRTTStream, - } -} - -func (m *cryptoStreamManager) HandleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) (bool /* encryption level changed */, error) { - var str cryptoStream - switch encLevel { - case protocol.EncryptionInitial: - str = m.initialStream - case protocol.EncryptionHandshake: - str = m.handshakeStream - case protocol.Encryption1RTT: - str = m.oneRTTStream - default: - return false, fmt.Errorf("received CRYPTO frame with unexpected encryption level: %s", encLevel) - } - if err := 
str.HandleCryptoFrame(frame); err != nil { - return false, err - } - for { - data := str.GetCryptoData() - if data == nil { - return false, nil - } - if encLevelFinished := m.cryptoHandler.HandleMessage(data, encLevel); encLevelFinished { - return true, str.Finish() - } - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/frame_sorter.go b/vendor/github.com/lucas-clemente/quic-go/frame_sorter.go deleted file mode 100644 index 609c35dd7..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/frame_sorter.go +++ /dev/null @@ -1,159 +0,0 @@ -package quic - -import ( - "errors" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -type frameSorter struct { - queue map[protocol.ByteCount][]byte - readPos protocol.ByteCount - gaps *utils.ByteIntervalList -} - -var errDuplicateStreamData = errors.New("Duplicate Stream Data") - -func newFrameSorter() *frameSorter { - s := frameSorter{ - gaps: utils.NewByteIntervalList(), - queue: make(map[protocol.ByteCount][]byte), - } - s.gaps.PushFront(utils.ByteInterval{Start: 0, End: protocol.MaxByteCount}) - return &s -} - -func (s *frameSorter) Push(data []byte, offset protocol.ByteCount) error { - err := s.push(data, offset) - if err == errDuplicateStreamData { - return nil - } - return err -} - -func (s *frameSorter) push(data []byte, offset protocol.ByteCount) error { - if len(data) == 0 { - return nil - } - - var wasCut bool - if oldData, ok := s.queue[offset]; ok { - if len(data) <= len(oldData) { - return errDuplicateStreamData - } - data = data[len(oldData):] - offset += protocol.ByteCount(len(oldData)) - wasCut = true - } - - start := offset - end := offset + protocol.ByteCount(len(data)) - - // skip all gaps that are before this stream frame - var gap *utils.ByteIntervalElement - for gap = s.gaps.Front(); gap != nil; gap = gap.Next() { - // the frame is a duplicate. 
Ignore it - if end <= gap.Value.Start { - return errDuplicateStreamData - } - if end > gap.Value.Start && start <= gap.Value.End { - break - } - } - - if gap == nil { - return errors.New("StreamFrameSorter BUG: no gap found") - } - - if start < gap.Value.Start { - add := gap.Value.Start - start - offset += add - start += add - data = data[add:] - wasCut = true - } - - // find the highest gaps whose Start lies before the end of the frame - endGap := gap - for end >= endGap.Value.End { - nextEndGap := endGap.Next() - if nextEndGap == nil { - return errors.New("StreamFrameSorter BUG: no end gap found") - } - if endGap != gap { - s.gaps.Remove(endGap) - } - if end <= nextEndGap.Value.Start { - break - } - // delete queued frames completely covered by the current frame - delete(s.queue, endGap.Value.End) - endGap = nextEndGap - } - - if end > endGap.Value.End { - cutLen := end - endGap.Value.End - len := protocol.ByteCount(len(data)) - cutLen - end -= cutLen - data = data[:len] - wasCut = true - } - - if start == gap.Value.Start { - if end >= gap.Value.End { - // the frame completely fills this gap - // delete the gap - s.gaps.Remove(gap) - } - if end < endGap.Value.End { - // the frame covers the beginning of the gap - // adjust the Start value to shrink the gap - endGap.Value.Start = end - } - } else if end == endGap.Value.End { - // the frame covers the end of the gap - // adjust the End value to shrink the gap - gap.Value.End = start - } else { - if gap == endGap { - // the frame lies within the current gap, splitting it into two - // insert a new gap and adjust the current one - intv := utils.ByteInterval{Start: end, End: gap.Value.End} - s.gaps.InsertAfter(intv, gap) - gap.Value.End = start - } else { - gap.Value.End = start - endGap.Value.Start = end - } - } - - if s.gaps.Len() > protocol.MaxStreamFrameSorterGaps { - return errors.New("Too many gaps in received data") - } - - if wasCut { - newData := make([]byte, len(data)) - copy(newData, data) - data = newData 
- } - - s.queue[offset] = data - return nil -} - -func (s *frameSorter) Pop() (protocol.ByteCount, []byte) { - data, ok := s.queue[s.readPos] - if !ok { - return s.readPos, nil - } - delete(s.queue, s.readPos) - offset := s.readPos - s.readPos += protocol.ByteCount(len(data)) - return offset, data -} - -// HasMoreData says if there is any more data queued at *any* offset. -func (s *frameSorter) HasMoreData() bool { - return len(s.queue) > 0 -} diff --git a/vendor/github.com/lucas-clemente/quic-go/framer.go b/vendor/github.com/lucas-clemente/quic-go/framer.go deleted file mode 100644 index fbfe9bb76..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/framer.go +++ /dev/null @@ -1,109 +0,0 @@ -package quic - -import ( - "sync" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type framer interface { - QueueControlFrame(wire.Frame) - AppendControlFrames([]wire.Frame, protocol.ByteCount) ([]wire.Frame, protocol.ByteCount) - - AddActiveStream(protocol.StreamID) - AppendStreamFrames([]wire.Frame, protocol.ByteCount) []wire.Frame -} - -type framerI struct { - mutex sync.Mutex - - streamGetter streamGetter - version protocol.VersionNumber - - activeStreams map[protocol.StreamID]struct{} - streamQueue []protocol.StreamID - - controlFrameMutex sync.Mutex - controlFrames []wire.Frame -} - -var _ framer = &framerI{} - -func newFramer( - streamGetter streamGetter, - v protocol.VersionNumber, -) framer { - return &framerI{ - streamGetter: streamGetter, - activeStreams: make(map[protocol.StreamID]struct{}), - version: v, - } -} - -func (f *framerI) QueueControlFrame(frame wire.Frame) { - f.controlFrameMutex.Lock() - f.controlFrames = append(f.controlFrames, frame) - f.controlFrameMutex.Unlock() -} - -func (f *framerI) AppendControlFrames(frames []wire.Frame, maxLen protocol.ByteCount) ([]wire.Frame, protocol.ByteCount) { - var length protocol.ByteCount - f.controlFrameMutex.Lock() - for len(f.controlFrames) > 
0 { - frame := f.controlFrames[len(f.controlFrames)-1] - frameLen := frame.Length(f.version) - if length+frameLen > maxLen { - break - } - frames = append(frames, frame) - length += frameLen - f.controlFrames = f.controlFrames[:len(f.controlFrames)-1] - } - f.controlFrameMutex.Unlock() - return frames, length -} - -func (f *framerI) AddActiveStream(id protocol.StreamID) { - f.mutex.Lock() - if _, ok := f.activeStreams[id]; !ok { - f.streamQueue = append(f.streamQueue, id) - f.activeStreams[id] = struct{}{} - } - f.mutex.Unlock() -} - -func (f *framerI) AppendStreamFrames(frames []wire.Frame, maxLen protocol.ByteCount) []wire.Frame { - var length protocol.ByteCount - f.mutex.Lock() - // pop STREAM frames, until less than MinStreamFrameSize bytes are left in the packet - numActiveStreams := len(f.streamQueue) - for i := 0; i < numActiveStreams; i++ { - if maxLen-length < protocol.MinStreamFrameSize { - break - } - id := f.streamQueue[0] - f.streamQueue = f.streamQueue[1:] - // This should never return an error. Better check it anyway. - // The stream will only be in the streamQueue, if it enqueued itself there. - str, err := f.streamGetter.GetOrOpenSendStream(id) - // The stream can be nil if it completed after it said it had data. - if str == nil || err != nil { - delete(f.activeStreams, id) - continue - } - frame, hasMoreData := str.popStreamFrame(maxLen - length) - if hasMoreData { // put the stream back in the queue (at the end) - f.streamQueue = append(f.streamQueue, id) - } else { // no more data to send. 
Stream is not active any more - delete(f.activeStreams, id) - } - if frame == nil { // can happen if the receiveStream was canceled after it said it had data - continue - } - frames = append(frames, frame) - length += frame.Length(f.version) - } - f.mutex.Unlock() - return frames -} diff --git a/vendor/github.com/lucas-clemente/quic-go/go.mod b/vendor/github.com/lucas-clemente/quic-go/go.mod deleted file mode 100644 index 63d9d89b3..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/lucas-clemente/quic-go - -go 1.12 - -require ( - github.com/cheekybits/genny v1.0.0 - github.com/golang/mock v1.2.0 - github.com/marten-seemann/qtls v0.2.3 - github.com/onsi/ginkgo v1.7.0 - github.com/onsi/gomega v1.4.3 - golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25 - golang.org/x/net v0.0.0-20180906233101-161cd47e91fd -) diff --git a/vendor/github.com/lucas-clemente/quic-go/go.sum b/vendor/github.com/lucas-clemente/quic-go/go.sum deleted file mode 100644 index c5efe8535..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/go.sum +++ /dev/null @@ -1,37 +0,0 @@ -github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= -github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 
-github.com/marten-seemann/qtls v0.2.3 h1:0yWJ43C62LsZt08vuQJDK1uC1czUc3FJeCLPoNAI4vA= -github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25 h1:jsG6UpNLt9iAsb0S2AGW28DveNzzgmbXR+ENoPjUeIU= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e h1:ZytStCyV048ZqDsWHiYDdoI2Vd4msMcrDECFxS+tL9c= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/lucas-clemente/quic-go/h2quic/client.go b/vendor/github.com/lucas-clemente/quic-go/h2quic/client.go deleted file mode 100644 index 3ffa4829a..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/h2quic/client.go +++ /dev/null @@ -1,311 +0,0 @@ -package h2quic - -import ( - "crypto/tls" - "errors" - "fmt" - "io" - "net" - "net/http" - "strings" - "sync" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "golang.org/x/net/idna" - - quic "github.com/lucas-clemente/quic-go" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -type roundTripperOpts struct { - DisableCompression bool -} - -var dialAddr = quic.DialAddr - -// client is a HTTP2 client doing QUIC requests -type client struct { - mutex sync.RWMutex - - tlsConf *tls.Config - config *quic.Config - opts *roundTripperOpts - - hostname string - handshakeErr error - dialOnce sync.Once - dialer func(network, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.Session, error) - - session quic.Session - headerStream quic.Stream - headerErr *qerr.QuicError - headerErrored chan struct{} // this channel is closed if an error occurs on the header stream - requestWriter *requestWriter - - responses map[protocol.StreamID]chan *http.Response - - logger utils.Logger -} - -var _ http.RoundTripper = 
&client{} - -var defaultQuicConfig = &quic.Config{KeepAlive: true} - -// newClient creates a new client -func newClient( - hostname string, - tlsConfig *tls.Config, - opts *roundTripperOpts, - quicConfig *quic.Config, - dialer func(network, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.Session, error), -) *client { - config := defaultQuicConfig - if quicConfig != nil { - config = quicConfig - } - return &client{ - hostname: authorityAddr("https", hostname), - responses: make(map[protocol.StreamID]chan *http.Response), - tlsConf: tlsConfig, - config: config, - opts: opts, - headerErrored: make(chan struct{}), - dialer: dialer, - logger: utils.DefaultLogger.WithPrefix("client"), - } -} - -// dial dials the connection -func (c *client) dial() error { - var err error - if c.dialer != nil { - c.session, err = c.dialer("udp", c.hostname, c.tlsConf, c.config) - } else { - c.session, err = dialAddr(c.hostname, c.tlsConf, c.config) - } - if err != nil { - return err - } - - // once the version has been negotiated, open the header stream - c.headerStream, err = c.session.OpenStreamSync() - if err != nil { - return err - } - c.requestWriter = newRequestWriter(c.headerStream, c.logger) - go c.handleHeaderStream() - return nil -} - -func (c *client) handleHeaderStream() { - decoder := hpack.NewDecoder(4096, func(hf hpack.HeaderField) {}) - h2framer := http2.NewFramer(nil, c.headerStream) - - var err error - for err == nil { - err = c.readResponse(h2framer, decoder) - } - if quicErr, ok := err.(*qerr.QuicError); !ok || quicErr.ErrorCode != qerr.NoError { - c.logger.Debugf("Error handling header stream: %s", err) - } - c.headerErr = qerr.Error(qerr.InternalError, err.Error()) - // stop all running request - close(c.headerErrored) -} - -func (c *client) readResponse(h2framer *http2.Framer, decoder *hpack.Decoder) error { - frame, err := h2framer.ReadFrame() - if err != nil { - return err - } - hframe, ok := frame.(*http2.HeadersFrame) - if !ok { - return errors.New("not 
a headers frame") - } - mhframe := &http2.MetaHeadersFrame{HeadersFrame: hframe} - mhframe.Fields, err = decoder.DecodeFull(hframe.HeaderBlockFragment()) - if err != nil { - return fmt.Errorf("cannot read header fields: %s", err.Error()) - } - - c.mutex.RLock() - responseChan, ok := c.responses[protocol.StreamID(hframe.StreamID)] - c.mutex.RUnlock() - if !ok { - return fmt.Errorf("response channel for stream %d not found", hframe.StreamID) - } - - rsp, err := responseFromHeaders(mhframe) - if err != nil { - return err - } - responseChan <- rsp - return nil -} - -// Roundtrip executes a request and returns a response -func (c *client) RoundTrip(req *http.Request) (*http.Response, error) { - // TODO: add port to address, if it doesn't have one - if req.URL.Scheme != "https" { - return nil, errors.New("quic http2: unsupported scheme") - } - if authorityAddr("https", hostnameFromRequest(req)) != c.hostname { - return nil, fmt.Errorf("h2quic Client BUG: RoundTrip called for the wrong client (expected %s, got %s)", c.hostname, req.Host) - } - - c.dialOnce.Do(func() { - c.handshakeErr = c.dial() - }) - - if c.handshakeErr != nil { - return nil, c.handshakeErr - } - - hasBody := (req.Body != nil) - - responseChan := make(chan *http.Response) - dataStream, err := c.session.OpenStreamSync() - if err != nil { - _ = c.closeWithError(err) - return nil, err - } - c.mutex.Lock() - c.responses[dataStream.StreamID()] = responseChan - c.mutex.Unlock() - - var requestedGzip bool - if !c.opts.DisableCompression && req.Header.Get("Accept-Encoding") == "" && req.Header.Get("Range") == "" && req.Method != "HEAD" { - requestedGzip = true - } - // TODO: add support for trailers - endStream := !hasBody - err = c.requestWriter.WriteRequest(req, dataStream.StreamID(), endStream, requestedGzip) - if err != nil { - _ = c.closeWithError(err) - return nil, err - } - - resc := make(chan error, 1) - if hasBody { - go func() { - resc <- c.writeRequestBody(dataStream, req.Body) - }() - } - - var res 
*http.Response - - var receivedResponse bool - var bodySent bool - - if !hasBody { - bodySent = true - } - - ctx := req.Context() - for !(bodySent && receivedResponse) { - select { - case res = <-responseChan: - receivedResponse = true - c.mutex.Lock() - delete(c.responses, dataStream.StreamID()) - c.mutex.Unlock() - case err := <-resc: - bodySent = true - if err != nil { - return nil, err - } - case <-ctx.Done(): - // error code 6 signals that stream was canceled - dataStream.CancelRead(6) - dataStream.CancelWrite(6) - c.mutex.Lock() - delete(c.responses, dataStream.StreamID()) - c.mutex.Unlock() - return nil, ctx.Err() - case <-c.headerErrored: - // an error occurred on the header stream - _ = c.closeWithError(c.headerErr) - return nil, c.headerErr - } - } - - // TODO: correctly set this variable - var streamEnded bool - isHead := (req.Method == "HEAD") - - res = setLength(res, isHead, streamEnded) - - if streamEnded || isHead { - res.Body = noBody - } else { - res.Body = &responseBody{dataStream} - if requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { - res.Header.Del("Content-Encoding") - res.Header.Del("Content-Length") - res.ContentLength = -1 - res.Body = &gzipReader{body: res.Body} - res.Uncompressed = true - } - } - - res.Request = req - return res, nil -} - -func (c *client) writeRequestBody(dataStream quic.Stream, body io.ReadCloser) (err error) { - defer func() { - cerr := body.Close() - if err == nil { - // TODO: what to do with dataStream here? Maybe reset it? - err = cerr - } - }() - - _, err = io.Copy(dataStream, body) - if err != nil { - // TODO: what to do with dataStream here? Maybe reset it? 
- return err - } - return dataStream.Close() -} - -func (c *client) closeWithError(e error) error { - if c.session == nil { - return nil - } - return c.session.CloseWithError(quic.ErrorCode(qerr.InternalError), e) -} - -// Close closes the client -func (c *client) Close() error { - if c.session == nil { - return nil - } - return c.session.Close() -} - -// copied from net/transport.go - -// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) -// and returns a host:port. The port 443 is added if needed. -func authorityAddr(scheme string, authority string) (addr string) { - host, port, err := net.SplitHostPort(authority) - if err != nil { // authority didn't have a port - port = "443" - if scheme == "http" { - port = "80" - } - host = authority - } - if a, err := idna.ToASCII(host); err == nil { - host = a - } - // IPv6 address literal, without a port: - if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") { - return host + ":" + port - } - return net.JoinHostPort(host, port) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/h2quic/gzipreader.go b/vendor/github.com/lucas-clemente/quic-go/h2quic/gzipreader.go deleted file mode 100644 index 91c226b1e..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/h2quic/gzipreader.go +++ /dev/null @@ -1,35 +0,0 @@ -package h2quic - -// copied from net/transport.go - -// gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read -import ( - "compress/gzip" - "io" -) - -// call gzip.NewReader on the first call to Read -type gzipReader struct { - body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error -} - -func (gz *gzipReader) Read(p []byte) (n int, err error) { - if gz.zerr != nil { - return 0, gz.zerr - } - if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err - } - } - return gz.zr.Read(p) -} - -func (gz *gzipReader) 
Close() error { - return gz.body.Close() -} diff --git a/vendor/github.com/lucas-clemente/quic-go/h2quic/request.go b/vendor/github.com/lucas-clemente/quic-go/h2quic/request.go deleted file mode 100644 index b27e37e2e..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/h2quic/request.go +++ /dev/null @@ -1,77 +0,0 @@ -package h2quic - -import ( - "crypto/tls" - "errors" - "net/http" - "net/url" - "strconv" - "strings" - - "golang.org/x/net/http2/hpack" -) - -func requestFromHeaders(headers []hpack.HeaderField) (*http.Request, error) { - var path, authority, method, contentLengthStr string - httpHeaders := http.Header{} - - for _, h := range headers { - switch h.Name { - case ":path": - path = h.Value - case ":method": - method = h.Value - case ":authority": - authority = h.Value - case "content-length": - contentLengthStr = h.Value - default: - if !h.IsPseudo() { - httpHeaders.Add(h.Name, h.Value) - } - } - } - - // concatenate cookie headers, see https://tools.ietf.org/html/rfc6265#section-5.4 - if len(httpHeaders["Cookie"]) > 0 { - httpHeaders.Set("Cookie", strings.Join(httpHeaders["Cookie"], "; ")) - } - - if len(path) == 0 || len(authority) == 0 || len(method) == 0 { - return nil, errors.New(":path, :authority and :method must not be empty") - } - - u, err := url.Parse(path) - if err != nil { - return nil, err - } - - var contentLength int64 - if len(contentLengthStr) > 0 { - contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64) - if err != nil { - return nil, err - } - } - - return &http.Request{ - Method: method, - URL: u, - Proto: "HTTP/2.0", - ProtoMajor: 2, - ProtoMinor: 0, - Header: httpHeaders, - Body: nil, - ContentLength: contentLength, - Host: authority, - RequestURI: path, - TLS: &tls.ConnectionState{}, - }, nil -} - -func hostnameFromRequest(req *http.Request) string { - if req.URL != nil { - return req.URL.Host - } - return "" -} diff --git a/vendor/github.com/lucas-clemente/quic-go/h2quic/request_body.go 
b/vendor/github.com/lucas-clemente/quic-go/h2quic/request_body.go deleted file mode 100644 index 2d4d59543..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/h2quic/request_body.go +++ /dev/null @@ -1,29 +0,0 @@ -package h2quic - -import ( - "io" - - quic "github.com/lucas-clemente/quic-go" -) - -type requestBody struct { - requestRead bool - dataStream quic.Stream -} - -// make sure the requestBody can be used as a http.Request.Body -var _ io.ReadCloser = &requestBody{} - -func newRequestBody(stream quic.Stream) *requestBody { - return &requestBody{dataStream: stream} -} - -func (b *requestBody) Read(p []byte) (int, error) { - b.requestRead = true - return b.dataStream.Read(p) -} - -func (b *requestBody) Close() error { - // stream's Close() closes the write side, not the read side - return nil -} diff --git a/vendor/github.com/lucas-clemente/quic-go/h2quic/request_writer.go b/vendor/github.com/lucas-clemente/quic-go/h2quic/request_writer.go deleted file mode 100644 index b5ee97513..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/h2quic/request_writer.go +++ /dev/null @@ -1,203 +0,0 @@ -package h2quic - -import ( - "bytes" - "fmt" - "net/http" - "strconv" - "strings" - "sync" - - "golang.org/x/net/http/httpguts" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - - quic "github.com/lucas-clemente/quic-go" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -type requestWriter struct { - mutex sync.Mutex - headerStream quic.Stream - - henc *hpack.Encoder - hbuf bytes.Buffer // HPACK encoder writes into this - - logger utils.Logger -} - -const defaultUserAgent = "quic-go" - -func newRequestWriter(headerStream quic.Stream, logger utils.Logger) *requestWriter { - rw := &requestWriter{ - headerStream: headerStream, - logger: logger, - } - rw.henc = hpack.NewEncoder(&rw.hbuf) - return rw -} - -func (w *requestWriter) WriteRequest(req *http.Request, dataStreamID protocol.StreamID, 
endStream, requestGzip bool) error { - // TODO: add support for trailers - // TODO: add support for gzip compression - // TODO: write continuation frames, if the header frame is too long - - w.mutex.Lock() - defer w.mutex.Unlock() - - w.encodeHeaders(req, requestGzip, "", actualContentLength(req)) - h2framer := http2.NewFramer(w.headerStream, nil) - return h2framer.WriteHeaders(http2.HeadersFrameParam{ - StreamID: uint32(dataStreamID), - EndHeaders: true, - EndStream: endStream, - BlockFragment: w.hbuf.Bytes(), - Priority: http2.PriorityParam{Weight: 0xff}, - }) -} - -// the rest of this files is copied from http2.Transport -func (w *requestWriter) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - w.hbuf.Reset() - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httpguts.PunycodeHostPort(host) - if err != nil { - return nil, err - } - - var path string - if req.Method != "CONNECT" { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - - // Check for any invalid headers and return an error before we - // potentially pollute our hpack state. 
(We want to be able to - // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) - } - } - } - - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of - // [RFC3986]). - w.writeHeader(":authority", host) - w.writeHeader(":method", req.Method) - if req.Method != "CONNECT" { - w.writeHeader(":path", path) - w.writeHeader(":scheme", req.URL.Scheme) - } - if trailers != "" { - w.writeHeader("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - lowKey := strings.ToLower(k) - switch lowKey { - case "host", "content-length": - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - case "user-agent": - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). 
- didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } - for _, v := range vv { - w.writeHeader(lowKey, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - w.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - w.writeHeader("accept-encoding", "gzip") - } - if !didUA { - w.writeHeader("user-agent", defaultUserAgent) - } - return w.hbuf.Bytes(), nil -} - -func (w *requestWriter) writeHeader(name, value string) { - w.logger.Debugf("http2: Transport encoding header %q = %q", name, value) - w.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - -func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*" -} - -// actualContentLength returns a sanitized version of -// req.ContentLength, where 0 actually means zero (not unknown) and -1 -// means unknown. 
-func actualContentLength(req *http.Request) int64 { - if req.Body == nil { - return 0 - } - if req.ContentLength != 0 { - return req.ContentLength - } - return -1 -} diff --git a/vendor/github.com/lucas-clemente/quic-go/h2quic/response.go b/vendor/github.com/lucas-clemente/quic-go/h2quic/response.go deleted file mode 100644 index d5dd21941..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/h2quic/response.go +++ /dev/null @@ -1,95 +0,0 @@ -package h2quic - -import ( - "bytes" - "errors" - "io/ioutil" - "net/http" - "net/textproto" - "strconv" - "strings" - - "golang.org/x/net/http2" -) - -// copied from net/http2/transport.go - -var errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") -var noBody = ioutil.NopCloser(bytes.NewReader(nil)) - -// from the handleResponse function -func responseFromHeaders(f *http2.MetaHeadersFrame) (*http.Response, error) { - if f.Truncated { - return nil, errResponseHeaderListSize - } - - status := f.PseudoValue("status") - if status == "" { - return nil, errors.New("missing status pseudo header") - } - statusCode, err := strconv.Atoi(status) - if err != nil { - return nil, errors.New("malformed non-numeric status pseudo header") - } - - // TODO: handle statusCode == 100 - - header := make(http.Header) - res := &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: header, - StatusCode: statusCode, - Status: status + " " + http.StatusText(statusCode), - } - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - if key == "Trailer" { - t := res.Trailer - if t == nil { - t = make(http.Header) - res.Trailer = t - } - foreachHeaderElement(hf.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil - }) - } else { - header[key] = append(header[key], hf.Value) - } - } - - return res, nil -} - -// continuation of the handleResponse function -func setLength(res *http.Response, isHead, streamEnded bool) *http.Response { - if !streamEnded || isHead { 
- res.ContentLength = -1 - if clens := res.Header["Content-Length"]; len(clens) == 1 { - if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { - res.ContentLength = clen64 - } - } - } - return res -} - -// copied from net/http/server.go - -// foreachHeaderElement splits v according to the "#rule" construction -// in RFC 2616 section 2.1 and calls fn for each non-empty element. -func foreachHeaderElement(v string, fn func(string)) { - v = textproto.TrimString(v) - if v == "" { - return - } - if !strings.Contains(v, ",") { - fn(v) - return - } - for _, f := range strings.Split(v, ",") { - if f = textproto.TrimString(f); f != "" { - fn(f) - } - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/h2quic/response_body.go b/vendor/github.com/lucas-clemente/quic-go/h2quic/response_body.go deleted file mode 100644 index 11d79e803..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/h2quic/response_body.go +++ /dev/null @@ -1,18 +0,0 @@ -package h2quic - -import ( - "io" - - quic "github.com/lucas-clemente/quic-go" -) - -type responseBody struct { - quic.Stream -} - -var _ io.ReadCloser = &responseBody{} - -func (rb *responseBody) Close() error { - rb.Stream.CancelRead(0) - return nil -} diff --git a/vendor/github.com/lucas-clemente/quic-go/h2quic/response_writer.go b/vendor/github.com/lucas-clemente/quic-go/h2quic/response_writer.go deleted file mode 100644 index 02841227e..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/h2quic/response_writer.go +++ /dev/null @@ -1,114 +0,0 @@ -package h2quic - -import ( - "bytes" - "net/http" - "strconv" - "strings" - "sync" - - quic "github.com/lucas-clemente/quic-go" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" -) - -type responseWriter struct { - dataStreamID protocol.StreamID - dataStream quic.Stream - - headerStream quic.Stream - headerStreamMutex *sync.Mutex - - header 
http.Header - status int // status code passed to WriteHeader - headerWritten bool - - logger utils.Logger -} - -func newResponseWriter( - headerStream quic.Stream, - headerStreamMutex *sync.Mutex, - dataStream quic.Stream, - dataStreamID protocol.StreamID, - logger utils.Logger, -) *responseWriter { - return &responseWriter{ - header: http.Header{}, - headerStream: headerStream, - headerStreamMutex: headerStreamMutex, - dataStream: dataStream, - dataStreamID: dataStreamID, - logger: logger, - } -} - -func (w *responseWriter) Header() http.Header { - return w.header -} - -func (w *responseWriter) WriteHeader(status int) { - if w.headerWritten { - return - } - w.headerWritten = true - w.status = status - - var headers bytes.Buffer - enc := hpack.NewEncoder(&headers) - enc.WriteField(hpack.HeaderField{Name: ":status", Value: strconv.Itoa(status)}) - - for k, v := range w.header { - for index := range v { - enc.WriteField(hpack.HeaderField{Name: strings.ToLower(k), Value: v[index]}) - } - } - - w.logger.Infof("Responding with %d", status) - w.headerStreamMutex.Lock() - defer w.headerStreamMutex.Unlock() - h2framer := http2.NewFramer(w.headerStream, nil) - err := h2framer.WriteHeaders(http2.HeadersFrameParam{ - StreamID: uint32(w.dataStreamID), - EndHeaders: true, - BlockFragment: headers.Bytes(), - }) - if err != nil { - w.logger.Errorf("could not write h2 header: %s", err.Error()) - } -} - -func (w *responseWriter) Write(p []byte) (int, error) { - if !w.headerWritten { - w.WriteHeader(200) - } - if !bodyAllowedForStatus(w.status) { - return 0, http.ErrBodyNotAllowed - } - return w.dataStream.Write(p) -} - -func (w *responseWriter) Flush() {} - -// This is a NOP. Use http.Request.Context -func (w *responseWriter) CloseNotify() <-chan bool { return make(<-chan bool) } - -// test that we implement http.Flusher -var _ http.Flusher = &responseWriter{} - -// copied from http2/http2.go -// bodyAllowedForStatus reports whether a given response status code -// permits a body. 
See RFC 2616, section 4.4. -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} diff --git a/vendor/github.com/lucas-clemente/quic-go/h2quic/response_writer_closenotifier.go b/vendor/github.com/lucas-clemente/quic-go/h2quic/response_writer_closenotifier.go deleted file mode 100644 index b26f91c1b..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/h2quic/response_writer_closenotifier.go +++ /dev/null @@ -1,9 +0,0 @@ -package h2quic - -import "net/http" - -// The CloseNotifier is a deprecated interface, and staticcheck will report that from Go 1.11. -// By defining it in a separate file, we can exclude this file from staticcheck. - -// test that we implement http.CloseNotifier -var _ http.CloseNotifier = &responseWriter{} diff --git a/vendor/github.com/lucas-clemente/quic-go/h2quic/roundtrip.go b/vendor/github.com/lucas-clemente/quic-go/h2quic/roundtrip.go deleted file mode 100644 index 27732b5e4..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/h2quic/roundtrip.go +++ /dev/null @@ -1,179 +0,0 @@ -package h2quic - -import ( - "crypto/tls" - "errors" - "fmt" - "io" - "net/http" - "strings" - "sync" - - quic "github.com/lucas-clemente/quic-go" - - "golang.org/x/net/http/httpguts" -) - -type roundTripCloser interface { - http.RoundTripper - io.Closer -} - -// RoundTripper implements the http.RoundTripper interface -type RoundTripper struct { - mutex sync.Mutex - - // DisableCompression, if true, prevents the Transport from - // requesting compression with an "Accept-Encoding: gzip" - // request header when the Request contains no existing - // Accept-Encoding value. If the Transport requests gzip on - // its own and gets a gzipped response, it's transparently - // decoded in the Response.Body. However, if the user - // explicitly requested gzip it is not automatically - // uncompressed. 
- DisableCompression bool - - // TLSClientConfig specifies the TLS configuration to use with - // tls.Client. If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // QuicConfig is the quic.Config used for dialing new connections. - // If nil, reasonable default values will be used. - QuicConfig *quic.Config - - // Dial specifies an optional dial function for creating QUIC - // connections for requests. - // If Dial is nil, quic.DialAddr will be used. - Dial func(network, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.Session, error) - - clients map[string]roundTripCloser -} - -// RoundTripOpt are options for the Transport.RoundTripOpt method. -type RoundTripOpt struct { - // OnlyCachedConn controls whether the RoundTripper may - // create a new QUIC connection. If set true and - // no cached connection is available, RoundTrip - // will return ErrNoCachedConn. - OnlyCachedConn bool -} - -var _ roundTripCloser = &RoundTripper{} - -// ErrNoCachedConn is returned when RoundTripper.OnlyCachedConn is set -var ErrNoCachedConn = errors.New("h2quic: no cached connection was available") - -// RoundTripOpt is like RoundTrip, but takes options. 
-func (r *RoundTripper) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if req.URL == nil { - closeRequestBody(req) - return nil, errors.New("quic: nil Request.URL") - } - if req.URL.Host == "" { - closeRequestBody(req) - return nil, errors.New("quic: no Host in request URL") - } - if req.Header == nil { - closeRequestBody(req) - return nil, errors.New("quic: nil Request.Header") - } - - if req.URL.Scheme == "https" { - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("quic: invalid http header field name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("quic: invalid http header field value %q for key %v", v, k) - } - } - } - } else { - closeRequestBody(req) - return nil, fmt.Errorf("quic: unsupported protocol scheme: %s", req.URL.Scheme) - } - - if req.Method != "" && !validMethod(req.Method) { - closeRequestBody(req) - return nil, fmt.Errorf("quic: invalid method %q", req.Method) - } - - hostname := authorityAddr("https", hostnameFromRequest(req)) - cl, err := r.getClient(hostname, opt.OnlyCachedConn) - if err != nil { - return nil, err - } - return cl.RoundTrip(req) -} - -// RoundTrip does a round trip. 
-func (r *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - return r.RoundTripOpt(req, RoundTripOpt{}) -} - -func (r *RoundTripper) getClient(hostname string, onlyCached bool) (http.RoundTripper, error) { - r.mutex.Lock() - defer r.mutex.Unlock() - - if r.clients == nil { - r.clients = make(map[string]roundTripCloser) - } - - client, ok := r.clients[hostname] - if !ok { - if onlyCached { - return nil, ErrNoCachedConn - } - client = newClient( - hostname, - r.TLSClientConfig, - &roundTripperOpts{DisableCompression: r.DisableCompression}, - r.QuicConfig, - r.Dial, - ) - r.clients[hostname] = client - } - return client, nil -} - -// Close closes the QUIC connections that this RoundTripper has used -func (r *RoundTripper) Close() error { - r.mutex.Lock() - defer r.mutex.Unlock() - for _, client := range r.clients { - if err := client.Close(); err != nil { - return err - } - } - r.clients = nil - return nil -} - -func closeRequestBody(req *http.Request) { - if req.Body != nil { - req.Body.Close() - } -} - -func validMethod(method string) bool { - /* - Method = "OPTIONS" ; Section 9.2 - | "GET" ; Section 9.3 - | "HEAD" ; Section 9.4 - | "POST" ; Section 9.5 - | "PUT" ; Section 9.6 - | "DELETE" ; Section 9.7 - | "TRACE" ; Section 9.8 - | "CONNECT" ; Section 9.9 - | extension-method - extension-method = token - token = 1* - */ - return len(method) > 0 && strings.IndexFunc(method, isNotToken) == -1 -} - -// copied from net/http/http.go -func isNotToken(r rune) bool { - return !httpguts.IsTokenRune(r) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/h2quic/server.go b/vendor/github.com/lucas-clemente/quic-go/h2quic/server.go deleted file mode 100644 index 327499198..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/h2quic/server.go +++ /dev/null @@ -1,402 +0,0 @@ -package h2quic - -import ( - "crypto/tls" - "errors" - "fmt" - "net" - "net/http" - "runtime" - "strings" - "sync" - "sync/atomic" - "time" - - quic 
"github.com/lucas-clemente/quic-go" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" -) - -type streamCreator interface { - quic.Session - GetOrOpenStream(protocol.StreamID) (quic.Stream, error) -} - -type remoteCloser interface { - CloseRemote(protocol.ByteCount) -} - -// allows mocking of quic.Listen and quic.ListenAddr -var ( - quicListen = quic.Listen - quicListenAddr = quic.ListenAddr -) - -// Server is a HTTP2 server listening for QUIC connections. -type Server struct { - *http.Server - - // By providing a quic.Config, it is possible to set parameters of the QUIC connection. - // If nil, it uses reasonable default values. - QuicConfig *quic.Config - - // Private flag for demo, do not use - CloseAfterFirstRequest bool - - port uint32 // used atomically - - listenerMutex sync.Mutex - listener quic.Listener - closed bool - - supportedVersionsAsString string - - logger utils.Logger // will be set by Server.serveImpl() -} - -// ListenAndServe listens on the UDP address s.Addr and calls s.Handler to handle HTTP/2 requests on incoming connections. -func (s *Server) ListenAndServe() error { - if s.Server == nil { - return errors.New("use of h2quic.Server without http.Server") - } - return s.serveImpl(s.TLSConfig, nil) -} - -// ListenAndServeTLS listens on the UDP address s.Addr and calls s.Handler to handle HTTP/2 requests on incoming connections. -func (s *Server) ListenAndServeTLS(certFile, keyFile string) error { - var err error - certs := make([]tls.Certificate, 1) - certs[0], err = tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return err - } - // We currently only use the cert-related stuff from tls.Config, - // so we don't need to make a full copy. - config := &tls.Config{ - Certificates: certs, - } - return s.serveImpl(config, nil) -} - -// Serve an existing UDP connection. 
-func (s *Server) Serve(conn net.PacketConn) error { - return s.serveImpl(s.TLSConfig, conn) -} - -func (s *Server) serveImpl(tlsConfig *tls.Config, conn net.PacketConn) error { - if s.Server == nil { - return errors.New("use of h2quic.Server without http.Server") - } - s.logger = utils.DefaultLogger.WithPrefix("server") - s.listenerMutex.Lock() - if s.closed { - s.listenerMutex.Unlock() - return errors.New("Server is already closed") - } - if s.listener != nil { - s.listenerMutex.Unlock() - return errors.New("ListenAndServe may only be called once") - } - - var ln quic.Listener - var err error - if conn == nil { - ln, err = quicListenAddr(s.Addr, tlsConfig, s.QuicConfig) - } else { - ln, err = quicListen(conn, tlsConfig, s.QuicConfig) - } - if err != nil { - s.listenerMutex.Unlock() - return err - } - s.listener = ln - s.listenerMutex.Unlock() - - for { - sess, err := ln.Accept() - if err != nil { - return err - } - go s.handleHeaderStream(sess.(streamCreator)) - } -} - -func (s *Server) handleHeaderStream(session streamCreator) { - stream, err := session.AcceptStream() - if err != nil { - session.CloseWithError(quic.ErrorCode(qerr.InternalError), err) - return - } - - hpackDecoder := hpack.NewDecoder(4096, nil) - h2framer := http2.NewFramer(nil, stream) - - var headerStreamMutex sync.Mutex // Protects concurrent calls to Write() - for { - if err := s.handleRequest(session, stream, &headerStreamMutex, hpackDecoder, h2framer); err != nil { - // QuicErrors must originate from stream.Read() returning an error. - // In this case, the session has already logged the error, so we don't - // need to log it again. 
- errorCode := qerr.InternalError - if qerr, ok := err.(*qerr.QuicError); ok { - errorCode = qerr.ErrorCode - s.logger.Errorf("error handling h2 request: %s", err.Error()) - } - session.CloseWithError(quic.ErrorCode(errorCode), err) - return - } - } -} - -func (s *Server) handleRequest(session streamCreator, headerStream quic.Stream, headerStreamMutex *sync.Mutex, hpackDecoder *hpack.Decoder, h2framer *http2.Framer) error { - h2frame, err := h2framer.ReadFrame() - if err != nil { - return qerr.Error(qerr.InternalError, "cannot read frame") - } - var h2headersFrame *http2.HeadersFrame - switch f := h2frame.(type) { - case *http2.PriorityFrame: - // ignore PRIORITY frames - s.logger.Debugf("Ignoring H2 PRIORITY frame: %#v", f) - return nil - case *http2.HeadersFrame: - h2headersFrame = f - default: - return qerr.Error(qerr.ProtocolViolation, "expected a header frame") - } - - if !h2headersFrame.HeadersEnded() { - return errors.New("http2 header continuation not implemented") - } - headers, err := hpackDecoder.DecodeFull(h2headersFrame.HeaderBlockFragment()) - if err != nil { - s.logger.Errorf("invalid http2 headers encoding: %s", err.Error()) - return err - } - - req, err := requestFromHeaders(headers) - if err != nil { - return err - } - - if s.logger.Debug() { - s.logger.Infof("%s %s%s, on data stream %d", req.Method, req.Host, req.RequestURI, h2headersFrame.StreamID) - } else { - s.logger.Infof("%s %s%s", req.Method, req.Host, req.RequestURI) - } - - dataStream, err := session.GetOrOpenStream(protocol.StreamID(h2headersFrame.StreamID)) - if err != nil { - return err - } - // this can happen if the client immediately closes the data stream after sending the request and the runtime processes the reset before the request - if dataStream == nil { - return nil - } - - // handleRequest should be as non-blocking as possible to minimize - // head-of-line blocking. 
Potentially blocking code is run in a separate - // goroutine, enabling handleRequest to return before the code is executed. - go func() { - streamEnded := h2headersFrame.StreamEnded() - if streamEnded { - dataStream.(remoteCloser).CloseRemote(0) - streamEnded = true - _, _ = dataStream.Read([]byte{0}) // read the eof - } - - req = req.WithContext(dataStream.Context()) - reqBody := newRequestBody(dataStream) - req.Body = reqBody - - req.RemoteAddr = session.RemoteAddr().String() - - responseWriter := newResponseWriter(headerStream, headerStreamMutex, dataStream, protocol.StreamID(h2headersFrame.StreamID), s.logger) - - handler := s.Handler - if handler == nil { - handler = http.DefaultServeMux - } - panicked := false - func() { - defer func() { - if p := recover(); p != nil { - // Copied from net/http/server.go - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - s.logger.Errorf("http: panic serving: %v\n%s", p, buf) - panicked = true - } - }() - handler.ServeHTTP(responseWriter, req) - }() - if panicked { - responseWriter.WriteHeader(500) - } else { - responseWriter.WriteHeader(200) - } - if responseWriter.dataStream != nil { - if !streamEnded && !reqBody.requestRead { - // in gQUIC, the error code doesn't matter, so just use 0 here - responseWriter.dataStream.CancelRead(0) - } - responseWriter.dataStream.Close() - } - if s.CloseAfterFirstRequest { - time.Sleep(100 * time.Millisecond) - session.Close() - } - }() - - return nil -} - -// Close the server immediately, aborting requests and sending CONNECTION_CLOSE frames to connected clients. -// Close in combination with ListenAndServe() (instead of Serve()) may race if it is called before a UDP socket is established. 
-func (s *Server) Close() error { - s.listenerMutex.Lock() - defer s.listenerMutex.Unlock() - s.closed = true - if s.listener != nil { - err := s.listener.Close() - s.listener = nil - return err - } - return nil -} - -// CloseGracefully shuts down the server gracefully. The server sends a GOAWAY frame first, then waits for either timeout to trigger, or for all running requests to complete. -// CloseGracefully in combination with ListenAndServe() (instead of Serve()) may race if it is called before a UDP socket is established. -func (s *Server) CloseGracefully(timeout time.Duration) error { - // TODO: implement - return nil -} - -// SetQuicHeaders can be used to set the proper headers that announce that this server supports QUIC. -// The values that are set depend on the port information from s.Server.Addr, and currently look like this (if Addr has port 443): -// Alt-Svc: quic=":443"; ma=2592000; v="33,32,31,30" -func (s *Server) SetQuicHeaders(hdr http.Header) error { - port := atomic.LoadUint32(&s.port) - - if port == 0 { - // Extract port from s.Server.Addr - _, portStr, err := net.SplitHostPort(s.Server.Addr) - if err != nil { - return err - } - portInt, err := net.LookupPort("tcp", portStr) - if err != nil { - return err - } - port = uint32(portInt) - atomic.StoreUint32(&s.port, port) - } - - if s.supportedVersionsAsString == "" { - var versions []string - for _, v := range protocol.SupportedVersions { - versions = append(versions, v.ToAltSvc()) - } - s.supportedVersionsAsString = strings.Join(versions, ",") - } - - hdr.Add("Alt-Svc", fmt.Sprintf(`quic=":%d"; ma=2592000; v="%s"`, port, s.supportedVersionsAsString)) - - return nil -} - -// ListenAndServeQUIC listens on the UDP network address addr and calls the -// handler for HTTP/2 requests on incoming connections. http.DefaultServeMux is -// used when handler is nil. 
-func ListenAndServeQUIC(addr, certFile, keyFile string, handler http.Handler) error { - server := &Server{ - Server: &http.Server{ - Addr: addr, - Handler: handler, - }, - } - return server.ListenAndServeTLS(certFile, keyFile) -} - -// ListenAndServe listens on the given network address for both, TLS and QUIC -// connetions in parallel. It returns if one of the two returns an error. -// http.DefaultServeMux is used when handler is nil. -// The correct Alt-Svc headers for QUIC are set. -func ListenAndServe(addr, certFile, keyFile string, handler http.Handler) error { - // Load certs - var err error - certs := make([]tls.Certificate, 1) - certs[0], err = tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return err - } - // We currently only use the cert-related stuff from tls.Config, - // so we don't need to make a full copy. - config := &tls.Config{ - Certificates: certs, - } - - // Open the listeners - udpAddr, err := net.ResolveUDPAddr("udp", addr) - if err != nil { - return err - } - udpConn, err := net.ListenUDP("udp", udpAddr) - if err != nil { - return err - } - defer udpConn.Close() - - tcpAddr, err := net.ResolveTCPAddr("tcp", addr) - if err != nil { - return err - } - tcpConn, err := net.ListenTCP("tcp", tcpAddr) - if err != nil { - return err - } - defer tcpConn.Close() - - tlsConn := tls.NewListener(tcpConn, config) - defer tlsConn.Close() - - // Start the servers - httpServer := &http.Server{ - Addr: addr, - TLSConfig: config, - } - - quicServer := &Server{ - Server: httpServer, - } - - if handler == nil { - handler = http.DefaultServeMux - } - httpServer.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - quicServer.SetQuicHeaders(w.Header()) - handler.ServeHTTP(w, r) - }) - - hErr := make(chan error) - qErr := make(chan error) - go func() { - hErr <- httpServer.Serve(tlsConn) - }() - go func() { - qErr <- quicServer.Serve(udpConn) - }() - - select { - case err := <-hErr: - quicServer.Close() - return err - case err := 
<-qErr: - // Cannot close the HTTP server or wait for requests to complete properly :/ - return err - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/interface.go b/vendor/github.com/lucas-clemente/quic-go/interface.go deleted file mode 100644 index a83d09b84..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/interface.go +++ /dev/null @@ -1,224 +0,0 @@ -package quic - -import ( - "context" - "crypto/tls" - "io" - "net" - "time" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -// The StreamID is the ID of a QUIC stream. -type StreamID = protocol.StreamID - -// A VersionNumber is a QUIC version number. -type VersionNumber = protocol.VersionNumber - -// A Cookie can be used to verify the ownership of the client address. -type Cookie struct { - RemoteAddr string - SentTime time.Time -} - -// An ErrorCode is an application-defined error code. -type ErrorCode = protocol.ApplicationErrorCode - -// Stream is the interface implemented by QUIC streams -type Stream interface { - // StreamID returns the stream ID. - StreamID() StreamID - // Read reads data from the stream. - // Read can be made to time out and return a net.Error with Timeout() == true - // after a fixed time limit; see SetDeadline and SetReadDeadline. - // If the stream was canceled by the peer, the error implements the StreamError - // interface, and Canceled() == true. - // If the session was closed due to a timeout, the error satisfies - // the net.Error interface, and Timeout() will be true. - io.Reader - // Write writes data to the stream. - // Write can be made to time out and return a net.Error with Timeout() == true - // after a fixed time limit; see SetDeadline and SetWriteDeadline. - // If the stream was canceled by the peer, the error implements the StreamError - // interface, and Canceled() == true. - // If the session was closed due to a timeout, the error satisfies - // the net.Error interface, and Timeout() will be true. 
- io.Writer - // Close closes the write-direction of the stream. - // Future calls to Write are not permitted after calling Close. - // It must not be called concurrently with Write. - // It must not be called after calling CancelWrite. - io.Closer - // CancelWrite aborts sending on this stream. - // Data already written, but not yet delivered to the peer is not guaranteed to be delivered reliably. - // Write will unblock immediately, and future calls to Write will fail. - // When called multiple times or after closing the stream it is a no-op. - CancelWrite(ErrorCode) - // CancelRead aborts receiving on this stream. - // It will ask the peer to stop transmitting stream data. - // Read will unblock immediately, and future Read calls will fail. - // When called multiple times or after reading the io.EOF it is a no-op. - CancelRead(ErrorCode) - // The context is canceled as soon as the write-side of the stream is closed. - // This happens when Close() or CancelWrite() is called, or when the peer - // cancels the read-side of their stream. - // Warning: This API should not be considered stable and might change soon. - Context() context.Context - // SetReadDeadline sets the deadline for future Read calls and - // any currently-blocked Read call. - // A zero value for t means Read will not time out. - SetReadDeadline(t time.Time) error - // SetWriteDeadline sets the deadline for future Write calls - // and any currently-blocked Write call. - // Even if write times out, it may return n > 0, indicating that - // some of the data was successfully written. - // A zero value for t means Write will not time out. - SetWriteDeadline(t time.Time) error - // SetDeadline sets the read and write deadlines associated - // with the connection. It is equivalent to calling both - // SetReadDeadline and SetWriteDeadline. - SetDeadline(t time.Time) error -} - -// A ReceiveStream is a unidirectional Receive Stream. 
-type ReceiveStream interface { - // see Stream.StreamID - StreamID() StreamID - // see Stream.Read - io.Reader - // see Stream.CancelRead - CancelRead(ErrorCode) - // see Stream.SetReadDealine - SetReadDeadline(t time.Time) error -} - -// A SendStream is a unidirectional Send Stream. -type SendStream interface { - // see Stream.StreamID - StreamID() StreamID - // see Stream.Write - io.Writer - // see Stream.Close - io.Closer - // see Stream.CancelWrite - CancelWrite(ErrorCode) - // see Stream.Context - Context() context.Context - // see Stream.SetWriteDeadline - SetWriteDeadline(t time.Time) error -} - -// StreamError is returned by Read and Write when the peer cancels the stream. -type StreamError interface { - error - Canceled() bool - ErrorCode() ErrorCode -} - -// A Session is a QUIC connection between two peers. -type Session interface { - // AcceptStream returns the next stream opened by the peer, blocking until one is available. - // If the session was closed due to a timeout, the error satisfies - // the net.Error interface, and Timeout() will be true. - AcceptStream() (Stream, error) - // AcceptUniStream returns the next unidirectional stream opened by the peer, blocking until one is available. - // If the session was closed due to a timeout, the error satisfies - // the net.Error interface, and Timeout() will be true. - AcceptUniStream() (ReceiveStream, error) - // OpenStream opens a new bidirectional QUIC stream. - // There is no signaling to the peer about new streams: - // The peer can only accept the stream after data has been sent on the stream. - // If the error is non-nil, it satisfies the net.Error interface. - // When reaching the peer's stream limit, err.Temporary() will be true. - // If the session was closed due to a timeout, Timeout() will be true. - OpenStream() (Stream, error) - // OpenStreamSync opens a new bidirectional QUIC stream. - // It blocks until a new stream can be opened. 
- // If the error is non-nil, it satisfies the net.Error interface. - // If the session was closed due to a timeout, Timeout() will be true. - OpenStreamSync() (Stream, error) - // OpenUniStream opens a new outgoing unidirectional QUIC stream. - // If the error is non-nil, it satisfies the net.Error interface. - // When reaching the peer's stream limit, Temporary() will be true. - // If the session was closed due to a timeout, Timeout() will be true. - OpenUniStream() (SendStream, error) - // OpenUniStreamSync opens a new outgoing unidirectional QUIC stream. - // It blocks until a new stream can be opened. - // If the error is non-nil, it satisfies the net.Error interface. - // If the session was closed due to a timeout, Timeout() will be true. - OpenUniStreamSync() (SendStream, error) - // LocalAddr returns the local address. - LocalAddr() net.Addr - // RemoteAddr returns the address of the peer. - RemoteAddr() net.Addr - // Close the connection. - io.Closer - // Close the connection with an error. - // The error must not be nil. - CloseWithError(ErrorCode, error) error - // The context is cancelled when the session is closed. - // Warning: This API should not be considered stable and might change soon. - Context() context.Context - // ConnectionState returns basic details about the QUIC connection. - // Warning: This API should not be considered stable and might change soon. - ConnectionState() tls.ConnectionState -} - -// Config contains all configuration data needed for a QUIC server or client. -type Config struct { - // The QUIC versions that can be negotiated. - // If not set, it uses all versions available. - // Warning: This API should not be considered stable and will change soon. - Versions []VersionNumber - // The length of the connection ID in bytes. - // It can be 0, or any value between 4 and 18. - // If not set, the interpretation depends on where the Config is used: - // If used for dialing an address, a 0 byte connection ID will be used. 
- // If used for a server, or dialing on a packet conn, a 4 byte connection ID will be used. - // When dialing on a packet conn, the ConnectionIDLength value must be the same for every Dial call. - ConnectionIDLength int - // HandshakeTimeout is the maximum duration that the cryptographic handshake may take. - // If the timeout is exceeded, the connection is closed. - // If this value is zero, the timeout is set to 10 seconds. - HandshakeTimeout time.Duration - // IdleTimeout is the maximum duration that may pass without any incoming network activity. - // This value only applies after the handshake has completed. - // If the timeout is exceeded, the connection is closed. - // If this value is zero, the timeout is set to 30 seconds. - IdleTimeout time.Duration - // AcceptCookie determines if a Cookie is accepted. - // It is called with cookie = nil if the client didn't send an Cookie. - // If not set, it verifies that the address matches, and that the Cookie was issued within the last 24 hours. - // This option is only valid for the server. - AcceptCookie func(clientAddr net.Addr, cookie *Cookie) bool - // MaxReceiveStreamFlowControlWindow is the maximum stream-level flow control window for receiving data. - // If this value is zero, it will default to 1 MB for the server and 6 MB for the client. - MaxReceiveStreamFlowControlWindow uint64 - // MaxReceiveConnectionFlowControlWindow is the connection-level flow control window for receiving data. - // If this value is zero, it will default to 1.5 MB for the server and 15 MB for the client. - MaxReceiveConnectionFlowControlWindow uint64 - // MaxIncomingStreams is the maximum number of concurrent bidirectional streams that a peer is allowed to open. - // If not set, it will default to 100. - // If set to a negative value, it doesn't allow any bidirectional streams. - MaxIncomingStreams int - // MaxIncomingUniStreams is the maximum number of concurrent unidirectional streams that a peer is allowed to open. 
- // If not set, it will default to 100. - // If set to a negative value, it doesn't allow any unidirectional streams. - MaxIncomingUniStreams int - // The StatelessResetKey is used to generate stateless reset tokens. - // If no key is configured, sending of stateless resets is disabled. - StatelessResetKey []byte - // KeepAlive defines whether this peer will periodically send a packet to keep the connection alive. - KeepAlive bool -} - -// A Listener for incoming QUIC connections -type Listener interface { - // Close the server. All active sessions will be closed. - Close() error - // Addr returns the local network addr that the server is listening on. - Addr() net.Addr - // Accept returns new sessions. It should be called in a loop. - Accept() (Session, error) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/gen.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/gen.go deleted file mode 100644 index 32235f81a..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -package ackhandler - -//go:generate genny -pkg ackhandler -in ../utils/linkedlist/linkedlist.go -out packet_linkedlist.go gen Item=Packet diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/interfaces.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/interfaces.go deleted file mode 100644 index 362b9fbe8..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/interfaces.go +++ /dev/null @@ -1,50 +0,0 @@ -package ackhandler - -import ( - "time" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -// SentPacketHandler handles ACKs received for outgoing packets -type SentPacketHandler interface { - // SentPacket may modify the packet - SentPacket(packet *Packet) - SentPacketsAsRetransmission(packets []*Packet, retransmissionOf protocol.PacketNumber) - ReceivedAck(ackFrame 
*wire.AckFrame, withPacketNumber protocol.PacketNumber, encLevel protocol.EncryptionLevel, recvTime time.Time) error - SetHandshakeComplete() - ResetForRetry() error - - // The SendMode determines if and what kind of packets can be sent. - SendMode() SendMode - // TimeUntilSend is the time when the next packet should be sent. - // It is used for pacing packets. - TimeUntilSend() time.Time - // ShouldSendNumPackets returns the number of packets that should be sent immediately. - // It always returns a number greater or equal than 1. - // A number greater than 1 is returned when the pacing delay is smaller than the minimum pacing delay. - // Note that the number of packets is only calculated based on the pacing algorithm. - // Before sending any packet, SendingAllowed() must be called to learn if we can actually send it. - ShouldSendNumPackets() int - - // only to be called once the handshake is complete - GetLowestPacketNotConfirmedAcked() protocol.PacketNumber - DequeuePacketForRetransmission() *Packet - DequeueProbePacket() (*Packet, error) - - PeekPacketNumber(protocol.EncryptionLevel) (protocol.PacketNumber, protocol.PacketNumberLen) - PopPacketNumber(protocol.EncryptionLevel) protocol.PacketNumber - - GetAlarmTimeout() time.Time - OnAlarm() error -} - -// ReceivedPacketHandler handles ACKs needed to send for incoming packets -type ReceivedPacketHandler interface { - ReceivedPacket(pn protocol.PacketNumber, encLevel protocol.EncryptionLevel, rcvTime time.Time, shouldInstigateAck bool) error - IgnoreBelow(protocol.PacketNumber) - - GetAlarmTimeout() time.Time - GetAckFrame(protocol.EncryptionLevel) *wire.AckFrame -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet.go deleted file mode 100644 index 9673a85c7..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet.go +++ /dev/null @@ -1,29 +0,0 @@ -package ackhandler - -import ( - 
"time" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -// A Packet is a packet -type Packet struct { - PacketNumber protocol.PacketNumber - PacketType protocol.PacketType - Frames []wire.Frame - Length protocol.ByteCount - EncryptionLevel protocol.EncryptionLevel - SendTime time.Time - - largestAcked protocol.PacketNumber // if the packet contains an ACK, the LargestAcked value of that ACK - - // There are two reasons why a packet cannot be retransmitted: - // * it was already retransmitted - // * this packet is a retransmission, and we already received an ACK for the original packet - canBeRetransmitted bool - includedInBytesInFlight bool - retransmittedAs []protocol.PacketNumber - isRetransmission bool // we need a separate bool here because 0 is a valid packet number - retransmissionOf protocol.PacketNumber -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_linkedlist.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_linkedlist.go deleted file mode 100644 index bb74f4ef9..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_linkedlist.go +++ /dev/null @@ -1,217 +0,0 @@ -// This file was automatically generated by genny. -// Any changes will be lost if this file is regenerated. -// see https://github.com/cheekybits/genny - -package ackhandler - -// Linked list implementation from the Go standard library. - -// PacketElement is an element of a linked list. -type PacketElement struct { - // Next and previous pointers in the doubly-linked list of elements. - // To simplify the implementation, internally a list l is implemented - // as a ring, such that &l.root is both the next element of the last - // list element (l.Back()) and the previous element of the first list - // element (l.Front()). - next, prev *PacketElement - - // The list to which this element belongs. 
- list *PacketList - - // The value stored with this element. - Value Packet -} - -// Next returns the next list element or nil. -func (e *PacketElement) Next() *PacketElement { - if p := e.next; e.list != nil && p != &e.list.root { - return p - } - return nil -} - -// Prev returns the previous list element or nil. -func (e *PacketElement) Prev() *PacketElement { - if p := e.prev; e.list != nil && p != &e.list.root { - return p - } - return nil -} - -// PacketList is a linked list of Packets. -type PacketList struct { - root PacketElement // sentinel list element, only &root, root.prev, and root.next are used - len int // current list length excluding (this) sentinel element -} - -// Init initializes or clears list l. -func (l *PacketList) Init() *PacketList { - l.root.next = &l.root - l.root.prev = &l.root - l.len = 0 - return l -} - -// NewPacketList returns an initialized list. -func NewPacketList() *PacketList { return new(PacketList).Init() } - -// Len returns the number of elements of list l. -// The complexity is O(1). -func (l *PacketList) Len() int { return l.len } - -// Front returns the first element of list l or nil if the list is empty. -func (l *PacketList) Front() *PacketElement { - if l.len == 0 { - return nil - } - return l.root.next -} - -// Back returns the last element of list l or nil if the list is empty. -func (l *PacketList) Back() *PacketElement { - if l.len == 0 { - return nil - } - return l.root.prev -} - -// lazyInit lazily initializes a zero List value. -func (l *PacketList) lazyInit() { - if l.root.next == nil { - l.Init() - } -} - -// insert inserts e after at, increments l.len, and returns e. -func (l *PacketList) insert(e, at *PacketElement) *PacketElement { - n := at.next - at.next = e - e.prev = at - e.next = n - n.prev = e - e.list = l - l.len++ - return e -} - -// insertValue is a convenience wrapper for insert(&Element{Value: v}, at). 
-func (l *PacketList) insertValue(v Packet, at *PacketElement) *PacketElement { - return l.insert(&PacketElement{Value: v}, at) -} - -// remove removes e from its list, decrements l.len, and returns e. -func (l *PacketList) remove(e *PacketElement) *PacketElement { - e.prev.next = e.next - e.next.prev = e.prev - e.next = nil // avoid memory leaks - e.prev = nil // avoid memory leaks - e.list = nil - l.len-- - return e -} - -// Remove removes e from l if e is an element of list l. -// It returns the element value e.Value. -// The element must not be nil. -func (l *PacketList) Remove(e *PacketElement) Packet { - if e.list == l { - // if e.list == l, l must have been initialized when e was inserted - // in l or l == nil (e is a zero Element) and l.remove will crash - l.remove(e) - } - return e.Value -} - -// PushFront inserts a new element e with value v at the front of list l and returns e. -func (l *PacketList) PushFront(v Packet) *PacketElement { - l.lazyInit() - return l.insertValue(v, &l.root) -} - -// PushBack inserts a new element e with value v at the back of list l and returns e. -func (l *PacketList) PushBack(v Packet) *PacketElement { - l.lazyInit() - return l.insertValue(v, l.root.prev) -} - -// InsertBefore inserts a new element e with value v immediately before mark and returns e. -// If mark is not an element of l, the list is not modified. -// The mark must not be nil. -func (l *PacketList) InsertBefore(v Packet, mark *PacketElement) *PacketElement { - if mark.list != l { - return nil - } - // see comment in List.Remove about initialization of l - return l.insertValue(v, mark.prev) -} - -// InsertAfter inserts a new element e with value v immediately after mark and returns e. -// If mark is not an element of l, the list is not modified. -// The mark must not be nil. 
-func (l *PacketList) InsertAfter(v Packet, mark *PacketElement) *PacketElement { - if mark.list != l { - return nil - } - // see comment in List.Remove about initialization of l - return l.insertValue(v, mark) -} - -// MoveToFront moves element e to the front of list l. -// If e is not an element of l, the list is not modified. -// The element must not be nil. -func (l *PacketList) MoveToFront(e *PacketElement) { - if e.list != l || l.root.next == e { - return - } - // see comment in List.Remove about initialization of l - l.insert(l.remove(e), &l.root) -} - -// MoveToBack moves element e to the back of list l. -// If e is not an element of l, the list is not modified. -// The element must not be nil. -func (l *PacketList) MoveToBack(e *PacketElement) { - if e.list != l || l.root.prev == e { - return - } - // see comment in List.Remove about initialization of l - l.insert(l.remove(e), l.root.prev) -} - -// MoveBefore moves element e to its new position before mark. -// If e or mark is not an element of l, or e == mark, the list is not modified. -// The element and mark must not be nil. -func (l *PacketList) MoveBefore(e, mark *PacketElement) { - if e.list != l || e == mark || mark.list != l { - return - } - l.insert(l.remove(e), mark.prev) -} - -// MoveAfter moves element e to its new position after mark. -// If e or mark is not an element of l, or e == mark, the list is not modified. -// The element and mark must not be nil. -func (l *PacketList) MoveAfter(e, mark *PacketElement) { - if e.list != l || e == mark || mark.list != l { - return - } - l.insert(l.remove(e), mark) -} - -// PushBackList inserts a copy of an other list at the back of list l. -// The lists l and other may be the same. They must not be nil. 
-func (l *PacketList) PushBackList(other *PacketList) { - l.lazyInit() - for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() { - l.insertValue(e.Value, l.root.prev) - } -} - -// PushFrontList inserts a copy of an other list at the front of list l. -// The lists l and other may be the same. They must not be nil. -func (l *PacketList) PushFrontList(other *PacketList) { - l.lazyInit() - for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() { - l.insertValue(e.Value, &l.root) - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_number_generator.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_number_generator.go deleted file mode 100644 index 56fbf3d80..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_number_generator.go +++ /dev/null @@ -1,78 +0,0 @@ -package ackhandler - -import ( - "crypto/rand" - "math" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -// The packetNumberGenerator generates the packet number for the next packet -// it randomly skips a packet number every averagePeriod packets (on average) -// it is guarantued to never skip two consecutive packet numbers -type packetNumberGenerator struct { - averagePeriod protocol.PacketNumber - - next protocol.PacketNumber - nextToSkip protocol.PacketNumber - - history []protocol.PacketNumber -} - -func newPacketNumberGenerator(initial, averagePeriod protocol.PacketNumber) *packetNumberGenerator { - g := &packetNumberGenerator{ - next: initial, - averagePeriod: averagePeriod, - } - g.generateNewSkip() - return g -} - -func (p *packetNumberGenerator) Peek() protocol.PacketNumber { - return p.next -} - -func (p *packetNumberGenerator) Pop() protocol.PacketNumber { - next := p.next - - // generate a new packet number for the next packet - p.next++ - - if p.next == p.nextToSkip { - if len(p.history)+1 > 
protocol.MaxTrackedSkippedPackets { - p.history = p.history[1:] - } - p.history = append(p.history, p.next) - p.next++ - p.generateNewSkip() - } - - return next -} - -func (p *packetNumberGenerator) generateNewSkip() { - num := p.getRandomNumber() - skip := protocol.PacketNumber(num) * (p.averagePeriod - 1) / (math.MaxUint16 / 2) - // make sure that there are never two consecutive packet numbers that are skipped - p.nextToSkip = p.next + 2 + skip -} - -// getRandomNumber() generates a cryptographically secure random number between 0 and MaxUint16 (= 65535) -// The expectation value is 65535/2 -func (p *packetNumberGenerator) getRandomNumber() uint16 { - b := make([]byte, 2) - rand.Read(b) // ignore the error here - - num := uint16(b[0])<<8 + uint16(b[1]) - return num -} - -func (p *packetNumberGenerator) Validate(ack *wire.AckFrame) bool { - for _, pn := range p.history { - if ack.AcksPacket(pn) { - return false - } - } - return true -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_handler.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_handler.go deleted file mode 100644 index 1df64ea87..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_handler.go +++ /dev/null @@ -1,98 +0,0 @@ -package ackhandler - -import ( - "fmt" - "time" - - "github.com/lucas-clemente/quic-go/internal/congestion" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -const ( - // maximum delay that can be applied to an ACK for a retransmittable packet - ackSendDelay = 25 * time.Millisecond - // initial maximum number of retransmittable packets received before sending an ack. 
- initialRetransmittablePacketsBeforeAck = 2 - // number of retransmittable that an ACK is sent for - retransmittablePacketsBeforeAck = 10 - // 1/5 RTT delay when doing ack decimation - ackDecimationDelay = 1.0 / 4 - // 1/8 RTT delay when doing ack decimation - shortAckDecimationDelay = 1.0 / 8 - // Minimum number of packets received before ack decimation is enabled. - // This intends to avoid the beginning of slow start, when CWNDs may be - // rapidly increasing. - minReceivedBeforeAckDecimation = 100 - // Maximum number of packets to ack immediately after a missing packet for - // fast retransmission to kick in at the sender. This limit is created to - // reduce the number of acks sent that have no benefit for fast retransmission. - // Set to the number of nacks needed for fast retransmit plus one for protection - // against an ack loss - maxPacketsAfterNewMissing = 4 -) - -type receivedPacketHandler struct { - initialPackets *receivedPacketTracker - handshakePackets *receivedPacketTracker - oneRTTPackets *receivedPacketTracker -} - -var _ ReceivedPacketHandler = &receivedPacketHandler{} - -// NewReceivedPacketHandler creates a new receivedPacketHandler -func NewReceivedPacketHandler( - rttStats *congestion.RTTStats, - logger utils.Logger, - version protocol.VersionNumber, -) ReceivedPacketHandler { - return &receivedPacketHandler{ - initialPackets: newReceivedPacketTracker(rttStats, logger, version), - handshakePackets: newReceivedPacketTracker(rttStats, logger, version), - oneRTTPackets: newReceivedPacketTracker(rttStats, logger, version), - } -} - -func (h *receivedPacketHandler) ReceivedPacket( - pn protocol.PacketNumber, - encLevel protocol.EncryptionLevel, - rcvTime time.Time, - shouldInstigateAck bool, -) error { - switch encLevel { - case protocol.EncryptionInitial: - return h.initialPackets.ReceivedPacket(pn, rcvTime, shouldInstigateAck) - case protocol.EncryptionHandshake: - return h.handshakePackets.ReceivedPacket(pn, rcvTime, shouldInstigateAck) - 
case protocol.Encryption1RTT: - return h.oneRTTPackets.ReceivedPacket(pn, rcvTime, shouldInstigateAck) - default: - return fmt.Errorf("received packet with unknown encryption level: %s", encLevel) - } -} - -// only to be used with 1-RTT packets -func (h *receivedPacketHandler) IgnoreBelow(pn protocol.PacketNumber) { - h.oneRTTPackets.IgnoreBelow(pn) -} - -func (h *receivedPacketHandler) GetAlarmTimeout() time.Time { - initialAlarm := h.initialPackets.GetAlarmTimeout() - handshakeAlarm := h.handshakePackets.GetAlarmTimeout() - oneRTTAlarm := h.oneRTTPackets.GetAlarmTimeout() - return utils.MinNonZeroTime(utils.MinNonZeroTime(initialAlarm, handshakeAlarm), oneRTTAlarm) -} - -func (h *receivedPacketHandler) GetAckFrame(encLevel protocol.EncryptionLevel) *wire.AckFrame { - switch encLevel { - case protocol.EncryptionInitial: - return h.initialPackets.GetAckFrame() - case protocol.EncryptionHandshake: - return h.handshakePackets.GetAckFrame() - case protocol.Encryption1RTT: - return h.oneRTTPackets.GetAckFrame() - default: - return nil - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_history.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_history.go deleted file mode 100644 index 0daba413d..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_history.go +++ /dev/null @@ -1,122 +0,0 @@ -package ackhandler - -import ( - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -// The receivedPacketHistory stores if a packet number has already been received. -// It generates ACK ranges which can be used to assemble an ACK frame. -// It does not store packet contents. 
-type receivedPacketHistory struct { - ranges *utils.PacketIntervalList - - lowestInReceivedPacketNumbers protocol.PacketNumber -} - -var errTooManyOutstandingReceivedAckRanges = qerr.Error(qerr.InternalError, "Too many outstanding received ACK ranges") - -// newReceivedPacketHistory creates a new received packet history -func newReceivedPacketHistory() *receivedPacketHistory { - return &receivedPacketHistory{ - ranges: utils.NewPacketIntervalList(), - } -} - -// ReceivedPacket registers a packet with PacketNumber p and updates the ranges -func (h *receivedPacketHistory) ReceivedPacket(p protocol.PacketNumber) error { - if h.ranges.Len() >= protocol.MaxTrackedReceivedAckRanges { - return errTooManyOutstandingReceivedAckRanges - } - - if h.ranges.Len() == 0 { - h.ranges.PushBack(utils.PacketInterval{Start: p, End: p}) - return nil - } - - for el := h.ranges.Back(); el != nil; el = el.Prev() { - // p already included in an existing range. Nothing to do here - if p >= el.Value.Start && p <= el.Value.End { - return nil - } - - var rangeExtended bool - if el.Value.End == p-1 { // extend a range at the end - rangeExtended = true - el.Value.End = p - } else if el.Value.Start == p+1 { // extend a range at the beginning - rangeExtended = true - el.Value.Start = p - } - - // if a range was extended (either at the beginning or at the end, maybe it is possible to merge two ranges into one) - if rangeExtended { - prev := el.Prev() - if prev != nil && prev.Value.End+1 == el.Value.Start { // merge two ranges - prev.Value.End = el.Value.End - h.ranges.Remove(el) - return nil - } - return nil // if the two ranges were not merge, we're done here - } - - // create a new range at the end - if p > el.Value.End { - h.ranges.InsertAfter(utils.PacketInterval{Start: p, End: p}, el) - return nil - } - } - - // create a new range at the beginning - h.ranges.InsertBefore(utils.PacketInterval{Start: p, End: p}, h.ranges.Front()) - - return nil -} - -// DeleteBelow deletes all entries below 
(but not including) p -func (h *receivedPacketHistory) DeleteBelow(p protocol.PacketNumber) { - if p <= h.lowestInReceivedPacketNumbers { - return - } - h.lowestInReceivedPacketNumbers = p - - nextEl := h.ranges.Front() - for el := h.ranges.Front(); nextEl != nil; el = nextEl { - nextEl = el.Next() - - if p > el.Value.Start && p <= el.Value.End { - el.Value.Start = p - } else if el.Value.End < p { // delete a whole range - h.ranges.Remove(el) - } else { // no ranges affected. Nothing to do - return - } - } -} - -// GetAckRanges gets a slice of all AckRanges that can be used in an AckFrame -func (h *receivedPacketHistory) GetAckRanges() []wire.AckRange { - if h.ranges.Len() == 0 { - return nil - } - - ackRanges := make([]wire.AckRange, h.ranges.Len()) - i := 0 - for el := h.ranges.Back(); el != nil; el = el.Prev() { - ackRanges[i] = wire.AckRange{Smallest: el.Value.Start, Largest: el.Value.End} - i++ - } - return ackRanges -} - -func (h *receivedPacketHistory) GetHighestAckRange() wire.AckRange { - ackRange := wire.AckRange{} - if h.ranges.Len() > 0 { - r := h.ranges.Back().Value - ackRange.Smallest = r.Start - ackRange.Largest = r.End - } - return ackRange -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_tracker.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_tracker.go deleted file mode 100644 index b7840490f..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/received_packet_tracker.go +++ /dev/null @@ -1,191 +0,0 @@ -package ackhandler - -import ( - "time" - - "github.com/lucas-clemente/quic-go/internal/congestion" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type receivedPacketTracker struct { - largestObserved protocol.PacketNumber - ignoreBelow protocol.PacketNumber - largestObservedReceivedTime time.Time - - packetHistory 
*receivedPacketHistory - - ackSendDelay time.Duration - rttStats *congestion.RTTStats - - packetsReceivedSinceLastAck int - retransmittablePacketsReceivedSinceLastAck int - ackQueued bool - ackAlarm time.Time - lastAck *wire.AckFrame - - logger utils.Logger - - version protocol.VersionNumber -} - -func newReceivedPacketTracker( - rttStats *congestion.RTTStats, - logger utils.Logger, - version protocol.VersionNumber, -) *receivedPacketTracker { - return &receivedPacketTracker{ - packetHistory: newReceivedPacketHistory(), - ackSendDelay: ackSendDelay, - rttStats: rttStats, - logger: logger, - version: version, - } -} - -func (h *receivedPacketTracker) ReceivedPacket(packetNumber protocol.PacketNumber, rcvTime time.Time, shouldInstigateAck bool) error { - if packetNumber < h.ignoreBelow { - return nil - } - - isMissing := h.isMissing(packetNumber) - if packetNumber >= h.largestObserved { - h.largestObserved = packetNumber - h.largestObservedReceivedTime = rcvTime - } - - if err := h.packetHistory.ReceivedPacket(packetNumber); err != nil { - return err - } - h.maybeQueueAck(packetNumber, rcvTime, shouldInstigateAck, isMissing) - return nil -} - -// IgnoreBelow sets a lower limit for acking packets. -// Packets with packet numbers smaller than p will not be acked. -func (h *receivedPacketTracker) IgnoreBelow(p protocol.PacketNumber) { - if p <= h.ignoreBelow { - return - } - h.ignoreBelow = p - h.packetHistory.DeleteBelow(p) - if h.logger.Debug() { - h.logger.Debugf("\tIgnoring all packets below %#x.", p) - } -} - -// isMissing says if a packet was reported missing in the last ACK. 
-func (h *receivedPacketTracker) isMissing(p protocol.PacketNumber) bool { - if h.lastAck == nil || p < h.ignoreBelow { - return false - } - return p < h.lastAck.LargestAcked() && !h.lastAck.AcksPacket(p) -} - -func (h *receivedPacketTracker) hasNewMissingPackets() bool { - if h.lastAck == nil { - return false - } - highestRange := h.packetHistory.GetHighestAckRange() - return highestRange.Smallest >= h.lastAck.LargestAcked() && highestRange.Len() <= maxPacketsAfterNewMissing -} - -// maybeQueueAck queues an ACK, if necessary. -// It is implemented analogously to Chrome's QuicConnection::MaybeQueueAck() -// in ACK_DECIMATION_WITH_REORDERING mode. -func (h *receivedPacketTracker) maybeQueueAck(packetNumber protocol.PacketNumber, rcvTime time.Time, shouldInstigateAck, wasMissing bool) { - h.packetsReceivedSinceLastAck++ - - // always ack the first packet - if h.lastAck == nil { - h.logger.Debugf("\tQueueing ACK because the first packet should be acknowledged.") - h.ackQueued = true - return - } - - // Send an ACK if this packet was reported missing in an ACK sent before. - // Ack decimation with reordering relies on the timer to send an ACK, but if - // missing packets we reported in the previous ack, send an ACK immediately. 
- if wasMissing { - if h.logger.Debug() { - h.logger.Debugf("\tQueueing ACK because packet %#x was missing before.", packetNumber) - } - h.ackQueued = true - } - - if !h.ackQueued && shouldInstigateAck { - h.retransmittablePacketsReceivedSinceLastAck++ - - if packetNumber > minReceivedBeforeAckDecimation { - // ack up to 10 packets at once - if h.retransmittablePacketsReceivedSinceLastAck >= retransmittablePacketsBeforeAck { - h.ackQueued = true - if h.logger.Debug() { - h.logger.Debugf("\tQueueing ACK because packet %d packets were received after the last ACK (using threshold: %d).", h.retransmittablePacketsReceivedSinceLastAck, retransmittablePacketsBeforeAck) - } - } else if h.ackAlarm.IsZero() { - // wait for the minimum of the ack decimation delay or the delayed ack time before sending an ack - ackDelay := utils.MinDuration(ackSendDelay, time.Duration(float64(h.rttStats.MinRTT())*float64(ackDecimationDelay))) - h.ackAlarm = rcvTime.Add(ackDelay) - if h.logger.Debug() { - h.logger.Debugf("\tSetting ACK timer to min(1/4 min-RTT, max ack delay): %s (%s from now)", ackDelay, time.Until(h.ackAlarm)) - } - } - } else { - // send an ACK every 2 retransmittable packets - if h.retransmittablePacketsReceivedSinceLastAck >= initialRetransmittablePacketsBeforeAck { - if h.logger.Debug() { - h.logger.Debugf("\tQueueing ACK because packet %d packets were received after the last ACK (using initial threshold: %d).", h.retransmittablePacketsReceivedSinceLastAck, initialRetransmittablePacketsBeforeAck) - } - h.ackQueued = true - } else if h.ackAlarm.IsZero() { - if h.logger.Debug() { - h.logger.Debugf("\tSetting ACK timer to max ack delay: %s", ackSendDelay) - } - h.ackAlarm = rcvTime.Add(ackSendDelay) - } - } - // If there are new missing packets to report, set a short timer to send an ACK. 
- if h.hasNewMissingPackets() { - // wait the minimum of 1/8 min RTT and the existing ack time - ackDelay := time.Duration(float64(h.rttStats.MinRTT()) * float64(shortAckDecimationDelay)) - ackTime := rcvTime.Add(ackDelay) - if h.ackAlarm.IsZero() || h.ackAlarm.After(ackTime) { - h.ackAlarm = ackTime - if h.logger.Debug() { - h.logger.Debugf("\tSetting ACK timer to 1/8 min-RTT: %s (%s from now)", ackDelay, time.Until(h.ackAlarm)) - } - } - } - } - - if h.ackQueued { - // cancel the ack alarm - h.ackAlarm = time.Time{} - } -} - -func (h *receivedPacketTracker) GetAckFrame() *wire.AckFrame { - now := time.Now() - if !h.ackQueued && (h.ackAlarm.IsZero() || h.ackAlarm.After(now)) { - return nil - } - if h.logger.Debug() && !h.ackQueued && !h.ackAlarm.IsZero() { - h.logger.Debugf("Sending ACK because the ACK timer expired.") - } - - ack := &wire.AckFrame{ - AckRanges: h.packetHistory.GetAckRanges(), - DelayTime: now.Sub(h.largestObservedReceivedTime), - } - - h.lastAck = ack - h.ackAlarm = time.Time{} - h.ackQueued = false - h.packetsReceivedSinceLastAck = 0 - h.retransmittablePacketsReceivedSinceLastAck = 0 - return ack -} - -func (h *receivedPacketTracker) GetAlarmTimeout() time.Time { return h.ackAlarm } diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/retransmittable.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/retransmittable.go deleted file mode 100644 index ae622afd5..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/retransmittable.go +++ /dev/null @@ -1,34 +0,0 @@ -package ackhandler - -import "github.com/lucas-clemente/quic-go/internal/wire" - -// Returns a new slice with all non-retransmittable frames deleted. 
-func stripNonRetransmittableFrames(fs []wire.Frame) []wire.Frame { - res := make([]wire.Frame, 0, len(fs)) - for _, f := range fs { - if IsFrameRetransmittable(f) { - res = append(res, f) - } - } - return res -} - -// IsFrameRetransmittable returns true if the frame should be retransmitted. -func IsFrameRetransmittable(f wire.Frame) bool { - switch f.(type) { - case *wire.AckFrame: - return false - default: - return true - } -} - -// HasRetransmittableFrames returns true if at least one frame is retransmittable. -func HasRetransmittableFrames(fs []wire.Frame) bool { - for _, f := range fs { - if IsFrameRetransmittable(f) { - return true - } - } - return false -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/send_mode.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/send_mode.go deleted file mode 100644 index 8cdaa7e6b..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/send_mode.go +++ /dev/null @@ -1,36 +0,0 @@ -package ackhandler - -import "fmt" - -// The SendMode says what kind of packets can be sent. 
-type SendMode uint8 - -const ( - // SendNone means that no packets should be sent - SendNone SendMode = iota - // SendAck means an ACK-only packet should be sent - SendAck - // SendRetransmission means that retransmissions should be sent - SendRetransmission - // SendPTO means that a probe packet should be sent - SendPTO - // SendAny means that any packet should be sent - SendAny -) - -func (s SendMode) String() string { - switch s { - case SendNone: - return "none" - case SendAck: - return "ack" - case SendRetransmission: - return "retransmission" - case SendPTO: - return "pto" - case SendAny: - return "any" - default: - return fmt.Sprintf("invalid send mode: %d", s) - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_handler.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_handler.go deleted file mode 100644 index 51e28fc0f..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_handler.go +++ /dev/null @@ -1,647 +0,0 @@ -package ackhandler - -import ( - "errors" - "fmt" - "math" - "time" - - "github.com/lucas-clemente/quic-go/internal/congestion" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -const ( - // Maximum reordering in time space before time based loss detection considers a packet lost. - // In fraction of an RTT. - timeReorderingFraction = 1.0 / 8 - // Timer granularity. The timer will not be set to a value smaller than granularity. 
- granularity = time.Millisecond -) - -type packetNumberSpace struct { - history *sentPacketHistory - pns *packetNumberGenerator - - largestAcked protocol.PacketNumber - largestSent protocol.PacketNumber -} - -func newPacketNumberSpace(initialPN protocol.PacketNumber) *packetNumberSpace { - return &packetNumberSpace{ - history: newSentPacketHistory(), - pns: newPacketNumberGenerator(initialPN, protocol.SkipPacketAveragePeriodLength), - } -} - -type sentPacketHandler struct { - lastSentRetransmittablePacketTime time.Time // only applies to the application-data packet number space - lastSentCryptoPacketTime time.Time - - nextSendTime time.Time - - initialPackets *packetNumberSpace - handshakePackets *packetNumberSpace - oneRTTPackets *packetNumberSpace - - // lowestNotConfirmedAcked is the lowest packet number that we sent an ACK for, but haven't received confirmation, that this ACK actually arrived - // example: we send an ACK for packets 90-100 with packet number 20 - // once we receive an ACK from the peer for packet 20, the lowestNotConfirmedAcked is 101 - // Only applies to the application-data packet number space. - lowestNotConfirmedAcked protocol.PacketNumber - - retransmissionQueue []*Packet - - bytesInFlight protocol.ByteCount - - congestion congestion.SendAlgorithm - rttStats *congestion.RTTStats - - handshakeComplete bool - - // The number of times the crypto packets have been retransmitted without receiving an ack. - cryptoCount uint32 - // The number of times a PTO has been sent without receiving an ack. - ptoCount uint32 - // The number of PTO probe packets that should be sent. - // Only applies to the application-data packet number space. - numProbesToSend int - - // The time at which the next packet will be considered lost based on early transmit or exceeding the reordering window in time. 
- lossTime time.Time - - // The alarm timeout - alarm time.Time - - logger utils.Logger -} - -// NewSentPacketHandler creates a new sentPacketHandler -func NewSentPacketHandler( - initialPacketNumber protocol.PacketNumber, - rttStats *congestion.RTTStats, - logger utils.Logger, -) SentPacketHandler { - congestion := congestion.NewCubicSender( - congestion.DefaultClock{}, - rttStats, - false, /* don't use reno since chromium doesn't (why?) */ - protocol.InitialCongestionWindow, - protocol.DefaultMaxCongestionWindow, - ) - - return &sentPacketHandler{ - initialPackets: newPacketNumberSpace(initialPacketNumber), - handshakePackets: newPacketNumberSpace(0), - oneRTTPackets: newPacketNumberSpace(0), - rttStats: rttStats, - congestion: congestion, - logger: logger, - } -} - -func (h *sentPacketHandler) SetHandshakeComplete() { - h.logger.Debugf("Handshake complete. Discarding all outstanding crypto packets.") - var queue []*Packet - for _, packet := range h.retransmissionQueue { - if packet.EncryptionLevel == protocol.Encryption1RTT { - queue = append(queue, packet) - } - } - for _, pnSpace := range []*packetNumberSpace{h.initialPackets, h.handshakePackets} { - var cryptoPackets []*Packet - pnSpace.history.Iterate(func(p *Packet) (bool, error) { - cryptoPackets = append(cryptoPackets, p) - return true, nil - }) - for _, p := range cryptoPackets { - pnSpace.history.Remove(p.PacketNumber) - } - } - h.retransmissionQueue = queue - h.handshakeComplete = true -} - -func (h *sentPacketHandler) SentPacket(packet *Packet) { - if isRetransmittable := h.sentPacketImpl(packet); isRetransmittable { - h.getPacketNumberSpace(packet.EncryptionLevel).history.SentPacket(packet) - h.updateLossDetectionAlarm() - } -} - -func (h *sentPacketHandler) SentPacketsAsRetransmission(packets []*Packet, retransmissionOf protocol.PacketNumber) { - var p []*Packet - for _, packet := range packets { - if isRetransmittable := h.sentPacketImpl(packet); isRetransmittable { - p = append(p, packet) - } - } 
- h.getPacketNumberSpace(p[0].EncryptionLevel).history.SentPacketsAsRetransmission(p, retransmissionOf) - h.updateLossDetectionAlarm() -} - -func (h *sentPacketHandler) getPacketNumberSpace(encLevel protocol.EncryptionLevel) *packetNumberSpace { - switch encLevel { - case protocol.EncryptionInitial: - return h.initialPackets - case protocol.EncryptionHandshake: - return h.handshakePackets - case protocol.Encryption1RTT: - return h.oneRTTPackets - default: - panic("invalid packet number space") - } -} - -func (h *sentPacketHandler) sentPacketImpl(packet *Packet) bool /* isRetransmittable */ { - pnSpace := h.getPacketNumberSpace(packet.EncryptionLevel) - - if h.logger.Debug() && pnSpace.largestSent != 0 { - for p := pnSpace.largestSent + 1; p < packet.PacketNumber; p++ { - h.logger.Debugf("Skipping packet number %#x", p) - } - } - - pnSpace.largestSent = packet.PacketNumber - - if len(packet.Frames) > 0 { - if ackFrame, ok := packet.Frames[0].(*wire.AckFrame); ok { - packet.largestAcked = ackFrame.LargestAcked() - } - } - - packet.Frames = stripNonRetransmittableFrames(packet.Frames) - isRetransmittable := len(packet.Frames) != 0 - - if isRetransmittable { - if packet.EncryptionLevel != protocol.Encryption1RTT { - h.lastSentCryptoPacketTime = packet.SendTime - } - h.lastSentRetransmittablePacketTime = packet.SendTime - packet.includedInBytesInFlight = true - h.bytesInFlight += packet.Length - packet.canBeRetransmitted = true - if h.numProbesToSend > 0 { - h.numProbesToSend-- - } - } - h.congestion.OnPacketSent(packet.SendTime, h.bytesInFlight, packet.PacketNumber, packet.Length, isRetransmittable) - - h.nextSendTime = utils.MaxTime(h.nextSendTime, packet.SendTime).Add(h.congestion.TimeUntilSend(h.bytesInFlight)) - return isRetransmittable -} - -func (h *sentPacketHandler) ReceivedAck(ackFrame *wire.AckFrame, withPacketNumber protocol.PacketNumber, encLevel protocol.EncryptionLevel, rcvTime time.Time) error { - pnSpace := h.getPacketNumberSpace(encLevel) - - 
largestAcked := ackFrame.LargestAcked() - if largestAcked > pnSpace.largestSent { - return qerr.Error(qerr.ProtocolViolation, "Received ACK for an unsent packet") - } - - pnSpace.largestAcked = utils.MaxPacketNumber(pnSpace.largestAcked, largestAcked) - - if !pnSpace.pns.Validate(ackFrame) { - return qerr.Error(qerr.ProtocolViolation, "Received an ACK for a skipped packet number") - } - - // maybe update the RTT - if p := pnSpace.history.GetPacket(ackFrame.LargestAcked()); p != nil { - h.rttStats.UpdateRTT(rcvTime.Sub(p.SendTime), ackFrame.DelayTime, rcvTime) - if h.logger.Debug() { - h.logger.Debugf("\tupdated RTT: %s (σ: %s)", h.rttStats.SmoothedRTT(), h.rttStats.MeanDeviation()) - } - h.congestion.MaybeExitSlowStart() - } - - ackedPackets, err := h.determineNewlyAckedPackets(ackFrame, encLevel) - if err != nil { - return err - } - if len(ackedPackets) == 0 { - return nil - } - - priorInFlight := h.bytesInFlight - for _, p := range ackedPackets { - // largestAcked == 0 either means that the packet didn't contain an ACK, or it just acked packet 0 - // It is safe to ignore the corner case of packets that just acked packet 0, because - // the lowestPacketNotConfirmedAcked is only used to limit the number of ACK ranges we will send. 
- if p.largestAcked != 0 && encLevel == protocol.Encryption1RTT { - h.lowestNotConfirmedAcked = utils.MaxPacketNumber(h.lowestNotConfirmedAcked, p.largestAcked+1) - } - if err := h.onPacketAcked(p, rcvTime); err != nil { - return err - } - if p.includedInBytesInFlight { - h.congestion.OnPacketAcked(p.PacketNumber, p.Length, priorInFlight, rcvTime) - } - } - - if err := h.detectLostPackets(rcvTime, encLevel, priorInFlight); err != nil { - return err - } - - h.ptoCount = 0 - h.cryptoCount = 0 - h.numProbesToSend = 0 - - h.updateLossDetectionAlarm() - return nil -} - -func (h *sentPacketHandler) GetLowestPacketNotConfirmedAcked() protocol.PacketNumber { - return h.lowestNotConfirmedAcked -} - -func (h *sentPacketHandler) determineNewlyAckedPackets( - ackFrame *wire.AckFrame, - encLevel protocol.EncryptionLevel, -) ([]*Packet, error) { - pnSpace := h.getPacketNumberSpace(encLevel) - var ackedPackets []*Packet - ackRangeIndex := 0 - lowestAcked := ackFrame.LowestAcked() - largestAcked := ackFrame.LargestAcked() - err := pnSpace.history.Iterate(func(p *Packet) (bool, error) { - // Ignore packets below the lowest acked - if p.PacketNumber < lowestAcked { - return true, nil - } - // Break after largest acked is reached - if p.PacketNumber > largestAcked { - return false, nil - } - - if ackFrame.HasMissingRanges() { - ackRange := ackFrame.AckRanges[len(ackFrame.AckRanges)-1-ackRangeIndex] - - for p.PacketNumber > ackRange.Largest && ackRangeIndex < len(ackFrame.AckRanges)-1 { - ackRangeIndex++ - ackRange = ackFrame.AckRanges[len(ackFrame.AckRanges)-1-ackRangeIndex] - } - - if p.PacketNumber >= ackRange.Smallest { // packet i contained in ACK range - if p.PacketNumber > ackRange.Largest { - return false, fmt.Errorf("BUG: ackhandler would have acked wrong packet 0x%x, while evaluating range 0x%x -> 0x%x", p.PacketNumber, ackRange.Smallest, ackRange.Largest) - } - ackedPackets = append(ackedPackets, p) - } - } else { - ackedPackets = append(ackedPackets, p) - } - return true, 
nil - }) - if h.logger.Debug() && len(ackedPackets) > 0 { - pns := make([]protocol.PacketNumber, len(ackedPackets)) - for i, p := range ackedPackets { - pns[i] = p.PacketNumber - } - h.logger.Debugf("\tnewly acked packets (%d): %#x", len(pns), pns) - } - return ackedPackets, err -} - -func (h *sentPacketHandler) hasOutstandingCryptoPackets() bool { - return h.initialPackets.history.HasOutstandingPackets() || h.handshakePackets.history.HasOutstandingPackets() -} - -func (h *sentPacketHandler) hasOutstandingPackets() bool { - return h.oneRTTPackets.history.HasOutstandingPackets() || h.hasOutstandingCryptoPackets() -} - -func (h *sentPacketHandler) updateLossDetectionAlarm() { - // Cancel the alarm if no packets are outstanding - if !h.hasOutstandingPackets() { - h.alarm = time.Time{} - return - } - - if h.hasOutstandingCryptoPackets() { - h.alarm = h.lastSentCryptoPacketTime.Add(h.computeCryptoTimeout()) - } else if !h.lossTime.IsZero() { - // Early retransmit timer or time loss detection. 
- h.alarm = h.lossTime - } else { // PTO alarm - h.alarm = h.lastSentRetransmittablePacketTime.Add(h.computePTOTimeout()) - } -} - -func (h *sentPacketHandler) detectLostPackets( - now time.Time, - encLevel protocol.EncryptionLevel, - priorInFlight protocol.ByteCount, -) error { - if encLevel == protocol.Encryption1RTT { - h.lossTime = time.Time{} - } - pnSpace := h.getPacketNumberSpace(encLevel) - - maxRTT := float64(utils.MaxDuration(h.rttStats.LatestRTT(), h.rttStats.SmoothedRTT())) - delayUntilLost := time.Duration((1.0 + timeReorderingFraction) * maxRTT) - - var lostPackets []*Packet - pnSpace.history.Iterate(func(packet *Packet) (bool, error) { - if packet.PacketNumber > pnSpace.largestAcked { - return false, nil - } - - timeSinceSent := now.Sub(packet.SendTime) - if timeSinceSent > delayUntilLost { - lostPackets = append(lostPackets, packet) - } else if h.lossTime.IsZero() && encLevel == protocol.Encryption1RTT { - if h.logger.Debug() { - h.logger.Debugf("\tsetting loss timer for packet %#x to %s (in %s)", packet.PacketNumber, delayUntilLost, delayUntilLost-timeSinceSent) - } - // Note: This conditional is only entered once per call - h.lossTime = now.Add(delayUntilLost - timeSinceSent) - } - return true, nil - }) - - if h.logger.Debug() && len(lostPackets) > 0 { - pns := make([]protocol.PacketNumber, len(lostPackets)) - for i, p := range lostPackets { - pns[i] = p.PacketNumber - } - h.logger.Debugf("\tlost packets (%d): %#x", len(pns), pns) - } - - for _, p := range lostPackets { - // the bytes in flight need to be reduced no matter if this packet will be retransmitted - if p.includedInBytesInFlight { - h.bytesInFlight -= p.Length - h.congestion.OnPacketLost(p.PacketNumber, p.Length, priorInFlight) - } - if p.canBeRetransmitted { - // queue the packet for retransmission, and report the loss to the congestion controller - if err := h.queuePacketForRetransmission(p, pnSpace); err != nil { - return err - } - } - pnSpace.history.Remove(p.PacketNumber) - } - 
return nil -} - -func (h *sentPacketHandler) OnAlarm() error { - // When all outstanding are acknowledged, the alarm is canceled in - // updateLossDetectionAlarm. This doesn't reset the timer in the session though. - // When OnAlarm is called, we therefore need to make sure that there are - // actually packets outstanding. - if h.hasOutstandingPackets() { - if err := h.onVerifiedAlarm(); err != nil { - return err - } - } - h.updateLossDetectionAlarm() - return nil -} - -func (h *sentPacketHandler) onVerifiedAlarm() error { - var err error - if h.hasOutstandingCryptoPackets() { - if h.logger.Debug() { - h.logger.Debugf("Loss detection alarm fired in crypto mode. Crypto count: %d", h.cryptoCount) - } - h.cryptoCount++ - err = h.queueCryptoPacketsForRetransmission() - } else if !h.lossTime.IsZero() { - if h.logger.Debug() { - h.logger.Debugf("Loss detection alarm fired in loss timer mode. Loss time: %s", h.lossTime) - } - // Early retransmit or time loss detection - err = h.detectLostPackets(time.Now(), protocol.Encryption1RTT, h.bytesInFlight) - } else { // PTO - if h.logger.Debug() { - h.logger.Debugf("Loss detection alarm fired in PTO mode. PTO count: %d", h.ptoCount) - } - h.ptoCount++ - h.numProbesToSend += 2 - } - return err -} - -func (h *sentPacketHandler) GetAlarmTimeout() time.Time { - return h.alarm -} - -func (h *sentPacketHandler) onPacketAcked(p *Packet, rcvTime time.Time) error { - pnSpace := h.getPacketNumberSpace(p.EncryptionLevel) - // This happens if a packet and its retransmissions is acked in the same ACK. - // As soon as we process the first one, this will remove all the retransmissions, - // so we won't find the retransmitted packet number later. 
- if packet := pnSpace.history.GetPacket(p.PacketNumber); packet == nil { - return nil - } - - // only report the acking of this packet to the congestion controller if: - // * it is a retransmittable packet - // * this packet wasn't retransmitted yet - if p.isRetransmission { - // that the parent doesn't exist is expected to happen every time the original packet was already acked - if parent := pnSpace.history.GetPacket(p.retransmissionOf); parent != nil { - if len(parent.retransmittedAs) == 1 { - parent.retransmittedAs = nil - } else { - // remove this packet from the slice of retransmission - retransmittedAs := make([]protocol.PacketNumber, 0, len(parent.retransmittedAs)-1) - for _, pn := range parent.retransmittedAs { - if pn != p.PacketNumber { - retransmittedAs = append(retransmittedAs, pn) - } - } - parent.retransmittedAs = retransmittedAs - } - } - } - // this also applies to packets that have been retransmitted as probe packets - if p.includedInBytesInFlight { - h.bytesInFlight -= p.Length - } - if err := h.stopRetransmissionsFor(p, pnSpace); err != nil { - return err - } - return pnSpace.history.Remove(p.PacketNumber) -} - -func (h *sentPacketHandler) stopRetransmissionsFor(p *Packet, pnSpace *packetNumberSpace) error { - if err := pnSpace.history.MarkCannotBeRetransmitted(p.PacketNumber); err != nil { - return err - } - for _, r := range p.retransmittedAs { - packet := pnSpace.history.GetPacket(r) - if packet == nil { - return fmt.Errorf("sent packet handler BUG: marking packet as not retransmittable %d (retransmission of %d) not found in history", r, p.PacketNumber) - } - h.stopRetransmissionsFor(packet, pnSpace) - } - return nil -} - -func (h *sentPacketHandler) DequeuePacketForRetransmission() *Packet { - if len(h.retransmissionQueue) == 0 { - return nil - } - packet := h.retransmissionQueue[0] - // Shift the slice and don't retain anything that isn't needed. 
- copy(h.retransmissionQueue, h.retransmissionQueue[1:]) - h.retransmissionQueue[len(h.retransmissionQueue)-1] = nil - h.retransmissionQueue = h.retransmissionQueue[:len(h.retransmissionQueue)-1] - return packet -} - -func (h *sentPacketHandler) DequeueProbePacket() (*Packet, error) { - pnSpace := h.getPacketNumberSpace(protocol.Encryption1RTT) - if len(h.retransmissionQueue) == 0 { - p := pnSpace.history.FirstOutstanding() - if p == nil { - return nil, errors.New("cannot dequeue a probe packet. No outstanding packets") - } - if err := h.queuePacketForRetransmission(p, pnSpace); err != nil { - return nil, err - } - } - return h.DequeuePacketForRetransmission(), nil -} - -func (h *sentPacketHandler) PeekPacketNumber(encLevel protocol.EncryptionLevel) (protocol.PacketNumber, protocol.PacketNumberLen) { - pnSpace := h.getPacketNumberSpace(encLevel) - - var lowestUnacked protocol.PacketNumber - if p := pnSpace.history.FirstOutstanding(); p != nil { - lowestUnacked = p.PacketNumber - } else { - lowestUnacked = pnSpace.largestAcked + 1 - } - - pn := pnSpace.pns.Peek() - return pn, protocol.GetPacketNumberLengthForHeader(pn, lowestUnacked) -} - -func (h *sentPacketHandler) PopPacketNumber(encLevel protocol.EncryptionLevel) protocol.PacketNumber { - return h.getPacketNumberSpace(encLevel).pns.Pop() -} - -func (h *sentPacketHandler) SendMode() SendMode { - numTrackedPackets := len(h.retransmissionQueue) + h.initialPackets.history.Len() + - h.handshakePackets.history.Len() + h.oneRTTPackets.history.Len() - - // Don't send any packets if we're keeping track of the maximum number of packets. - // Note that since MaxOutstandingSentPackets is smaller than MaxTrackedSentPackets, - // we will stop sending out new data when reaching MaxOutstandingSentPackets, - // but still allow sending of retransmissions and ACKs. 
- if numTrackedPackets >= protocol.MaxTrackedSentPackets { - if h.logger.Debug() { - h.logger.Debugf("Limited by the number of tracked packets: tracking %d packets, maximum %d", numTrackedPackets, protocol.MaxTrackedSentPackets) - } - return SendNone - } - if h.numProbesToSend > 0 { - return SendPTO - } - // Only send ACKs if we're congestion limited. - if cwnd := h.congestion.GetCongestionWindow(); h.bytesInFlight > cwnd { - if h.logger.Debug() { - h.logger.Debugf("Congestion limited: bytes in flight %d, window %d", h.bytesInFlight, cwnd) - } - return SendAck - } - // Send retransmissions first, if there are any. - if len(h.retransmissionQueue) > 0 { - return SendRetransmission - } - if numTrackedPackets >= protocol.MaxOutstandingSentPackets { - if h.logger.Debug() { - h.logger.Debugf("Max outstanding limited: tracking %d packets, maximum: %d", numTrackedPackets, protocol.MaxOutstandingSentPackets) - } - return SendAck - } - return SendAny -} - -func (h *sentPacketHandler) TimeUntilSend() time.Time { - return h.nextSendTime -} - -func (h *sentPacketHandler) ShouldSendNumPackets() int { - if h.numProbesToSend > 0 { - // RTO probes should not be paced, but must be sent immediately. 
- return h.numProbesToSend - } - delay := h.congestion.TimeUntilSend(h.bytesInFlight) - if delay == 0 || delay > protocol.MinPacingDelay { - return 1 - } - return int(math.Ceil(float64(protocol.MinPacingDelay) / float64(delay))) -} - -func (h *sentPacketHandler) queueCryptoPacketsForRetransmission() error { - if err := h.queueAllPacketsForRetransmission(protocol.EncryptionInitial); err != nil { - return err - } - return h.queueAllPacketsForRetransmission(protocol.EncryptionHandshake) -} - -func (h *sentPacketHandler) queueAllPacketsForRetransmission(encLevel protocol.EncryptionLevel) error { - var packets []*Packet - pnSpace := h.getPacketNumberSpace(encLevel) - pnSpace.history.Iterate(func(p *Packet) (bool, error) { - if p.canBeRetransmitted { - packets = append(packets, p) - } - return true, nil - }) - for _, p := range packets { - h.logger.Debugf("Queueing packet %#x (%s) as a crypto retransmission", p.PacketNumber, encLevel) - if err := h.queuePacketForRetransmission(p, pnSpace); err != nil { - return err - } - } - return nil -} - -func (h *sentPacketHandler) queuePacketForRetransmission(p *Packet, pnSpace *packetNumberSpace) error { - if !p.canBeRetransmitted { - return fmt.Errorf("sent packet handler BUG: packet %d already queued for retransmission", p.PacketNumber) - } - if err := pnSpace.history.MarkCannotBeRetransmitted(p.PacketNumber); err != nil { - return err - } - h.retransmissionQueue = append(h.retransmissionQueue, p) - return nil -} - -func (h *sentPacketHandler) computeCryptoTimeout() time.Duration { - duration := utils.MaxDuration(2*h.rttStats.SmoothedOrInitialRTT(), granularity) - // exponential backoff - // There's an implicit limit to this set by the crypto timeout. 
- return duration << h.cryptoCount -} - -func (h *sentPacketHandler) computePTOTimeout() time.Duration { - // TODO(#1236): include the max_ack_delay - duration := utils.MaxDuration(h.rttStats.SmoothedOrInitialRTT()+4*h.rttStats.MeanDeviation(), granularity) - return duration << h.ptoCount -} - -func (h *sentPacketHandler) ResetForRetry() error { - h.cryptoCount = 0 - h.bytesInFlight = 0 - var packets []*Packet - h.initialPackets.history.Iterate(func(p *Packet) (bool, error) { - if p.canBeRetransmitted { - packets = append(packets, p) - } - return true, nil - }) - for _, p := range packets { - h.logger.Debugf("Queueing packet %#x for retransmission.", p.PacketNumber) - h.retransmissionQueue = append(h.retransmissionQueue, p) - } - h.initialPackets = newPacketNumberSpace(h.initialPackets.pns.Pop()) - h.updateLossDetectionAlarm() - return nil -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go b/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go deleted file mode 100644 index 393ebba61..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/sent_packet_history.go +++ /dev/null @@ -1,148 +0,0 @@ -package ackhandler - -import ( - "fmt" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -type sentPacketHistory struct { - packetList *PacketList - packetMap map[protocol.PacketNumber]*PacketElement - - numOutstandingPackets int - - firstOutstanding *PacketElement -} - -func newSentPacketHistory() *sentPacketHistory { - return &sentPacketHistory{ - packetList: NewPacketList(), - packetMap: make(map[protocol.PacketNumber]*PacketElement), - } -} - -func (h *sentPacketHistory) SentPacket(p *Packet) { - h.sentPacketImpl(p) -} - -func (h *sentPacketHistory) sentPacketImpl(p *Packet) *PacketElement { - el := h.packetList.PushBack(*p) - h.packetMap[p.PacketNumber] = el - if h.firstOutstanding == nil { - h.firstOutstanding = el - } - if p.canBeRetransmitted { - 
h.numOutstandingPackets++ - } - return el -} - -func (h *sentPacketHistory) SentPacketsAsRetransmission(packets []*Packet, retransmissionOf protocol.PacketNumber) { - retransmission, ok := h.packetMap[retransmissionOf] - // The retransmitted packet is not present anymore. - // This can happen if it was acked in between dequeueing of the retransmission and sending. - // Just treat the retransmissions as normal packets. - // TODO: This won't happen if we clear packets queued for retransmission on new ACKs. - if !ok { - for _, packet := range packets { - h.sentPacketImpl(packet) - } - return - } - retransmission.Value.retransmittedAs = make([]protocol.PacketNumber, len(packets)) - for i, packet := range packets { - retransmission.Value.retransmittedAs[i] = packet.PacketNumber - el := h.sentPacketImpl(packet) - el.Value.isRetransmission = true - el.Value.retransmissionOf = retransmissionOf - } -} - -func (h *sentPacketHistory) GetPacket(p protocol.PacketNumber) *Packet { - if el, ok := h.packetMap[p]; ok { - return &el.Value - } - return nil -} - -// Iterate iterates through all packets. -// The callback must not modify the history. -func (h *sentPacketHistory) Iterate(cb func(*Packet) (cont bool, err error)) error { - cont := true - for el := h.packetList.Front(); cont && el != nil; el = el.Next() { - var err error - cont, err = cb(&el.Value) - if err != nil { - return err - } - } - return nil -} - -// FirstOutStanding returns the first outstanding packet. -// It must not be modified (e.g. retransmitted). -// Use DequeueFirstPacketForRetransmission() to retransmit it. -func (h *sentPacketHistory) FirstOutstanding() *Packet { - if h.firstOutstanding == nil { - return nil - } - return &h.firstOutstanding.Value -} - -// QueuePacketForRetransmission marks a packet for retransmission. -// A packet can only be queued once. 
-func (h *sentPacketHistory) MarkCannotBeRetransmitted(pn protocol.PacketNumber) error { - el, ok := h.packetMap[pn] - if !ok { - return fmt.Errorf("sent packet history: packet %d not found", pn) - } - if el.Value.canBeRetransmitted { - h.numOutstandingPackets-- - if h.numOutstandingPackets < 0 { - panic("numOutstandingHandshakePackets negative") - } - } - el.Value.canBeRetransmitted = false - if el == h.firstOutstanding { - h.readjustFirstOutstanding() - } - return nil -} - -// readjustFirstOutstanding readjusts the pointer to the first outstanding packet. -// This is necessary every time the first outstanding packet is deleted or retransmitted. -func (h *sentPacketHistory) readjustFirstOutstanding() { - el := h.firstOutstanding.Next() - for el != nil && !el.Value.canBeRetransmitted { - el = el.Next() - } - h.firstOutstanding = el -} - -func (h *sentPacketHistory) Len() int { - return len(h.packetMap) -} - -func (h *sentPacketHistory) Remove(p protocol.PacketNumber) error { - el, ok := h.packetMap[p] - if !ok { - return fmt.Errorf("packet %d not found in sent packet history", p) - } - if el == h.firstOutstanding { - h.readjustFirstOutstanding() - } - if el.Value.canBeRetransmitted { - h.numOutstandingPackets-- - if h.numOutstandingPackets < 0 { - panic("numOutstandingHandshakePackets negative") - } - } - h.packetList.Remove(el) - delete(h.packetMap, p) - return nil -} - -func (h *sentPacketHistory) HasOutstandingPackets() bool { - return h.numOutstandingPackets > 0 -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/bandwidth.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/bandwidth.go deleted file mode 100644 index 54269c567..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/bandwidth.go +++ /dev/null @@ -1,22 +0,0 @@ -package congestion - -import ( - "time" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -// Bandwidth of a connection -type Bandwidth uint64 - -const ( - // 
BitsPerSecond is 1 bit per second - BitsPerSecond Bandwidth = 1 - // BytesPerSecond is 1 byte per second - BytesPerSecond = 8 * BitsPerSecond -) - -// BandwidthFromDelta calculates the bandwidth from a number of bytes and a time delta -func BandwidthFromDelta(bytes protocol.ByteCount, delta time.Duration) Bandwidth { - return Bandwidth(bytes) * Bandwidth(time.Second) / Bandwidth(delta) * BytesPerSecond -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/clock.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/clock.go deleted file mode 100644 index 405fae70f..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/clock.go +++ /dev/null @@ -1,18 +0,0 @@ -package congestion - -import "time" - -// A Clock returns the current time -type Clock interface { - Now() time.Time -} - -// DefaultClock implements the Clock interface using the Go stdlib clock. -type DefaultClock struct{} - -var _ Clock = DefaultClock{} - -// Now gets the current time -func (DefaultClock) Now() time.Time { - return time.Now() -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic.go deleted file mode 100644 index dcf91fc6d..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic.go +++ /dev/null @@ -1,210 +0,0 @@ -package congestion - -import ( - "math" - "time" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// This cubic implementation is based on the one found in Chromiums's QUIC -// implementation, in the files net/quic/congestion_control/cubic.{hh,cc}. - -// Constants based on TCP defaults. -// The following constants are in 2^10 fractions of a second instead of ms to -// allow a 10 shift right to divide. - -// 1024*1024^3 (first 1024 is from 0.100^3) -// where 0.100 is 100 ms which is the scaling round trip time. 
-const cubeScale = 40 -const cubeCongestionWindowScale = 410 -const cubeFactor protocol.ByteCount = 1 << cubeScale / cubeCongestionWindowScale / protocol.DefaultTCPMSS - -const defaultNumConnections = 2 - -// Default Cubic backoff factor -const beta float32 = 0.7 - -// Additional backoff factor when loss occurs in the concave part of the Cubic -// curve. This additional backoff factor is expected to give up bandwidth to -// new concurrent flows and speed up convergence. -const betaLastMax float32 = 0.85 - -// Cubic implements the cubic algorithm from TCP -type Cubic struct { - clock Clock - - // Number of connections to simulate. - numConnections int - - // Time when this cycle started, after last loss event. - epoch time.Time - - // Max congestion window used just before last loss event. - // Note: to improve fairness to other streams an additional back off is - // applied to this value if the new value is below our latest value. - lastMaxCongestionWindow protocol.ByteCount - - // Number of acked bytes since the cycle started (epoch). - ackedBytesCount protocol.ByteCount - - // TCP Reno equivalent congestion window in packets. - estimatedTCPcongestionWindow protocol.ByteCount - - // Origin point of cubic function. - originPointCongestionWindow protocol.ByteCount - - // Time to origin point of cubic function in 2^10 fractions of a second. - timeToOriginPoint uint32 - - // Last congestion window in packets computed by cubic function. 
- lastTargetCongestionWindow protocol.ByteCount -} - -// NewCubic returns a new Cubic instance -func NewCubic(clock Clock) *Cubic { - c := &Cubic{ - clock: clock, - numConnections: defaultNumConnections, - } - c.Reset() - return c -} - -// Reset is called after a timeout to reset the cubic state -func (c *Cubic) Reset() { - c.epoch = time.Time{} - c.lastMaxCongestionWindow = 0 - c.ackedBytesCount = 0 - c.estimatedTCPcongestionWindow = 0 - c.originPointCongestionWindow = 0 - c.timeToOriginPoint = 0 - c.lastTargetCongestionWindow = 0 -} - -func (c *Cubic) alpha() float32 { - // TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that - // beta here is a cwnd multiplier, and is equal to 1-beta from the paper. - // We derive the equivalent alpha for an N-connection emulation as: - b := c.beta() - return 3 * float32(c.numConnections) * float32(c.numConnections) * (1 - b) / (1 + b) -} - -func (c *Cubic) beta() float32 { - // kNConnectionBeta is the backoff factor after loss for our N-connection - // emulation, which emulates the effective backoff of an ensemble of N - // TCP-Reno connections on a single loss event. The effective multiplier is - // computed as: - return (float32(c.numConnections) - 1 + beta) / float32(c.numConnections) -} - -func (c *Cubic) betaLastMax() float32 { - // betaLastMax is the additional backoff factor after loss for our - // N-connection emulation, which emulates the additional backoff of - // an ensemble of N TCP-Reno connections on a single loss event. The - // effective multiplier is computed as: - return (float32(c.numConnections) - 1 + betaLastMax) / float32(c.numConnections) -} - -// OnApplicationLimited is called on ack arrival when sender is unable to use -// the available congestion window. Resets Cubic state during quiescence. -func (c *Cubic) OnApplicationLimited() { - // When sender is not using the available congestion window, the window does - // not grow. 
But to be RTT-independent, Cubic assumes that the sender has been - // using the entire window during the time since the beginning of the current - // "epoch" (the end of the last loss recovery period). Since - // application-limited periods break this assumption, we reset the epoch when - // in such a period. This reset effectively freezes congestion window growth - // through application-limited periods and allows Cubic growth to continue - // when the entire window is being used. - c.epoch = time.Time{} -} - -// CongestionWindowAfterPacketLoss computes a new congestion window to use after -// a loss event. Returns the new congestion window in packets. The new -// congestion window is a multiplicative decrease of our current window. -func (c *Cubic) CongestionWindowAfterPacketLoss(currentCongestionWindow protocol.ByteCount) protocol.ByteCount { - if currentCongestionWindow+protocol.DefaultTCPMSS < c.lastMaxCongestionWindow { - // We never reached the old max, so assume we are competing with another - // flow. Use our extra back off factor to allow the other flow to go up. - c.lastMaxCongestionWindow = protocol.ByteCount(c.betaLastMax() * float32(currentCongestionWindow)) - } else { - c.lastMaxCongestionWindow = currentCongestionWindow - } - c.epoch = time.Time{} // Reset time. - return protocol.ByteCount(float32(currentCongestionWindow) * c.beta()) -} - -// CongestionWindowAfterAck computes a new congestion window to use after a received ACK. -// Returns the new congestion window in packets. The new congestion window -// follows a cubic function that depends on the time passed since last -// packet loss. -func (c *Cubic) CongestionWindowAfterAck( - ackedBytes protocol.ByteCount, - currentCongestionWindow protocol.ByteCount, - delayMin time.Duration, - eventTime time.Time, -) protocol.ByteCount { - c.ackedBytesCount += ackedBytes - - if c.epoch.IsZero() { - // First ACK after a loss event. - c.epoch = eventTime // Start of epoch. 
- c.ackedBytesCount = ackedBytes // Reset count. - // Reset estimated_tcp_congestion_window_ to be in sync with cubic. - c.estimatedTCPcongestionWindow = currentCongestionWindow - if c.lastMaxCongestionWindow <= currentCongestionWindow { - c.timeToOriginPoint = 0 - c.originPointCongestionWindow = currentCongestionWindow - } else { - c.timeToOriginPoint = uint32(math.Cbrt(float64(cubeFactor * (c.lastMaxCongestionWindow - currentCongestionWindow)))) - c.originPointCongestionWindow = c.lastMaxCongestionWindow - } - } - - // Change the time unit from microseconds to 2^10 fractions per second. Take - // the round trip time in account. This is done to allow us to use shift as a - // divide operator. - elapsedTime := int64(eventTime.Add(delayMin).Sub(c.epoch)/time.Microsecond) << 10 / (1000 * 1000) - - // Right-shifts of negative, signed numbers have implementation-dependent - // behavior, so force the offset to be positive, as is done in the kernel. - offset := int64(c.timeToOriginPoint) - elapsedTime - if offset < 0 { - offset = -offset - } - - deltaCongestionWindow := protocol.ByteCount(cubeCongestionWindowScale*offset*offset*offset) * protocol.DefaultTCPMSS >> cubeScale - var targetCongestionWindow protocol.ByteCount - if elapsedTime > int64(c.timeToOriginPoint) { - targetCongestionWindow = c.originPointCongestionWindow + deltaCongestionWindow - } else { - targetCongestionWindow = c.originPointCongestionWindow - deltaCongestionWindow - } - // Limit the CWND increase to half the acked bytes. - targetCongestionWindow = utils.MinByteCount(targetCongestionWindow, currentCongestionWindow+c.ackedBytesCount/2) - - // Increase the window by approximately Alpha * 1 MSS of bytes every - // time we ack an estimated tcp window of bytes. For small - // congestion windows (less than 25), the formula below will - // increase slightly slower than linearly per estimated tcp window - // of bytes. 
- c.estimatedTCPcongestionWindow += protocol.ByteCount(float32(c.ackedBytesCount) * c.alpha() * float32(protocol.DefaultTCPMSS) / float32(c.estimatedTCPcongestionWindow)) - c.ackedBytesCount = 0 - - // We have a new cubic congestion window. - c.lastTargetCongestionWindow = targetCongestionWindow - - // Compute target congestion_window based on cubic target and estimated TCP - // congestion_window, use highest (fastest). - if targetCongestionWindow < c.estimatedTCPcongestionWindow { - targetCongestionWindow = c.estimatedTCPcongestionWindow - } - return targetCongestionWindow -} - -// SetNumConnections sets the number of emulated connections -func (c *Cubic) SetNumConnections(n int) { - c.numConnections = n -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic_sender.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic_sender.go deleted file mode 100644 index 6f3d3289a..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/cubic_sender.go +++ /dev/null @@ -1,314 +0,0 @@ -package congestion - -import ( - "time" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -const ( - maxBurstBytes = 3 * protocol.DefaultTCPMSS - renoBeta float32 = 0.7 // Reno backoff factor. - defaultMinimumCongestionWindow protocol.ByteCount = 2 * protocol.DefaultTCPMSS -) - -type cubicSender struct { - hybridSlowStart HybridSlowStart - prr PrrSender - rttStats *RTTStats - stats connectionStats - cubic *Cubic - - reno bool - - // Track the largest packet that has been sent. - largestSentPacketNumber protocol.PacketNumber - - // Track the largest packet that has been acked. - largestAckedPacketNumber protocol.PacketNumber - - // Track the largest packet number outstanding when a CWND cutback occurs. - largestSentAtLastCutback protocol.PacketNumber - - // Whether the last loss event caused us to exit slowstart. 
- // Used for stats collection of slowstartPacketsLost - lastCutbackExitedSlowstart bool - - // When true, exit slow start with large cutback of congestion window. - slowStartLargeReduction bool - - // Congestion window in packets. - congestionWindow protocol.ByteCount - - // Minimum congestion window in packets. - minCongestionWindow protocol.ByteCount - - // Maximum congestion window. - maxCongestionWindow protocol.ByteCount - - // Slow start congestion window in bytes, aka ssthresh. - slowstartThreshold protocol.ByteCount - - // Number of connections to simulate. - numConnections int - - // ACK counter for the Reno implementation. - numAckedPackets uint64 - - initialCongestionWindow protocol.ByteCount - initialMaxCongestionWindow protocol.ByteCount - - minSlowStartExitWindow protocol.ByteCount -} - -var _ SendAlgorithm = &cubicSender{} -var _ SendAlgorithmWithDebugInfo = &cubicSender{} - -// NewCubicSender makes a new cubic sender -func NewCubicSender(clock Clock, rttStats *RTTStats, reno bool, initialCongestionWindow, initialMaxCongestionWindow protocol.ByteCount) SendAlgorithmWithDebugInfo { - return &cubicSender{ - rttStats: rttStats, - initialCongestionWindow: initialCongestionWindow, - initialMaxCongestionWindow: initialMaxCongestionWindow, - congestionWindow: initialCongestionWindow, - minCongestionWindow: defaultMinimumCongestionWindow, - slowstartThreshold: initialMaxCongestionWindow, - maxCongestionWindow: initialMaxCongestionWindow, - numConnections: defaultNumConnections, - cubic: NewCubic(clock), - reno: reno, - } -} - -// TimeUntilSend returns when the next packet should be sent. -func (c *cubicSender) TimeUntilSend(bytesInFlight protocol.ByteCount) time.Duration { - if c.InRecovery() { - // PRR is used when in recovery. 
- if c.prr.CanSend(c.GetCongestionWindow(), bytesInFlight, c.GetSlowStartThreshold()) { - return 0 - } - } - return c.rttStats.SmoothedRTT() * time.Duration(protocol.DefaultTCPMSS) / time.Duration(2*c.GetCongestionWindow()) -} - -func (c *cubicSender) OnPacketSent( - sentTime time.Time, - bytesInFlight protocol.ByteCount, - packetNumber protocol.PacketNumber, - bytes protocol.ByteCount, - isRetransmittable bool, -) { - if !isRetransmittable { - return - } - if c.InRecovery() { - // PRR is used when in recovery. - c.prr.OnPacketSent(bytes) - } - c.largestSentPacketNumber = packetNumber - c.hybridSlowStart.OnPacketSent(packetNumber) -} - -func (c *cubicSender) InRecovery() bool { - return c.largestAckedPacketNumber <= c.largestSentAtLastCutback && c.largestAckedPacketNumber != 0 -} - -func (c *cubicSender) InSlowStart() bool { - return c.GetCongestionWindow() < c.GetSlowStartThreshold() -} - -func (c *cubicSender) GetCongestionWindow() protocol.ByteCount { - return c.congestionWindow -} - -func (c *cubicSender) GetSlowStartThreshold() protocol.ByteCount { - return c.slowstartThreshold -} - -func (c *cubicSender) ExitSlowstart() { - c.slowstartThreshold = c.congestionWindow -} - -func (c *cubicSender) SlowstartThreshold() protocol.ByteCount { - return c.slowstartThreshold -} - -func (c *cubicSender) MaybeExitSlowStart() { - if c.InSlowStart() && c.hybridSlowStart.ShouldExitSlowStart(c.rttStats.LatestRTT(), c.rttStats.MinRTT(), c.GetCongestionWindow()/protocol.DefaultTCPMSS) { - c.ExitSlowstart() - } -} - -func (c *cubicSender) OnPacketAcked( - ackedPacketNumber protocol.PacketNumber, - ackedBytes protocol.ByteCount, - priorInFlight protocol.ByteCount, - eventTime time.Time, -) { - c.largestAckedPacketNumber = utils.MaxPacketNumber(ackedPacketNumber, c.largestAckedPacketNumber) - if c.InRecovery() { - // PRR is used when in recovery. 
- c.prr.OnPacketAcked(ackedBytes) - return - } - c.maybeIncreaseCwnd(ackedPacketNumber, ackedBytes, priorInFlight, eventTime) - if c.InSlowStart() { - c.hybridSlowStart.OnPacketAcked(ackedPacketNumber) - } -} - -func (c *cubicSender) OnPacketLost( - packetNumber protocol.PacketNumber, - lostBytes protocol.ByteCount, - priorInFlight protocol.ByteCount, -) { - // TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets - // already sent should be treated as a single loss event, since it's expected. - if packetNumber <= c.largestSentAtLastCutback { - if c.lastCutbackExitedSlowstart { - c.stats.slowstartPacketsLost++ - c.stats.slowstartBytesLost += lostBytes - if c.slowStartLargeReduction { - // Reduce congestion window by lost_bytes for every loss. - c.congestionWindow = utils.MaxByteCount(c.congestionWindow-lostBytes, c.minSlowStartExitWindow) - c.slowstartThreshold = c.congestionWindow - } - } - return - } - c.lastCutbackExitedSlowstart = c.InSlowStart() - if c.InSlowStart() { - c.stats.slowstartPacketsLost++ - } - - c.prr.OnPacketLost(priorInFlight) - - // TODO(chromium): Separate out all of slow start into a separate class. - if c.slowStartLargeReduction && c.InSlowStart() { - if c.congestionWindow >= 2*c.initialCongestionWindow { - c.minSlowStartExitWindow = c.congestionWindow / 2 - } - c.congestionWindow -= protocol.DefaultTCPMSS - } else if c.reno { - c.congestionWindow = protocol.ByteCount(float32(c.congestionWindow) * c.RenoBeta()) - } else { - c.congestionWindow = c.cubic.CongestionWindowAfterPacketLoss(c.congestionWindow) - } - if c.congestionWindow < c.minCongestionWindow { - c.congestionWindow = c.minCongestionWindow - } - c.slowstartThreshold = c.congestionWindow - c.largestSentAtLastCutback = c.largestSentPacketNumber - // reset packet count from congestion avoidance mode. We start - // counting again when we're out of recovery. 
- c.numAckedPackets = 0 -} - -func (c *cubicSender) RenoBeta() float32 { - // kNConnectionBeta is the backoff factor after loss for our N-connection - // emulation, which emulates the effective backoff of an ensemble of N - // TCP-Reno connections on a single loss event. The effective multiplier is - // computed as: - return (float32(c.numConnections) - 1. + renoBeta) / float32(c.numConnections) -} - -// Called when we receive an ack. Normal TCP tracks how many packets one ack -// represents, but quic has a separate ack for each packet. -func (c *cubicSender) maybeIncreaseCwnd( - ackedPacketNumber protocol.PacketNumber, - ackedBytes protocol.ByteCount, - priorInFlight protocol.ByteCount, - eventTime time.Time, -) { - // Do not increase the congestion window unless the sender is close to using - // the current window. - if !c.isCwndLimited(priorInFlight) { - c.cubic.OnApplicationLimited() - return - } - if c.congestionWindow >= c.maxCongestionWindow { - return - } - if c.InSlowStart() { - // TCP slow start, exponential growth, increase by one for each ACK. - c.congestionWindow += protocol.DefaultTCPMSS - return - } - // Congestion avoidance - if c.reno { - // Classic Reno congestion avoidance. - c.numAckedPackets++ - // Divide by num_connections to smoothly increase the CWND at a faster - // rate than conventional Reno. 
- if c.numAckedPackets*uint64(c.numConnections) >= uint64(c.congestionWindow)/uint64(protocol.DefaultTCPMSS) { - c.congestionWindow += protocol.DefaultTCPMSS - c.numAckedPackets = 0 - } - } else { - c.congestionWindow = utils.MinByteCount(c.maxCongestionWindow, c.cubic.CongestionWindowAfterAck(ackedBytes, c.congestionWindow, c.rttStats.MinRTT(), eventTime)) - } -} - -func (c *cubicSender) isCwndLimited(bytesInFlight protocol.ByteCount) bool { - congestionWindow := c.GetCongestionWindow() - if bytesInFlight >= congestionWindow { - return true - } - availableBytes := congestionWindow - bytesInFlight - slowStartLimited := c.InSlowStart() && bytesInFlight > congestionWindow/2 - return slowStartLimited || availableBytes <= maxBurstBytes -} - -// BandwidthEstimate returns the current bandwidth estimate -func (c *cubicSender) BandwidthEstimate() Bandwidth { - srtt := c.rttStats.SmoothedRTT() - if srtt == 0 { - // If we haven't measured an rtt, the bandwidth estimate is unknown. - return 0 - } - return BandwidthFromDelta(c.GetCongestionWindow(), srtt) -} - -// HybridSlowStart returns the hybrid slow start instance for testing -func (c *cubicSender) HybridSlowStart() *HybridSlowStart { - return &c.hybridSlowStart -} - -// SetNumEmulatedConnections sets the number of emulated connections -func (c *cubicSender) SetNumEmulatedConnections(n int) { - c.numConnections = utils.Max(n, 1) - c.cubic.SetNumConnections(c.numConnections) -} - -// OnRetransmissionTimeout is called on an retransmission timeout -func (c *cubicSender) OnRetransmissionTimeout(packetsRetransmitted bool) { - c.largestSentAtLastCutback = 0 - if !packetsRetransmitted { - return - } - c.hybridSlowStart.Restart() - c.cubic.Reset() - c.slowstartThreshold = c.congestionWindow / 2 - c.congestionWindow = c.minCongestionWindow -} - -// OnConnectionMigration is called when the connection is migrated (?) 
-func (c *cubicSender) OnConnectionMigration() { - c.hybridSlowStart.Restart() - c.prr = PrrSender{} - c.largestSentPacketNumber = 0 - c.largestAckedPacketNumber = 0 - c.largestSentAtLastCutback = 0 - c.lastCutbackExitedSlowstart = false - c.cubic.Reset() - c.numAckedPackets = 0 - c.congestionWindow = c.initialCongestionWindow - c.slowstartThreshold = c.initialMaxCongestionWindow - c.maxCongestionWindow = c.initialMaxCongestionWindow -} - -// SetSlowStartLargeReduction allows enabling the SSLR experiment -func (c *cubicSender) SetSlowStartLargeReduction(enabled bool) { - c.slowStartLargeReduction = enabled -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/hybrid_slow_start.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/hybrid_slow_start.go deleted file mode 100644 index f41c1e5c3..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/hybrid_slow_start.go +++ /dev/null @@ -1,111 +0,0 @@ -package congestion - -import ( - "time" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// Note(pwestin): the magic clamping numbers come from the original code in -// tcp_cubic.c. -const hybridStartLowWindow = protocol.ByteCount(16) - -// Number of delay samples for detecting the increase of delay. -const hybridStartMinSamples = uint32(8) - -// Exit slow start if the min rtt has increased by more than 1/8th. -const hybridStartDelayFactorExp = 3 // 2^3 = 8 -// The original paper specifies 2 and 8ms, but those have changed over time. 
-const hybridStartDelayMinThresholdUs = int64(4000) -const hybridStartDelayMaxThresholdUs = int64(16000) - -// HybridSlowStart implements the TCP hybrid slow start algorithm -type HybridSlowStart struct { - endPacketNumber protocol.PacketNumber - lastSentPacketNumber protocol.PacketNumber - started bool - currentMinRTT time.Duration - rttSampleCount uint32 - hystartFound bool -} - -// StartReceiveRound is called for the start of each receive round (burst) in the slow start phase. -func (s *HybridSlowStart) StartReceiveRound(lastSent protocol.PacketNumber) { - s.endPacketNumber = lastSent - s.currentMinRTT = 0 - s.rttSampleCount = 0 - s.started = true -} - -// IsEndOfRound returns true if this ack is the last packet number of our current slow start round. -func (s *HybridSlowStart) IsEndOfRound(ack protocol.PacketNumber) bool { - return s.endPacketNumber < ack -} - -// ShouldExitSlowStart should be called on every new ack frame, since a new -// RTT measurement can be made then. -// rtt: the RTT for this ack packet. -// minRTT: is the lowest delay (RTT) we have seen during the session. -// congestionWindow: the congestion window in packets. -func (s *HybridSlowStart) ShouldExitSlowStart(latestRTT time.Duration, minRTT time.Duration, congestionWindow protocol.ByteCount) bool { - if !s.started { - // Time to start the hybrid slow start. - s.StartReceiveRound(s.lastSentPacketNumber) - } - if s.hystartFound { - return true - } - // Second detection parameter - delay increase detection. - // Compare the minimum delay (s.currentMinRTT) of the current - // burst of packets relative to the minimum delay during the session. - // Note: we only look at the first few(8) packets in each burst, since we - // only want to compare the lowest RTT of the burst relative to previous - // bursts. 
- s.rttSampleCount++ - if s.rttSampleCount <= hybridStartMinSamples { - if s.currentMinRTT == 0 || s.currentMinRTT > latestRTT { - s.currentMinRTT = latestRTT - } - } - // We only need to check this once per round. - if s.rttSampleCount == hybridStartMinSamples { - // Divide minRTT by 8 to get a rtt increase threshold for exiting. - minRTTincreaseThresholdUs := int64(minRTT / time.Microsecond >> hybridStartDelayFactorExp) - // Ensure the rtt threshold is never less than 2ms or more than 16ms. - minRTTincreaseThresholdUs = utils.MinInt64(minRTTincreaseThresholdUs, hybridStartDelayMaxThresholdUs) - minRTTincreaseThreshold := time.Duration(utils.MaxInt64(minRTTincreaseThresholdUs, hybridStartDelayMinThresholdUs)) * time.Microsecond - - if s.currentMinRTT > (minRTT + minRTTincreaseThreshold) { - s.hystartFound = true - } - } - // Exit from slow start if the cwnd is greater than 16 and - // increasing delay is found. - return congestionWindow >= hybridStartLowWindow && s.hystartFound -} - -// OnPacketSent is called when a packet was sent -func (s *HybridSlowStart) OnPacketSent(packetNumber protocol.PacketNumber) { - s.lastSentPacketNumber = packetNumber -} - -// OnPacketAcked gets invoked after ShouldExitSlowStart, so it's best to end -// the round when the final packet of the burst is received and start it on -// the next incoming ack. 
-func (s *HybridSlowStart) OnPacketAcked(ackedPacketNumber protocol.PacketNumber) { - if s.IsEndOfRound(ackedPacketNumber) { - s.started = false - } -} - -// Started returns true if started -func (s *HybridSlowStart) Started() bool { - return s.started -} - -// Restart the slow start phase -func (s *HybridSlowStart) Restart() { - s.started = false - s.hystartFound = false -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/interface.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/interface.go deleted file mode 100644 index 7c27da64a..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/interface.go +++ /dev/null @@ -1,36 +0,0 @@ -package congestion - -import ( - "time" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -// A SendAlgorithm performs congestion control and calculates the congestion window -type SendAlgorithm interface { - TimeUntilSend(bytesInFlight protocol.ByteCount) time.Duration - OnPacketSent(sentTime time.Time, bytesInFlight protocol.ByteCount, packetNumber protocol.PacketNumber, bytes protocol.ByteCount, isRetransmittable bool) - GetCongestionWindow() protocol.ByteCount - MaybeExitSlowStart() - OnPacketAcked(number protocol.PacketNumber, ackedBytes protocol.ByteCount, priorInFlight protocol.ByteCount, eventTime time.Time) - OnPacketLost(number protocol.PacketNumber, lostBytes protocol.ByteCount, priorInFlight protocol.ByteCount) - SetNumEmulatedConnections(n int) - OnRetransmissionTimeout(packetsRetransmitted bool) - OnConnectionMigration() - - // Experiments - SetSlowStartLargeReduction(enabled bool) -} - -// SendAlgorithmWithDebugInfo adds some debug functions to SendAlgorithm -type SendAlgorithmWithDebugInfo interface { - SendAlgorithm - BandwidthEstimate() Bandwidth - - // Stuff only used in testing - - HybridSlowStart() *HybridSlowStart - SlowstartThreshold() protocol.ByteCount - RenoBeta() float32 - InRecovery() bool -} diff --git 
a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/prr_sender.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/prr_sender.go deleted file mode 100644 index 5c807d190..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/prr_sender.go +++ /dev/null @@ -1,54 +0,0 @@ -package congestion - -import ( - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -// PrrSender implements the Proportional Rate Reduction (PRR) per RFC 6937 -type PrrSender struct { - bytesSentSinceLoss protocol.ByteCount - bytesDeliveredSinceLoss protocol.ByteCount - ackCountSinceLoss protocol.ByteCount - bytesInFlightBeforeLoss protocol.ByteCount -} - -// OnPacketSent should be called after a packet was sent -func (p *PrrSender) OnPacketSent(sentBytes protocol.ByteCount) { - p.bytesSentSinceLoss += sentBytes -} - -// OnPacketLost should be called on the first loss that triggers a recovery -// period and all other methods in this class should only be called when in -// recovery. -func (p *PrrSender) OnPacketLost(priorInFlight protocol.ByteCount) { - p.bytesSentSinceLoss = 0 - p.bytesInFlightBeforeLoss = priorInFlight - p.bytesDeliveredSinceLoss = 0 - p.ackCountSinceLoss = 0 -} - -// OnPacketAcked should be called after a packet was acked -func (p *PrrSender) OnPacketAcked(ackedBytes protocol.ByteCount) { - p.bytesDeliveredSinceLoss += ackedBytes - p.ackCountSinceLoss++ -} - -// CanSend returns if packets can be sent -func (p *PrrSender) CanSend(congestionWindow, bytesInFlight, slowstartThreshold protocol.ByteCount) bool { - // Return QuicTime::Zero In order to ensure limited transmit always works. - if p.bytesSentSinceLoss == 0 || bytesInFlight < protocol.DefaultTCPMSS { - return true - } - if congestionWindow > bytesInFlight { - // During PRR-SSRB, limit outgoing packets to 1 extra MSS per ack, instead - // of sending the entire available window. This prevents burst retransmits - // when more packets are lost than the CWND reduction. 
- // limit = MAX(prr_delivered - prr_out, DeliveredData) + MSS - return p.bytesDeliveredSinceLoss+p.ackCountSinceLoss*protocol.DefaultTCPMSS > p.bytesSentSinceLoss - } - // Implement Proportional Rate Reduction (RFC6937). - // Checks a simplified version of the PRR formula that doesn't use division: - // AvailableSendWindow = - // CEIL(prr_delivered * ssthresh / BytesInFlightAtLoss) - prr_sent - return p.bytesDeliveredSinceLoss*slowstartThreshold > p.bytesSentSinceLoss*p.bytesInFlightBeforeLoss -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/rtt_stats.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/rtt_stats.go deleted file mode 100644 index f0ebbb236..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/rtt_stats.go +++ /dev/null @@ -1,101 +0,0 @@ -package congestion - -import ( - "time" - - "github.com/lucas-clemente/quic-go/internal/utils" -) - -const ( - rttAlpha float32 = 0.125 - oneMinusAlpha float32 = (1 - rttAlpha) - rttBeta float32 = 0.25 - oneMinusBeta float32 = (1 - rttBeta) - // The default RTT used before an RTT sample is taken. - defaultInitialRTT = 100 * time.Millisecond -) - -// RTTStats provides round-trip statistics -type RTTStats struct { - minRTT time.Duration - latestRTT time.Duration - smoothedRTT time.Duration - meanDeviation time.Duration -} - -// NewRTTStats makes a properly initialized RTTStats object -func NewRTTStats() *RTTStats { - return &RTTStats{} -} - -// MinRTT Returns the minRTT for the entire connection. -// May return Zero if no valid updates have occurred. -func (r *RTTStats) MinRTT() time.Duration { return r.minRTT } - -// LatestRTT returns the most recent rtt measurement. -// May return Zero if no valid updates have occurred. -func (r *RTTStats) LatestRTT() time.Duration { return r.latestRTT } - -// SmoothedRTT returns the EWMA smoothed RTT for the connection. -// May return Zero if no valid updates have occurred. 
-func (r *RTTStats) SmoothedRTT() time.Duration { return r.smoothedRTT } - -// SmoothedOrInitialRTT returns the EWMA smoothed RTT for the connection. -// If no valid updates have occurred, it returns the initial RTT. -func (r *RTTStats) SmoothedOrInitialRTT() time.Duration { - if r.smoothedRTT != 0 { - return r.smoothedRTT - } - return defaultInitialRTT -} - -// MeanDeviation gets the mean deviation -func (r *RTTStats) MeanDeviation() time.Duration { return r.meanDeviation } - -// UpdateRTT updates the RTT based on a new sample. -func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration, now time.Time) { - if sendDelta == utils.InfDuration || sendDelta <= 0 { - return - } - - // Update r.minRTT first. r.minRTT does not use an rttSample corrected for - // ackDelay but the raw observed sendDelta, since poor clock granularity at - // the client may cause a high ackDelay to result in underestimation of the - // r.minRTT. - if r.minRTT == 0 || r.minRTT > sendDelta { - r.minRTT = sendDelta - } - - // Correct for ackDelay if information received from the peer results in a - // an RTT sample at least as large as minRTT. Otherwise, only use the - // sendDelta. - sample := sendDelta - if sample-r.minRTT >= ackDelay { - sample -= ackDelay - } - r.latestRTT = sample - // First time call. - if r.smoothedRTT == 0 { - r.smoothedRTT = sample - r.meanDeviation = sample / 2 - } else { - r.meanDeviation = time.Duration(oneMinusBeta*float32(r.meanDeviation/time.Microsecond)+rttBeta*float32(utils.AbsDuration(r.smoothedRTT-sample)/time.Microsecond)) * time.Microsecond - r.smoothedRTT = time.Duration((float32(r.smoothedRTT/time.Microsecond)*oneMinusAlpha)+(float32(sample/time.Microsecond)*rttAlpha)) * time.Microsecond - } -} - -// OnConnectionMigration is called when connection migrates and rtt measurement needs to be reset. 
-func (r *RTTStats) OnConnectionMigration() { - r.latestRTT = 0 - r.minRTT = 0 - r.smoothedRTT = 0 - r.meanDeviation = 0 -} - -// ExpireSmoothedMetrics causes the smoothed_rtt to be increased to the latest_rtt if the latest_rtt -// is larger. The mean deviation is increased to the most recent deviation if -// it's larger. -func (r *RTTStats) ExpireSmoothedMetrics() { - r.meanDeviation = utils.MaxDuration(r.meanDeviation, utils.AbsDuration(r.smoothedRTT-r.latestRTT)) - r.smoothedRTT = utils.MaxDuration(r.smoothedRTT, r.latestRTT) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/stats.go b/vendor/github.com/lucas-clemente/quic-go/internal/congestion/stats.go deleted file mode 100644 index ed669c146..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/congestion/stats.go +++ /dev/null @@ -1,8 +0,0 @@ -package congestion - -import "github.com/lucas-clemente/quic-go/internal/protocol" - -type connectionStats struct { - slowstartPacketsLost protocol.PacketNumber - slowstartBytesLost protocol.ByteCount -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/base_flow_controller.go b/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/base_flow_controller.go deleted file mode 100644 index 6a0aa3c58..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/base_flow_controller.go +++ /dev/null @@ -1,122 +0,0 @@ -package flowcontrol - -import ( - "sync" - "time" - - "github.com/lucas-clemente/quic-go/internal/congestion" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -type baseFlowController struct { - // for sending data - bytesSent protocol.ByteCount - sendWindow protocol.ByteCount - lastBlockedAt protocol.ByteCount - - // for receiving data - mutex sync.RWMutex - bytesRead protocol.ByteCount - highestReceived protocol.ByteCount - receiveWindow protocol.ByteCount - receiveWindowSize protocol.ByteCount - 
maxReceiveWindowSize protocol.ByteCount - - epochStartTime time.Time - epochStartOffset protocol.ByteCount - rttStats *congestion.RTTStats - - logger utils.Logger -} - -// IsNewlyBlocked says if it is newly blocked by flow control. -// For every offset, it only returns true once. -// If it is blocked, the offset is returned. -func (c *baseFlowController) IsNewlyBlocked() (bool, protocol.ByteCount) { - if c.sendWindowSize() != 0 || c.sendWindow == c.lastBlockedAt { - return false, 0 - } - c.lastBlockedAt = c.sendWindow - return true, c.sendWindow -} - -func (c *baseFlowController) AddBytesSent(n protocol.ByteCount) { - c.bytesSent += n -} - -// UpdateSendWindow should be called after receiving a WindowUpdateFrame -// it returns true if the window was actually updated -func (c *baseFlowController) UpdateSendWindow(offset protocol.ByteCount) { - if offset > c.sendWindow { - c.sendWindow = offset - } -} - -func (c *baseFlowController) sendWindowSize() protocol.ByteCount { - // this only happens during connection establishment, when data is sent before we receive the peer's transport parameters - if c.bytesSent > c.sendWindow { - return 0 - } - return c.sendWindow - c.bytesSent -} - -func (c *baseFlowController) AddBytesRead(n protocol.ByteCount) { - c.mutex.Lock() - defer c.mutex.Unlock() - - // pretend we sent a WindowUpdate when reading the first byte - // this way auto-tuning of the window size already works for the first WindowUpdate - if c.bytesRead == 0 { - c.startNewAutoTuningEpoch() - } - c.bytesRead += n -} - -func (c *baseFlowController) hasWindowUpdate() bool { - bytesRemaining := c.receiveWindow - c.bytesRead - // update the window when more than the threshold was consumed - return bytesRemaining <= protocol.ByteCount((float64(c.receiveWindowSize) * float64((1 - protocol.WindowUpdateThreshold)))) -} - -// getWindowUpdate updates the receive window, if necessary -// it returns the new offset -func (c *baseFlowController) getWindowUpdate() protocol.ByteCount 
{ - if !c.hasWindowUpdate() { - return 0 - } - - c.maybeAdjustWindowSize() - c.receiveWindow = c.bytesRead + c.receiveWindowSize - return c.receiveWindow -} - -// maybeAdjustWindowSize increases the receiveWindowSize if we're sending updates too often. -// For details about auto-tuning, see https://docs.google.com/document/d/1SExkMmGiz8VYzV3s9E35JQlJ73vhzCekKkDi85F1qCE/edit?usp=sharing. -func (c *baseFlowController) maybeAdjustWindowSize() { - bytesReadInEpoch := c.bytesRead - c.epochStartOffset - // don't do anything if less than half the window has been consumed - if bytesReadInEpoch <= c.receiveWindowSize/2 { - return - } - rtt := c.rttStats.SmoothedRTT() - if rtt == 0 { - return - } - - fraction := float64(bytesReadInEpoch) / float64(c.receiveWindowSize) - if time.Since(c.epochStartTime) < time.Duration(4*fraction*float64(rtt)) { - // window is consumed too fast, try to increase the window size - c.receiveWindowSize = utils.MinByteCount(2*c.receiveWindowSize, c.maxReceiveWindowSize) - } - c.startNewAutoTuningEpoch() -} - -func (c *baseFlowController) startNewAutoTuningEpoch() { - c.epochStartTime = time.Now() - c.epochStartOffset = c.bytesRead -} - -func (c *baseFlowController) checkFlowControlViolation() bool { - return c.highestReceived > c.receiveWindow -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/connection_flow_controller.go b/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/connection_flow_controller.go deleted file mode 100644 index 1393335b0..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/connection_flow_controller.go +++ /dev/null @@ -1,92 +0,0 @@ -package flowcontrol - -import ( - "fmt" - - "github.com/lucas-clemente/quic-go/internal/congestion" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -type connectionFlowController struct { - baseFlowController - - 
queueWindowUpdate func() -} - -var _ ConnectionFlowController = &connectionFlowController{} - -// NewConnectionFlowController gets a new flow controller for the connection -// It is created before we receive the peer's transport paramenters, thus it starts with a sendWindow of 0. -func NewConnectionFlowController( - receiveWindow protocol.ByteCount, - maxReceiveWindow protocol.ByteCount, - queueWindowUpdate func(), - rttStats *congestion.RTTStats, - logger utils.Logger, -) ConnectionFlowController { - return &connectionFlowController{ - baseFlowController: baseFlowController{ - rttStats: rttStats, - receiveWindow: receiveWindow, - receiveWindowSize: receiveWindow, - maxReceiveWindowSize: maxReceiveWindow, - logger: logger, - }, - queueWindowUpdate: queueWindowUpdate, - } -} - -func (c *connectionFlowController) SendWindowSize() protocol.ByteCount { - return c.baseFlowController.sendWindowSize() -} - -// IncrementHighestReceived adds an increment to the highestReceived value -func (c *connectionFlowController) IncrementHighestReceived(increment protocol.ByteCount) error { - c.mutex.Lock() - defer c.mutex.Unlock() - - c.highestReceived += increment - if c.checkFlowControlViolation() { - return qerr.Error(qerr.FlowControlError, fmt.Sprintf("Received %d bytes for the connection, allowed %d bytes", c.highestReceived, c.receiveWindow)) - } - return nil -} - -func (c *connectionFlowController) AddBytesRead(n protocol.ByteCount) { - c.baseFlowController.AddBytesRead(n) - c.maybeQueueWindowUpdate() -} - -func (c *connectionFlowController) maybeQueueWindowUpdate() { - c.mutex.Lock() - hasWindowUpdate := c.hasWindowUpdate() - c.mutex.Unlock() - if hasWindowUpdate { - c.queueWindowUpdate() - } -} - -func (c *connectionFlowController) GetWindowUpdate() protocol.ByteCount { - c.mutex.Lock() - oldWindowSize := c.receiveWindowSize - offset := c.baseFlowController.getWindowUpdate() - if oldWindowSize < c.receiveWindowSize { - c.logger.Debugf("Increasing receive flow control window 
for the connection to %d kB", c.receiveWindowSize/(1<<10)) - } - c.mutex.Unlock() - return offset -} - -// EnsureMinimumWindowSize sets a minimum window size -// it should make sure that the connection-level window is increased when a stream-level window grows -func (c *connectionFlowController) EnsureMinimumWindowSize(inc protocol.ByteCount) { - c.mutex.Lock() - if inc > c.receiveWindowSize { - c.logger.Debugf("Increasing receive flow control window for the connection to %d kB, in response to stream flow control window increase", c.receiveWindowSize/(1<<10)) - c.receiveWindowSize = utils.MinByteCount(inc, c.maxReceiveWindowSize) - c.startNewAutoTuningEpoch() - } - c.mutex.Unlock() -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/interface.go b/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/interface.go deleted file mode 100644 index a47e7cd76..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/interface.go +++ /dev/null @@ -1,41 +0,0 @@ -package flowcontrol - -import "github.com/lucas-clemente/quic-go/internal/protocol" - -type flowController interface { - // for sending - SendWindowSize() protocol.ByteCount - UpdateSendWindow(protocol.ByteCount) - AddBytesSent(protocol.ByteCount) - // for receiving - AddBytesRead(protocol.ByteCount) - GetWindowUpdate() protocol.ByteCount // returns 0 if no update is necessary - IsNewlyBlocked() (bool, protocol.ByteCount) -} - -// A StreamFlowController is a flow controller for a QUIC stream. 
-type StreamFlowController interface { - flowController - // for receiving - // UpdateHighestReceived should be called when a new highest offset is received - // final has to be to true if this is the final offset of the stream, - // as contained in a STREAM frame with FIN bit, and the RESET_STREAM frame - UpdateHighestReceived(offset protocol.ByteCount, final bool) error - // Abandon should be called when reading from the stream is aborted early, - // and there won't be any further calls to AddBytesRead. - Abandon() -} - -// The ConnectionFlowController is the flow controller for the connection. -type ConnectionFlowController interface { - flowController -} - -type connectionFlowControllerI interface { - ConnectionFlowController - // The following two methods are not supposed to be called from outside this packet, but are needed internally - // for sending - EnsureMinimumWindowSize(protocol.ByteCount) - // for receiving - IncrementHighestReceived(protocol.ByteCount) error -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/stream_flow_controller.go b/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/stream_flow_controller.go deleted file mode 100644 index bb22337df..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/flowcontrol/stream_flow_controller.go +++ /dev/null @@ -1,139 +0,0 @@ -package flowcontrol - -import ( - "fmt" - - "github.com/lucas-clemente/quic-go/internal/congestion" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -type streamFlowController struct { - baseFlowController - - streamID protocol.StreamID - - queueWindowUpdate func() - - connection connectionFlowControllerI - - receivedFinalOffset bool -} - -var _ StreamFlowController = &streamFlowController{} - -// NewStreamFlowController gets a new flow controller for a stream -func NewStreamFlowController( - streamID 
protocol.StreamID, - cfc ConnectionFlowController, - receiveWindow protocol.ByteCount, - maxReceiveWindow protocol.ByteCount, - initialSendWindow protocol.ByteCount, - queueWindowUpdate func(protocol.StreamID), - rttStats *congestion.RTTStats, - logger utils.Logger, -) StreamFlowController { - return &streamFlowController{ - streamID: streamID, - connection: cfc.(connectionFlowControllerI), - queueWindowUpdate: func() { queueWindowUpdate(streamID) }, - baseFlowController: baseFlowController{ - rttStats: rttStats, - receiveWindow: receiveWindow, - receiveWindowSize: receiveWindow, - maxReceiveWindowSize: maxReceiveWindow, - sendWindow: initialSendWindow, - logger: logger, - }, - } -} - -// UpdateHighestReceived updates the highestReceived value, if the offset is higher. -func (c *streamFlowController) UpdateHighestReceived(offset protocol.ByteCount, final bool) error { - c.mutex.Lock() - defer c.mutex.Unlock() - - // If the final offset for this stream is already known, check for consistency. - if c.receivedFinalOffset { - // If we receive another final offset, check that it's the same. - if final && offset != c.highestReceived { - return qerr.Error(qerr.FinalSizeError, fmt.Sprintf("Received inconsistent final offset for stream %d (old: %#x, new: %#x bytes)", c.streamID, c.highestReceived, offset)) - } - // Check that the offset is below the final offset. - if offset > c.highestReceived { - return qerr.Error(qerr.FinalSizeError, fmt.Sprintf("Received offset %#x for stream %d. Final offset was already received at %#x", offset, c.streamID, c.highestReceived)) - } - } - - if final { - c.receivedFinalOffset = true - } - if offset == c.highestReceived { - return nil - } - // A higher offset was received before. - // This can happen due to reordering. 
- if offset <= c.highestReceived { - if final { - return qerr.Error(qerr.FinalSizeError, fmt.Sprintf("Received final offset %#x for stream %d, but already received offset %#x before", offset, c.streamID, c.highestReceived)) - } - return nil - } - - increment := offset - c.highestReceived - c.highestReceived = offset - if c.checkFlowControlViolation() { - return qerr.Error(qerr.FlowControlError, fmt.Sprintf("Received %#x bytes on stream %d, allowed %#x bytes", offset, c.streamID, c.receiveWindow)) - } - return c.connection.IncrementHighestReceived(increment) -} - -func (c *streamFlowController) AddBytesRead(n protocol.ByteCount) { - c.baseFlowController.AddBytesRead(n) - c.maybeQueueWindowUpdate() - c.connection.AddBytesRead(n) -} - -func (c *streamFlowController) Abandon() { - if unread := c.highestReceived - c.bytesRead; unread > 0 { - c.connection.AddBytesRead(unread) - } -} - -func (c *streamFlowController) AddBytesSent(n protocol.ByteCount) { - c.baseFlowController.AddBytesSent(n) - c.connection.AddBytesSent(n) -} - -func (c *streamFlowController) SendWindowSize() protocol.ByteCount { - return utils.MinByteCount(c.baseFlowController.sendWindowSize(), c.connection.SendWindowSize()) -} - -func (c *streamFlowController) maybeQueueWindowUpdate() { - c.mutex.Lock() - hasWindowUpdate := !c.receivedFinalOffset && c.hasWindowUpdate() - c.mutex.Unlock() - if hasWindowUpdate { - c.queueWindowUpdate() - } -} - -func (c *streamFlowController) GetWindowUpdate() protocol.ByteCount { - // don't use defer for unlocking the mutex here, GetWindowUpdate() is called frequently and defer shows up in the profiler - c.mutex.Lock() - // if we already received the final offset for this stream, the peer won't need any additional flow control credit - if c.receivedFinalOffset { - c.mutex.Unlock() - return 0 - } - - oldWindowSize := c.receiveWindowSize - offset := c.baseFlowController.getWindowUpdate() - if c.receiveWindowSize > oldWindowSize { // auto-tuning enlarged the window size - 
c.logger.Debugf("Increasing receive flow control window for stream %d to %d kB", c.streamID, c.receiveWindowSize/(1<<10)) - c.connection.EnsureMinimumWindowSize(protocol.ByteCount(float64(c.receiveWindowSize) * protocol.ConnectionFlowControlMultiplier)) - } - c.mutex.Unlock() - return offset -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/aead.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/aead.go deleted file mode 100644 index 07ce74f74..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/aead.go +++ /dev/null @@ -1,104 +0,0 @@ -package handshake - -import ( - "crypto/cipher" - "encoding/binary" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -type sealer struct { - aead cipher.AEAD - hpEncrypter cipher.Block - - // use a single slice to avoid allocations - nonceBuf []byte - hpMask []byte - - // short headers protect 5 bits in the first byte, long headers only 4 - is1RTT bool -} - -var _ Sealer = &sealer{} - -func newSealer(aead cipher.AEAD, hpEncrypter cipher.Block, is1RTT bool) Sealer { - return &sealer{ - aead: aead, - nonceBuf: make([]byte, aead.NonceSize()), - is1RTT: is1RTT, - hpEncrypter: hpEncrypter, - hpMask: make([]byte, hpEncrypter.BlockSize()), - } -} - -func (s *sealer) Seal(dst, src []byte, pn protocol.PacketNumber, ad []byte) []byte { - binary.BigEndian.PutUint64(s.nonceBuf[len(s.nonceBuf)-8:], uint64(pn)) - // The AEAD we're using here will be the qtls.aeadAESGCM13. - // It uses the nonce provided here and XOR it with the IV. 
- return s.aead.Seal(dst, s.nonceBuf, src, ad) -} - -func (s *sealer) EncryptHeader(sample []byte, firstByte *byte, pnBytes []byte) { - if len(sample) != s.hpEncrypter.BlockSize() { - panic("invalid sample size") - } - s.hpEncrypter.Encrypt(s.hpMask, sample) - if s.is1RTT { - *firstByte ^= s.hpMask[0] & 0x1f - } else { - *firstByte ^= s.hpMask[0] & 0xf - } - for i := range pnBytes { - pnBytes[i] ^= s.hpMask[i+1] - } -} - -func (s *sealer) Overhead() int { - return s.aead.Overhead() -} - -type opener struct { - aead cipher.AEAD - pnDecrypter cipher.Block - - // use a single slice to avoid allocations - nonceBuf []byte - hpMask []byte - - // short headers protect 5 bits in the first byte, long headers only 4 - is1RTT bool -} - -var _ Opener = &opener{} - -func newOpener(aead cipher.AEAD, pnDecrypter cipher.Block, is1RTT bool) Opener { - return &opener{ - aead: aead, - nonceBuf: make([]byte, aead.NonceSize()), - is1RTT: is1RTT, - pnDecrypter: pnDecrypter, - hpMask: make([]byte, pnDecrypter.BlockSize()), - } -} - -func (o *opener) Open(dst, src []byte, pn protocol.PacketNumber, ad []byte) ([]byte, error) { - binary.BigEndian.PutUint64(o.nonceBuf[len(o.nonceBuf)-8:], uint64(pn)) - // The AEAD we're using here will be the qtls.aeadAESGCM13. - // It uses the nonce provided here and XOR it with the IV. 
- return o.aead.Open(dst, o.nonceBuf, src, ad) -} - -func (o *opener) DecryptHeader(sample []byte, firstByte *byte, pnBytes []byte) { - if len(sample) != o.pnDecrypter.BlockSize() { - panic("invalid sample size") - } - o.pnDecrypter.Encrypt(o.hpMask, sample) - if o.is1RTT { - *firstByte ^= o.hpMask[0] & 0x1f - } else { - *firstByte ^= o.hpMask[0] & 0xf - } - for i := range pnBytes { - pnBytes[i] ^= o.hpMask[i+1] - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/cookie_generator.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/cookie_generator.go deleted file mode 100644 index 6d1288ed6..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/cookie_generator.go +++ /dev/null @@ -1,109 +0,0 @@ -package handshake - -import ( - "encoding/asn1" - "fmt" - "net" - "time" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -const ( - cookiePrefixIP byte = iota - cookiePrefixString -) - -// A Cookie is derived from the client address and can be used to verify the ownership of this address. 
-type Cookie struct { - RemoteAddr string - OriginalDestConnectionID protocol.ConnectionID - // The time that the Cookie was issued (resolution 1 second) - SentTime time.Time -} - -// token is the struct that is used for ASN1 serialization and deserialization -type token struct { - RemoteAddr []byte - OriginalDestConnectionID []byte - - Timestamp int64 -} - -// A CookieGenerator generates Cookies -type CookieGenerator struct { - cookieProtector cookieProtector -} - -// NewCookieGenerator initializes a new CookieGenerator -func NewCookieGenerator() (*CookieGenerator, error) { - cookieProtector, err := newCookieProtector() - if err != nil { - return nil, err - } - return &CookieGenerator{ - cookieProtector: cookieProtector, - }, nil -} - -// NewToken generates a new Cookie for a given source address -func (g *CookieGenerator) NewToken(raddr net.Addr, origConnID protocol.ConnectionID) ([]byte, error) { - data, err := asn1.Marshal(token{ - RemoteAddr: encodeRemoteAddr(raddr), - OriginalDestConnectionID: origConnID, - Timestamp: time.Now().Unix(), - }) - if err != nil { - return nil, err - } - return g.cookieProtector.NewToken(data) -} - -// DecodeToken decodes a Cookie -func (g *CookieGenerator) DecodeToken(encrypted []byte) (*Cookie, error) { - // if the client didn't send any Cookie, DecodeToken will be called with a nil-slice - if len(encrypted) == 0 { - return nil, nil - } - - data, err := g.cookieProtector.DecodeToken(encrypted) - if err != nil { - return nil, err - } - t := &token{} - rest, err := asn1.Unmarshal(data, t) - if err != nil { - return nil, err - } - if len(rest) != 0 { - return nil, fmt.Errorf("rest when unpacking token: %d", len(rest)) - } - cookie := &Cookie{ - RemoteAddr: decodeRemoteAddr(t.RemoteAddr), - SentTime: time.Unix(t.Timestamp, 0), - } - if len(t.OriginalDestConnectionID) > 0 { - cookie.OriginalDestConnectionID = protocol.ConnectionID(t.OriginalDestConnectionID) - } - return cookie, nil -} - -// encodeRemoteAddr encodes a remote address 
such that it can be saved in the Cookie -func encodeRemoteAddr(remoteAddr net.Addr) []byte { - if udpAddr, ok := remoteAddr.(*net.UDPAddr); ok { - return append([]byte{cookiePrefixIP}, udpAddr.IP...) - } - return append([]byte{cookiePrefixString}, []byte(remoteAddr.String())...) -} - -// decodeRemoteAddr decodes the remote address saved in the Cookie -func decodeRemoteAddr(data []byte) string { - // data will never be empty for a Cookie that we generated. Check it to be on the safe side - if len(data) == 0 { - return "" - } - if data[0] == cookiePrefixIP { - return net.IP(data[1:]).String() - } - return string(data[1:]) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/cookie_protector.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/cookie_protector.go deleted file mode 100644 index 7ebdfa18c..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/cookie_protector.go +++ /dev/null @@ -1,86 +0,0 @@ -package handshake - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "crypto/sha256" - "fmt" - "io" - - "golang.org/x/crypto/hkdf" -) - -// CookieProtector is used to create and verify a cookie -type cookieProtector interface { - // NewToken creates a new token - NewToken([]byte) ([]byte, error) - // DecodeToken decodes a token - DecodeToken([]byte) ([]byte, error) -} - -const ( - cookieSecretSize = 32 - cookieNonceSize = 32 -) - -// cookieProtector is used to create and verify a cookie -type cookieProtectorImpl struct { - secret []byte -} - -// newCookieProtector creates a source for source address tokens -func newCookieProtector() (cookieProtector, error) { - secret := make([]byte, cookieSecretSize) - if _, err := rand.Read(secret); err != nil { - return nil, err - } - return &cookieProtectorImpl{secret: secret}, nil -} - -// NewToken encodes data into a new token. 
-func (s *cookieProtectorImpl) NewToken(data []byte) ([]byte, error) { - nonce := make([]byte, cookieNonceSize) - if _, err := rand.Read(nonce); err != nil { - return nil, err - } - aead, aeadNonce, err := s.createAEAD(nonce) - if err != nil { - return nil, err - } - return append(nonce, aead.Seal(nil, aeadNonce, data, nil)...), nil -} - -// DecodeToken decodes a token. -func (s *cookieProtectorImpl) DecodeToken(p []byte) ([]byte, error) { - if len(p) < cookieNonceSize { - return nil, fmt.Errorf("Token too short: %d", len(p)) - } - nonce := p[:cookieNonceSize] - aead, aeadNonce, err := s.createAEAD(nonce) - if err != nil { - return nil, err - } - return aead.Open(nil, aeadNonce, p[cookieNonceSize:], nil) -} - -func (s *cookieProtectorImpl) createAEAD(nonce []byte) (cipher.AEAD, []byte, error) { - h := hkdf.New(sha256.New, s.secret, nonce, []byte("quic-go cookie source")) - key := make([]byte, 32) // use a 32 byte key, in order to select AES-256 - if _, err := io.ReadFull(h, key); err != nil { - return nil, nil, err - } - aeadNonce := make([]byte, 12) - if _, err := io.ReadFull(h, aeadNonce); err != nil { - return nil, nil, err - } - c, err := aes.NewCipher(key) - if err != nil { - return nil, nil, err - } - aead, err := cipher.NewGCM(c) - if err != nil { - return nil, nil, err - } - return aead, aeadNonce, nil -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/crypto_setup.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/crypto_setup.go deleted file mode 100644 index de6ddc684..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/crypto_setup.go +++ /dev/null @@ -1,581 +0,0 @@ -package handshake - -import ( - "crypto/aes" - "crypto/tls" - "errors" - "fmt" - "io" - "net" - "sync" - "unsafe" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/marten-seemann/qtls" -) - -type 
messageType uint8 - -// TLS handshake message types. -const ( - typeClientHello messageType = 1 - typeServerHello messageType = 2 - typeNewSessionTicket messageType = 4 - typeEncryptedExtensions messageType = 8 - typeCertificate messageType = 11 - typeCertificateRequest messageType = 13 - typeCertificateVerify messageType = 15 - typeFinished messageType = 20 -) - -func (m messageType) String() string { - switch m { - case typeClientHello: - return "ClientHello" - case typeServerHello: - return "ServerHello" - case typeNewSessionTicket: - return "NewSessionTicket" - case typeEncryptedExtensions: - return "EncryptedExtensions" - case typeCertificate: - return "Certificate" - case typeCertificateRequest: - return "CertificateRequest" - case typeCertificateVerify: - return "CertificateVerify" - case typeFinished: - return "Finished" - default: - return fmt.Sprintf("unknown message type: %d", m) - } -} - -// ErrOpenerNotYetAvailable is returned when an opener is requested for an encryption level, -// but the corresponding opener has not yet been initialized -// This can happen when packets arrive out of order. -var ErrOpenerNotYetAvailable = errors.New("CryptoSetup: opener at this encryption level not yet available") - -type cryptoSetup struct { - tlsConf *qtls.Config - conn *qtls.Conn - - messageChan chan []byte - - paramsChan <-chan []byte - handleParamsCallback func([]byte) - - alertChan chan uint8 - // HandleData() sends errors on the messageErrChan - messageErrChan chan error - // handshakeDone is closed as soon as the go routine running qtls.Handshake() returns - handshakeDone chan struct{} - // is closed when Close() is called - closeChan chan struct{} - - clientHelloWritten bool - clientHelloWrittenChan chan struct{} - - receivedWriteKey chan struct{} - receivedReadKey chan struct{} - // WriteRecord does a non-blocking send on this channel. - // This way, handleMessage can see if qtls tries to write a message. 
- // This is necessary: - // for servers: to see if a HelloRetryRequest should be sent in response to a ClientHello - // for clients: to see if a ServerHello is a HelloRetryRequest - writeRecord chan struct{} - - logger utils.Logger - - perspective protocol.Perspective - - mutex sync.Mutex // protects all members below - - readEncLevel protocol.EncryptionLevel - writeEncLevel protocol.EncryptionLevel - - initialStream io.Writer - initialOpener Opener - initialSealer Sealer - - handshakeStream io.Writer - handshakeOpener Opener - handshakeSealer Sealer - - oneRTTStream io.Writer - opener Opener - sealer Sealer -} - -var _ qtls.RecordLayer = &cryptoSetup{} -var _ CryptoSetup = &cryptoSetup{} - -// NewCryptoSetupClient creates a new crypto setup for the client -func NewCryptoSetupClient( - initialStream io.Writer, - handshakeStream io.Writer, - oneRTTStream io.Writer, - connID protocol.ConnectionID, - remoteAddr net.Addr, - tp *TransportParameters, - handleParams func([]byte), - tlsConf *tls.Config, - logger utils.Logger, -) (CryptoSetup, <-chan struct{} /* ClientHello written */, error) { - cs, clientHelloWritten, err := newCryptoSetup( - initialStream, - handshakeStream, - oneRTTStream, - connID, - tp, - handleParams, - tlsConf, - logger, - protocol.PerspectiveClient, - ) - if err != nil { - return nil, nil, err - } - cs.conn = qtls.Client(newConn(remoteAddr), cs.tlsConf) - return cs, clientHelloWritten, nil -} - -// NewCryptoSetupServer creates a new crypto setup for the server -func NewCryptoSetupServer( - initialStream io.Writer, - handshakeStream io.Writer, - oneRTTStream io.Writer, - connID protocol.ConnectionID, - remoteAddr net.Addr, - tp *TransportParameters, - handleParams func([]byte), - tlsConf *tls.Config, - logger utils.Logger, -) (CryptoSetup, error) { - cs, _, err := newCryptoSetup( - initialStream, - handshakeStream, - oneRTTStream, - connID, - tp, - handleParams, - tlsConf, - logger, - protocol.PerspectiveServer, - ) - if err != nil { - return nil, 
err - } - cs.conn = qtls.Server(newConn(remoteAddr), cs.tlsConf) - return cs, nil -} - -func newCryptoSetup( - initialStream io.Writer, - handshakeStream io.Writer, - oneRTTStream io.Writer, - connID protocol.ConnectionID, - tp *TransportParameters, - handleParams func([]byte), - tlsConf *tls.Config, - logger utils.Logger, - perspective protocol.Perspective, -) (*cryptoSetup, <-chan struct{} /* ClientHello written */, error) { - initialSealer, initialOpener, err := NewInitialAEAD(connID, perspective) - if err != nil { - return nil, nil, err - } - extHandler := newExtensionHandler(tp.Marshal(), perspective) - cs := &cryptoSetup{ - initialStream: initialStream, - initialSealer: initialSealer, - initialOpener: initialOpener, - handshakeStream: handshakeStream, - oneRTTStream: oneRTTStream, - readEncLevel: protocol.EncryptionInitial, - writeEncLevel: protocol.EncryptionInitial, - handleParamsCallback: handleParams, - paramsChan: extHandler.TransportParameters(), - logger: logger, - perspective: perspective, - handshakeDone: make(chan struct{}), - alertChan: make(chan uint8), - messageErrChan: make(chan error, 1), - clientHelloWrittenChan: make(chan struct{}), - messageChan: make(chan []byte, 100), - receivedReadKey: make(chan struct{}), - receivedWriteKey: make(chan struct{}), - writeRecord: make(chan struct{}), - closeChan: make(chan struct{}), - } - qtlsConf := tlsConfigToQtlsConfig(tlsConf, cs, extHandler) - cs.tlsConf = qtlsConf - return cs, cs.clientHelloWrittenChan, nil -} - -func (h *cryptoSetup) ChangeConnectionID(id protocol.ConnectionID) error { - initialSealer, initialOpener, err := NewInitialAEAD(id, h.perspective) - if err != nil { - return err - } - h.initialSealer = initialSealer - h.initialOpener = initialOpener - return nil -} - -func (h *cryptoSetup) RunHandshake() error { - // Handle errors that might occur when HandleData() is called. 
- handshakeComplete := make(chan struct{}) - handshakeErrChan := make(chan error, 1) - go func() { - defer close(h.handshakeDone) - if err := h.conn.Handshake(); err != nil { - handshakeErrChan <- err - return - } - close(handshakeComplete) - }() - - select { - case <-h.closeChan: - close(h.messageChan) - // wait until the Handshake() go routine has returned - return errors.New("Handshake aborted") - case <-handshakeComplete: // return when the handshake is done - return nil - case alert := <-h.alertChan: - err := <-handshakeErrChan - return qerr.CryptoError(alert, err.Error()) - case err := <-h.messageErrChan: - // If the handshake errored because of an error that occurred during HandleData(), - // that error message will be more useful than the error message generated by Handshake(). - // Close the message chan that qtls is receiving messages from. - // This will make qtls.Handshake() return. - // Thereby the go routine running qtls.Handshake() will return. - close(h.messageChan) - return err - } -} - -func (h *cryptoSetup) Close() error { - close(h.closeChan) - // wait until qtls.Handshake() actually returned - <-h.handshakeDone - return nil -} - -// handleMessage handles a TLS handshake message. -// It is called by the crypto streams when a new message is available. -// It returns if it is done with messages on the same encryption level. 
-func (h *cryptoSetup) HandleMessage(data []byte, encLevel protocol.EncryptionLevel) bool /* stream finished */ { - msgType := messageType(data[0]) - h.logger.Debugf("Received %s message (%d bytes, encryption level: %s)", msgType, len(data), encLevel) - if err := h.checkEncryptionLevel(msgType, encLevel); err != nil { - h.messageErrChan <- err - return false - } - h.messageChan <- data - switch h.perspective { - case protocol.PerspectiveClient: - return h.handleMessageForClient(msgType) - case protocol.PerspectiveServer: - return h.handleMessageForServer(msgType) - default: - panic("") - } -} - -func (h *cryptoSetup) checkEncryptionLevel(msgType messageType, encLevel protocol.EncryptionLevel) error { - var expected protocol.EncryptionLevel - switch msgType { - case typeClientHello, - typeServerHello: - expected = protocol.EncryptionInitial - case typeEncryptedExtensions, - typeCertificate, - typeCertificateRequest, - typeCertificateVerify, - typeFinished: - expected = protocol.EncryptionHandshake - case typeNewSessionTicket: - expected = protocol.Encryption1RTT - default: - return fmt.Errorf("unexpected handshake message: %d", msgType) - } - if encLevel != expected { - return fmt.Errorf("expected handshake message %s to have encryption level %s, has %s", msgType, expected, encLevel) - } - return nil -} - -func (h *cryptoSetup) handleMessageForServer(msgType messageType) bool { - switch msgType { - case typeClientHello: - select { - case <-h.writeRecord: - // If qtls sends a HelloRetryRequest, it will only write the record. - // If it accepts the ClientHello, it will first read the transport parameters. 
- h.logger.Debugf("Sending HelloRetryRequest") - return false - case data := <-h.paramsChan: - h.handleParamsCallback(data) - case <-h.handshakeDone: - return false - } - // get the handshake read key - select { - case <-h.receivedReadKey: - case <-h.handshakeDone: - return false - } - // get the handshake write key - select { - case <-h.receivedWriteKey: - case <-h.handshakeDone: - return false - } - // get the 1-RTT write key - select { - case <-h.receivedWriteKey: - case <-h.handshakeDone: - return false - } - return true - case typeCertificate, typeCertificateVerify: - // nothing to do - return false - case typeFinished: - // get the 1-RTT read key - select { - case <-h.receivedReadKey: - case <-h.handshakeDone: - return false - } - return true - default: - panic("unexpected handshake message") - } -} - -func (h *cryptoSetup) handleMessageForClient(msgType messageType) bool { - switch msgType { - case typeServerHello: - // get the handshake write key - select { - case <-h.writeRecord: - // If qtls writes in response to a ServerHello, this means that this ServerHello - // is a HelloRetryRequest. - // Otherwise, we'd just wait for the Certificate message. 
- h.logger.Debugf("ServerHello is a HelloRetryRequest") - return false - case <-h.receivedWriteKey: - case <-h.handshakeDone: - return false - } - // get the handshake read key - select { - case <-h.receivedReadKey: - case <-h.handshakeDone: - return false - } - return true - case typeEncryptedExtensions: - select { - case data := <-h.paramsChan: - h.handleParamsCallback(data) - case <-h.handshakeDone: - return false - } - return false - case typeCertificateRequest, typeCertificate, typeCertificateVerify: - // nothing to do - return false - case typeFinished: - // get the 1-RTT read key - select { - case <-h.receivedReadKey: - case <-h.handshakeDone: - return false - } - // get the handshake write key - select { - case <-h.receivedWriteKey: - case <-h.handshakeDone: - return false - } - return true - case typeNewSessionTicket: - <-h.handshakeDone // don't process session tickets before the handshake has completed - h.conn.HandlePostHandshakeMessage() - return false - default: - panic("unexpected handshake message: ") - } -} - -// ReadHandshakeMessage is called by TLS. -// It blocks until a new handshake message is available. 
-func (h *cryptoSetup) ReadHandshakeMessage() ([]byte, error) { - msg, ok := <-h.messageChan - if !ok { - return nil, errors.New("error while handling the handshake message") - } - return msg, nil -} - -func (h *cryptoSetup) SetReadKey(suite *qtls.CipherSuite, trafficSecret []byte) { - key := qtls.HkdfExpandLabel(suite.Hash(), trafficSecret, []byte{}, "quic key", suite.KeyLen()) - iv := qtls.HkdfExpandLabel(suite.Hash(), trafficSecret, []byte{}, "quic iv", suite.IVLen()) - hpKey := qtls.HkdfExpandLabel(suite.Hash(), trafficSecret, []byte{}, "quic hp", suite.KeyLen()) - hpDecrypter, err := aes.NewCipher(hpKey) - if err != nil { - panic(fmt.Sprintf("error creating new AES cipher: %s", err)) - } - - h.mutex.Lock() - switch h.readEncLevel { - case protocol.EncryptionInitial: - h.readEncLevel = protocol.EncryptionHandshake - h.handshakeOpener = newOpener(suite.AEAD(key, iv), hpDecrypter, false) - h.logger.Debugf("Installed Handshake Read keys") - case protocol.EncryptionHandshake: - h.readEncLevel = protocol.Encryption1RTT - h.opener = newOpener(suite.AEAD(key, iv), hpDecrypter, true) - h.logger.Debugf("Installed 1-RTT Read keys") - default: - panic("unexpected read encryption level") - } - h.mutex.Unlock() - h.receivedReadKey <- struct{}{} -} - -func (h *cryptoSetup) SetWriteKey(suite *qtls.CipherSuite, trafficSecret []byte) { - key := qtls.HkdfExpandLabel(suite.Hash(), trafficSecret, []byte{}, "quic key", suite.KeyLen()) - iv := qtls.HkdfExpandLabel(suite.Hash(), trafficSecret, []byte{}, "quic iv", suite.IVLen()) - hpKey := qtls.HkdfExpandLabel(suite.Hash(), trafficSecret, []byte{}, "quic hp", suite.KeyLen()) - hpEncrypter, err := aes.NewCipher(hpKey) - if err != nil { - panic(fmt.Sprintf("error creating new AES cipher: %s", err)) - } - - h.mutex.Lock() - switch h.writeEncLevel { - case protocol.EncryptionInitial: - h.writeEncLevel = protocol.EncryptionHandshake - h.handshakeSealer = newSealer(suite.AEAD(key, iv), hpEncrypter, false) - h.logger.Debugf("Installed 
Handshake Write keys") - case protocol.EncryptionHandshake: - h.writeEncLevel = protocol.Encryption1RTT - h.sealer = newSealer(suite.AEAD(key, iv), hpEncrypter, true) - h.logger.Debugf("Installed 1-RTT Write keys") - default: - panic("unexpected write encryption level") - } - h.mutex.Unlock() - h.receivedWriteKey <- struct{}{} -} - -// WriteRecord is called when TLS writes data -func (h *cryptoSetup) WriteRecord(p []byte) (int, error) { - defer func() { - select { - case h.writeRecord <- struct{}{}: - default: - } - }() - - h.mutex.Lock() - defer h.mutex.Unlock() - - switch h.writeEncLevel { - case protocol.EncryptionInitial: - // assume that the first WriteRecord call contains the ClientHello - n, err := h.initialStream.Write(p) - if !h.clientHelloWritten && h.perspective == protocol.PerspectiveClient { - h.clientHelloWritten = true - close(h.clientHelloWrittenChan) - } - return n, err - case protocol.EncryptionHandshake: - return h.handshakeStream.Write(p) - case protocol.Encryption1RTT: - return h.oneRTTStream.Write(p) - default: - panic(fmt.Sprintf("unexpected write encryption level: %s", h.writeEncLevel)) - } -} - -func (h *cryptoSetup) SendAlert(alert uint8) { - h.alertChan <- alert -} - -func (h *cryptoSetup) GetSealer() (protocol.EncryptionLevel, Sealer) { - h.mutex.Lock() - defer h.mutex.Unlock() - - if h.sealer != nil { - return protocol.Encryption1RTT, h.sealer - } - if h.handshakeSealer != nil { - return protocol.EncryptionHandshake, h.handshakeSealer - } - return protocol.EncryptionInitial, h.initialSealer -} - -func (h *cryptoSetup) GetSealerWithEncryptionLevel(level protocol.EncryptionLevel) (Sealer, error) { - errNoSealer := fmt.Errorf("CryptoSetup: no sealer with encryption level %s", level.String()) - - h.mutex.Lock() - defer h.mutex.Unlock() - - switch level { - case protocol.EncryptionInitial: - return h.initialSealer, nil - case protocol.EncryptionHandshake: - if h.handshakeSealer == nil { - return nil, errNoSealer - } - return 
h.handshakeSealer, nil - case protocol.Encryption1RTT: - if h.sealer == nil { - return nil, errNoSealer - } - return h.sealer, nil - default: - return nil, errNoSealer - } -} - -func (h *cryptoSetup) GetOpener(level protocol.EncryptionLevel) (Opener, error) { - h.mutex.Lock() - defer h.mutex.Unlock() - - switch level { - case protocol.EncryptionInitial: - return h.initialOpener, nil - case protocol.EncryptionHandshake: - if h.handshakeOpener == nil { - return nil, ErrOpenerNotYetAvailable - } - return h.handshakeOpener, nil - case protocol.Encryption1RTT: - if h.opener == nil { - return nil, ErrOpenerNotYetAvailable - } - return h.opener, nil - default: - return nil, fmt.Errorf("CryptoSetup: no opener with encryption level %s", level) - } -} - -func (h *cryptoSetup) ConnectionState() tls.ConnectionState { - cs := h.conn.ConnectionState() - // h.conn is a qtls.Conn, which returns a qtls.ConnectionState. - // qtls.ConnectionState is identical to the tls.ConnectionState. - // It contains an unexported field which is used ExportKeyingMaterial(). - // The only way to return a tls.ConnectionState is to use unsafe. - // In unsafe.go we check that the two objects are actually identical. - return *(*tls.ConnectionState)(unsafe.Pointer(&cs)) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/initial_aead.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/initial_aead.go deleted file mode 100644 index 5d78bbe2c..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/initial_aead.go +++ /dev/null @@ -1,52 +0,0 @@ -package handshake - -import ( - "crypto" - "crypto/aes" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/marten-seemann/qtls" -) - -var quicVersion1Salt = []byte{0xef, 0x4f, 0xb0, 0xab, 0xb4, 0x74, 0x70, 0xc4, 0x1b, 0xef, 0xcf, 0x80, 0x31, 0x33, 0x4f, 0xae, 0x48, 0x5e, 0x09, 0xa0} - -// NewInitialAEAD creates a new AEAD for Initial encryption / decryption. 
-func NewInitialAEAD(connID protocol.ConnectionID, pers protocol.Perspective) (Sealer, Opener, error) { - clientSecret, serverSecret := computeSecrets(connID) - var mySecret, otherSecret []byte - if pers == protocol.PerspectiveClient { - mySecret = clientSecret - otherSecret = serverSecret - } else { - mySecret = serverSecret - otherSecret = clientSecret - } - myKey, myHPKey, myIV := computeInitialKeyAndIV(mySecret) - otherKey, otherHPKey, otherIV := computeInitialKeyAndIV(otherSecret) - - encrypter := qtls.AEADAESGCMTLS13(myKey, myIV) - hpEncrypter, err := aes.NewCipher(myHPKey) - if err != nil { - return nil, nil, err - } - decrypter := qtls.AEADAESGCMTLS13(otherKey, otherIV) - hpDecrypter, err := aes.NewCipher(otherHPKey) - if err != nil { - return nil, nil, err - } - return newSealer(encrypter, hpEncrypter, false), newOpener(decrypter, hpDecrypter, false), nil -} - -func computeSecrets(connID protocol.ConnectionID) (clientSecret, serverSecret []byte) { - initialSecret := qtls.HkdfExtract(crypto.SHA256, connID, quicVersion1Salt) - clientSecret = qtls.HkdfExpandLabel(crypto.SHA256, initialSecret, []byte{}, "client in", crypto.SHA256.Size()) - serverSecret = qtls.HkdfExpandLabel(crypto.SHA256, initialSecret, []byte{}, "server in", crypto.SHA256.Size()) - return -} - -func computeInitialKeyAndIV(secret []byte) (key, hpKey, iv []byte) { - key = qtls.HkdfExpandLabel(crypto.SHA256, secret, []byte{}, "quic key", 16) - hpKey = qtls.HkdfExpandLabel(crypto.SHA256, secret, []byte{}, "quic hp", 16) - iv = qtls.HkdfExpandLabel(crypto.SHA256, secret, []byte{}, "quic iv", 12) - return -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/interface.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/interface.go deleted file mode 100644 index 225225138..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/interface.go +++ /dev/null @@ -1,52 +0,0 @@ -package handshake - -import ( - "crypto/tls" - "crypto/x509" - "io" - - 
"github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/marten-seemann/qtls" -) - -// Opener opens a packet -type Opener interface { - Open(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) ([]byte, error) - DecryptHeader(sample []byte, firstByte *byte, pnBytes []byte) -} - -// Sealer seals a packet -type Sealer interface { - Seal(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) []byte - EncryptHeader(sample []byte, firstByte *byte, pnBytes []byte) - Overhead() int -} - -// A tlsExtensionHandler sends and received the QUIC TLS extension. -type tlsExtensionHandler interface { - GetExtensions(msgType uint8) []qtls.Extension - ReceivedExtensions(msgType uint8, exts []qtls.Extension) - TransportParameters() <-chan []byte -} - -// CryptoSetup handles the handshake and protecting / unprotecting packets -type CryptoSetup interface { - RunHandshake() error - io.Closer - ChangeConnectionID(protocol.ConnectionID) error - - HandleMessage([]byte, protocol.EncryptionLevel) bool - ConnectionState() tls.ConnectionState - - GetSealer() (protocol.EncryptionLevel, Sealer) - GetSealerWithEncryptionLevel(protocol.EncryptionLevel) (Sealer, error) - GetOpener(protocol.EncryptionLevel) (Opener, error) -} - -// ConnectionState records basic details about the QUIC connection. -// Warning: This API should not be considered stable and might change soon. 
-type ConnectionState struct { - HandshakeComplete bool // handshake is complete - ServerName string // server name requested by client, if any (server side only) - PeerCertificates []*x509.Certificate // certificate chain presented by remote peer -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/qtls.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/qtls.go deleted file mode 100644 index 8880d9cb9..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/qtls.go +++ /dev/null @@ -1,132 +0,0 @@ -package handshake - -import ( - "crypto/tls" - "net" - "time" - "unsafe" - - "github.com/marten-seemann/qtls" -) - -type conn struct { - remoteAddr net.Addr -} - -func newConn(remote net.Addr) net.Conn { - return &conn{remoteAddr: remote} -} - -var _ net.Conn = &conn{} - -func (c *conn) Read([]byte) (int, error) { return 0, nil } -func (c *conn) Write([]byte) (int, error) { return 0, nil } -func (c *conn) Close() error { return nil } -func (c *conn) RemoteAddr() net.Addr { return c.remoteAddr } -func (c *conn) LocalAddr() net.Addr { return nil } -func (c *conn) SetReadDeadline(time.Time) error { return nil } -func (c *conn) SetWriteDeadline(time.Time) error { return nil } -func (c *conn) SetDeadline(time.Time) error { return nil } - -type clientSessionCache struct { - tls.ClientSessionCache -} - -var _ qtls.ClientSessionCache = &clientSessionCache{} - -func (c *clientSessionCache) Get(sessionKey string) (*qtls.ClientSessionState, bool) { - sess, ok := c.ClientSessionCache.Get(sessionKey) - if sess == nil { - return nil, ok - } - // qtls.ClientSessionState is identical to the tls.ClientSessionState. - // In order to allow users of quic-go to use a tls.Config, - // we need this workaround to use the ClientSessionCache. - // In unsafe.go we check that the two structs are actually identical. 
- usess := (*[unsafe.Sizeof(*sess)]byte)(unsafe.Pointer(sess))[:] - var session qtls.ClientSessionState - usession := (*[unsafe.Sizeof(session)]byte)(unsafe.Pointer(&session))[:] - copy(usession, usess) - return &session, ok -} - -func (c *clientSessionCache) Put(sessionKey string, cs *qtls.ClientSessionState) { - // qtls.ClientSessionState is identical to the tls.ClientSessionState. - // In order to allow users of quic-go to use a tls.Config, - // we need this workaround to use the ClientSessionCache. - // In unsafe.go we check that the two structs are actually identical. - usess := (*[unsafe.Sizeof(*cs)]byte)(unsafe.Pointer(cs))[:] - var session tls.ClientSessionState - usession := (*[unsafe.Sizeof(session)]byte)(unsafe.Pointer(&session))[:] - copy(usession, usess) - c.ClientSessionCache.Put(sessionKey, &session) -} - -func tlsConfigToQtlsConfig( - c *tls.Config, - recordLayer qtls.RecordLayer, - extHandler tlsExtensionHandler, -) *qtls.Config { - if c == nil { - c = &tls.Config{} - } - // Clone the config first. This executes the tls.Config.serverInit(). - // This sets the SessionTicketKey, if the user didn't supply one. 
- c = c.Clone() - // QUIC requires TLS 1.3 or newer - minVersion := c.MinVersion - if minVersion < qtls.VersionTLS13 { - minVersion = qtls.VersionTLS13 - } - maxVersion := c.MaxVersion - if maxVersion < qtls.VersionTLS13 { - maxVersion = qtls.VersionTLS13 - } - var getConfigForClient func(ch *tls.ClientHelloInfo) (*qtls.Config, error) - if c.GetConfigForClient != nil { - getConfigForClient = func(ch *tls.ClientHelloInfo) (*qtls.Config, error) { - tlsConf, err := c.GetConfigForClient(ch) - if err != nil { - return nil, err - } - if tlsConf == nil { - return nil, nil - } - return tlsConfigToQtlsConfig(tlsConf, recordLayer, extHandler), nil - } - } - var csc qtls.ClientSessionCache - if c.ClientSessionCache != nil { - csc = &clientSessionCache{c.ClientSessionCache} - } - return &qtls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - GetClientCertificate: c.GetClientCertificate, - GetConfigForClient: getConfigForClient, - VerifyPeerCertificate: c.VerifyPeerCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: csc, - MinVersion: minVersion, - MaxVersion: maxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - // no need to copy Renegotiation, it's not supported by TLS 1.3 - KeyLogWriter: c.KeyLogWriter, - AlternativeRecordLayer: recordLayer, - GetExtensions: extHandler.GetExtensions, - ReceivedExtensions: extHandler.ReceivedExtensions, - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/tls_extension_handler.go 
b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/tls_extension_handler.go deleted file mode 100644 index 590aafd1a..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/tls_extension_handler.go +++ /dev/null @@ -1,58 +0,0 @@ -package handshake - -import ( - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/marten-seemann/qtls" -) - -const quicTLSExtensionType = 0xffa5 - -type extensionHandler struct { - ourParams []byte - paramsChan chan []byte - - perspective protocol.Perspective -} - -var _ tlsExtensionHandler = &extensionHandler{} - -// newExtensionHandler creates a new extension handler -func newExtensionHandler(params []byte, pers protocol.Perspective) tlsExtensionHandler { - return &extensionHandler{ - ourParams: params, - paramsChan: make(chan []byte), - perspective: pers, - } -} - -func (h *extensionHandler) GetExtensions(msgType uint8) []qtls.Extension { - if (h.perspective == protocol.PerspectiveClient && messageType(msgType) != typeClientHello) || - (h.perspective == protocol.PerspectiveServer && messageType(msgType) != typeEncryptedExtensions) { - return nil - } - return []qtls.Extension{{ - Type: quicTLSExtensionType, - Data: h.ourParams, - }} -} - -func (h *extensionHandler) ReceivedExtensions(msgType uint8, exts []qtls.Extension) { - if (h.perspective == protocol.PerspectiveClient && messageType(msgType) != typeEncryptedExtensions) || - (h.perspective == protocol.PerspectiveServer && messageType(msgType) != typeClientHello) { - return - } - - var data []byte - for _, ext := range exts { - if ext.Type == quicTLSExtensionType { - data = ext.Data - break - } - } - - h.paramsChan <- data -} - -func (h *extensionHandler) TransportParameters() <-chan []byte { - return h.paramsChan -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/transport_parameters.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/transport_parameters.go deleted file mode 100644 index 
6810cbc37..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/transport_parameters.go +++ /dev/null @@ -1,260 +0,0 @@ -package handshake - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "sort" - "time" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -type transportParameterID uint16 - -const ( - originalConnectionIDParameterID transportParameterID = 0x0 - idleTimeoutParameterID transportParameterID = 0x1 - statelessResetTokenParameterID transportParameterID = 0x2 - maxPacketSizeParameterID transportParameterID = 0x3 - initialMaxDataParameterID transportParameterID = 0x4 - initialMaxStreamDataBidiLocalParameterID transportParameterID = 0x5 - initialMaxStreamDataBidiRemoteParameterID transportParameterID = 0x6 - initialMaxStreamDataUniParameterID transportParameterID = 0x7 - initialMaxStreamsBidiParameterID transportParameterID = 0x8 - initialMaxStreamsUniParameterID transportParameterID = 0x9 - ackDelayExponentParameterID transportParameterID = 0xa - disableMigrationParameterID transportParameterID = 0xc -) - -// TransportParameters are parameters sent to the peer during the handshake -type TransportParameters struct { - InitialMaxStreamDataBidiLocal protocol.ByteCount - InitialMaxStreamDataBidiRemote protocol.ByteCount - InitialMaxStreamDataUni protocol.ByteCount - InitialMaxData protocol.ByteCount - - AckDelayExponent uint8 - - MaxPacketSize protocol.ByteCount - - MaxUniStreams uint64 - MaxBidiStreams uint64 - - IdleTimeout time.Duration - DisableMigration bool - - StatelessResetToken *[16]byte - OriginalConnectionID protocol.ConnectionID -} - -// Unmarshal the transport parameters -func (p *TransportParameters) Unmarshal(data []byte, sentBy protocol.Perspective) error { - if len(data) < 2 { - return errors.New("transport parameter data too short") - } - length := binary.BigEndian.Uint16(data[:2]) - if len(data)-2 < int(length) { - return 
fmt.Errorf("expected transport parameters to be %d bytes long, have %d", length, len(data)-2) - } - - // needed to check that every parameter is only sent at most once - var parameterIDs []transportParameterID - - var readAckDelayExponent bool - - r := bytes.NewReader(data[2:]) - for r.Len() >= 4 { - paramIDInt, _ := utils.BigEndian.ReadUint16(r) - paramID := transportParameterID(paramIDInt) - paramLen, _ := utils.BigEndian.ReadUint16(r) - parameterIDs = append(parameterIDs, paramID) - switch paramID { - case ackDelayExponentParameterID: - readAckDelayExponent = true - fallthrough - case initialMaxStreamDataBidiLocalParameterID, - initialMaxStreamDataBidiRemoteParameterID, - initialMaxStreamDataUniParameterID, - initialMaxDataParameterID, - initialMaxStreamsBidiParameterID, - initialMaxStreamsUniParameterID, - idleTimeoutParameterID, - maxPacketSizeParameterID: - if err := p.readNumericTransportParameter(r, paramID, int(paramLen)); err != nil { - return err - } - default: - if r.Len() < int(paramLen) { - return fmt.Errorf("remaining length (%d) smaller than parameter length (%d)", r.Len(), paramLen) - } - switch paramID { - case disableMigrationParameterID: - if paramLen != 0 { - return fmt.Errorf("wrong length for disable_migration: %d (expected empty)", paramLen) - } - p.DisableMigration = true - case statelessResetTokenParameterID: - if sentBy == protocol.PerspectiveClient { - return errors.New("client sent a stateless_reset_token") - } - if paramLen != 16 { - return fmt.Errorf("wrong length for stateless_reset_token: %d (expected 16)", paramLen) - } - var token [16]byte - r.Read(token[:]) - p.StatelessResetToken = &token - case originalConnectionIDParameterID: - if sentBy == protocol.PerspectiveClient { - return errors.New("client sent an original_connection_id") - } - p.OriginalConnectionID, _ = protocol.ReadConnectionID(r, int(paramLen)) - default: - r.Seek(int64(paramLen), io.SeekCurrent) - } - } - } - - if !readAckDelayExponent { - p.AckDelayExponent = 
protocol.DefaultAckDelayExponent - } - - // check that every transport parameter was sent at most once - sort.Slice(parameterIDs, func(i, j int) bool { return parameterIDs[i] < parameterIDs[j] }) - for i := 0; i < len(parameterIDs)-1; i++ { - if parameterIDs[i] == parameterIDs[i+1] { - return fmt.Errorf("received duplicate transport parameter %#x", parameterIDs[i]) - } - } - - if r.Len() != 0 { - return fmt.Errorf("should have read all data. Still have %d bytes", r.Len()) - } - return nil -} - -func (p *TransportParameters) readNumericTransportParameter( - r *bytes.Reader, - paramID transportParameterID, - expectedLen int, -) error { - remainingLen := r.Len() - val, err := utils.ReadVarInt(r) - if err != nil { - return fmt.Errorf("error while reading transport parameter %d: %s", paramID, err) - } - if remainingLen-r.Len() != expectedLen { - return fmt.Errorf("inconsistent transport parameter length for %d", paramID) - } - switch paramID { - case initialMaxStreamDataBidiLocalParameterID: - p.InitialMaxStreamDataBidiLocal = protocol.ByteCount(val) - case initialMaxStreamDataBidiRemoteParameterID: - p.InitialMaxStreamDataBidiRemote = protocol.ByteCount(val) - case initialMaxStreamDataUniParameterID: - p.InitialMaxStreamDataUni = protocol.ByteCount(val) - case initialMaxDataParameterID: - p.InitialMaxData = protocol.ByteCount(val) - case initialMaxStreamsBidiParameterID: - p.MaxBidiStreams = val - case initialMaxStreamsUniParameterID: - p.MaxUniStreams = val - case idleTimeoutParameterID: - p.IdleTimeout = utils.MaxDuration(protocol.MinRemoteIdleTimeout, time.Duration(val)*time.Millisecond) - case maxPacketSizeParameterID: - if val < 1200 { - return fmt.Errorf("invalid value for max_packet_size: %d (minimum 1200)", val) - } - p.MaxPacketSize = protocol.ByteCount(val) - case ackDelayExponentParameterID: - if val > protocol.MaxAckDelayExponent { - return fmt.Errorf("invalid value for ack_delay_exponent: %d (maximum %d)", val, protocol.MaxAckDelayExponent) - } - 
p.AckDelayExponent = uint8(val) - default: - return fmt.Errorf("TransportParameter BUG: transport parameter %d not found", paramID) - } - return nil -} - -// Marshal the transport parameters -func (p *TransportParameters) Marshal() []byte { - b := &bytes.Buffer{} - b.Write([]byte{0, 0}) // length. Will be replaced later - - // initial_max_stream_data_bidi_local - utils.BigEndian.WriteUint16(b, uint16(initialMaxStreamDataBidiLocalParameterID)) - utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(uint64(p.InitialMaxStreamDataBidiLocal)))) - utils.WriteVarInt(b, uint64(p.InitialMaxStreamDataBidiLocal)) - // initial_max_stream_data_bidi_remote - utils.BigEndian.WriteUint16(b, uint16(initialMaxStreamDataBidiRemoteParameterID)) - utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(uint64(p.InitialMaxStreamDataBidiRemote)))) - utils.WriteVarInt(b, uint64(p.InitialMaxStreamDataBidiRemote)) - // initial_max_stream_data_uni - utils.BigEndian.WriteUint16(b, uint16(initialMaxStreamDataUniParameterID)) - utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(uint64(p.InitialMaxStreamDataUni)))) - utils.WriteVarInt(b, uint64(p.InitialMaxStreamDataUni)) - // initial_max_data - utils.BigEndian.WriteUint16(b, uint16(initialMaxDataParameterID)) - utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(uint64(p.InitialMaxData)))) - utils.WriteVarInt(b, uint64(p.InitialMaxData)) - // initial_max_bidi_streams - utils.BigEndian.WriteUint16(b, uint16(initialMaxStreamsBidiParameterID)) - utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(p.MaxBidiStreams))) - utils.WriteVarInt(b, p.MaxBidiStreams) - // initial_max_uni_streams - utils.BigEndian.WriteUint16(b, uint16(initialMaxStreamsUniParameterID)) - utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(p.MaxUniStreams))) - utils.WriteVarInt(b, p.MaxUniStreams) - // idle_timeout - idleTimeout := uint64(p.IdleTimeout / time.Millisecond) - utils.BigEndian.WriteUint16(b, uint16(idleTimeoutParameterID)) - utils.BigEndian.WriteUint16(b, 
uint16(utils.VarIntLen(idleTimeout))) - utils.WriteVarInt(b, idleTimeout) - // max_packet_size - utils.BigEndian.WriteUint16(b, uint16(maxPacketSizeParameterID)) - utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(uint64(protocol.MaxReceivePacketSize)))) - utils.WriteVarInt(b, uint64(protocol.MaxReceivePacketSize)) - // ack_delay_exponent - // Only send it if is different from the default value. - if p.AckDelayExponent != protocol.DefaultAckDelayExponent { - utils.BigEndian.WriteUint16(b, uint16(ackDelayExponentParameterID)) - utils.BigEndian.WriteUint16(b, uint16(utils.VarIntLen(uint64(p.AckDelayExponent)))) - utils.WriteVarInt(b, uint64(p.AckDelayExponent)) - } - // disable_migration - if p.DisableMigration { - utils.BigEndian.WriteUint16(b, uint16(disableMigrationParameterID)) - utils.BigEndian.WriteUint16(b, 0) - } - if p.StatelessResetToken != nil { - utils.BigEndian.WriteUint16(b, uint16(statelessResetTokenParameterID)) - utils.BigEndian.WriteUint16(b, 16) - b.Write(p.StatelessResetToken[:]) - } - // original_connection_id - if p.OriginalConnectionID.Len() > 0 { - utils.BigEndian.WriteUint16(b, uint16(originalConnectionIDParameterID)) - utils.BigEndian.WriteUint16(b, uint16(p.OriginalConnectionID.Len())) - b.Write(p.OriginalConnectionID.Bytes()) - } - - data := b.Bytes() - binary.BigEndian.PutUint16(data[:2], uint16(b.Len()-2)) - return data -} - -// String returns a string representation, intended for logging. 
-func (p *TransportParameters) String() string { - logString := "&handshake.TransportParameters{OriginalConnectionID: %s, InitialMaxStreamDataBidiLocal: %#x, InitialMaxStreamDataBidiRemote: %#x, InitialMaxStreamDataUni: %#x, InitialMaxData: %#x, MaxBidiStreams: %d, MaxUniStreams: %d, IdleTimeout: %s, AckDelayExponent: %d" - logParams := []interface{}{p.OriginalConnectionID, p.InitialMaxStreamDataBidiLocal, p.InitialMaxStreamDataBidiRemote, p.InitialMaxStreamDataUni, p.InitialMaxData, p.MaxBidiStreams, p.MaxUniStreams, p.IdleTimeout, p.AckDelayExponent} - if p.StatelessResetToken != nil { // the client never sends a stateless reset token - logString += ", StatelessResetToken: %#x" - logParams = append(logParams, *p.StatelessResetToken) - } - logString += "}" - return fmt.Sprintf(logString, logParams...) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/unsafe.go b/vendor/github.com/lucas-clemente/quic-go/internal/handshake/unsafe.go deleted file mode 100644 index fb051aebf..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/handshake/unsafe.go +++ /dev/null @@ -1,38 +0,0 @@ -package handshake - -// This package uses unsafe to convert between: -// * qtls.ConnectionState and tls.ConnectionState -// * qtls.ClientSessionState and tls.ClientSessionState -// We check in init() that this conversion actually is safe. 
- -import ( - "crypto/tls" - "reflect" - - "github.com/marten-seemann/qtls" -) - -func init() { - if !structsEqual(&tls.ConnectionState{}, &qtls.ConnectionState{}) { - panic("qtls.ConnectionState not compatible with tls.ConnectionState") - } - if !structsEqual(&tls.ClientSessionState{}, &qtls.ClientSessionState{}) { - panic("qtls.ClientSessionState not compatible with tls.ClientSessionState") - } -} - -func structsEqual(a, b interface{}) bool { - sa := reflect.ValueOf(a).Elem() - sb := reflect.ValueOf(b).Elem() - if sa.NumField() != sb.NumField() { - return false - } - for i := 0; i < sa.NumField(); i++ { - fa := sa.Type().Field(i) - fb := sb.Type().Field(i) - if !reflect.DeepEqual(fa.Index, fb.Index) || fa.Name != fb.Name || fa.Anonymous != fb.Anonymous || fa.Offset != fb.Offset || !reflect.DeepEqual(fa.Type, fb.Type) { - return false - } - } - return true -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/connection_id.go b/vendor/github.com/lucas-clemente/quic-go/internal/protocol/connection_id.go deleted file mode 100644 index f99461b2c..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/connection_id.go +++ /dev/null @@ -1,69 +0,0 @@ -package protocol - -import ( - "bytes" - "crypto/rand" - "fmt" - "io" -) - -// A ConnectionID in QUIC -type ConnectionID []byte - -const maxConnectionIDLen = 18 - -// GenerateConnectionID generates a connection ID using cryptographic random -func GenerateConnectionID(len int) (ConnectionID, error) { - b := make([]byte, len) - if _, err := rand.Read(b); err != nil { - return nil, err - } - return ConnectionID(b), nil -} - -// GenerateConnectionIDForInitial generates a connection ID for the Initial packet. -// It uses a length randomly chosen between 8 and 18 bytes. 
-func GenerateConnectionIDForInitial() (ConnectionID, error) { - r := make([]byte, 1) - if _, err := rand.Read(r); err != nil { - return nil, err - } - len := MinConnectionIDLenInitial + int(r[0])%(maxConnectionIDLen-MinConnectionIDLenInitial+1) - return GenerateConnectionID(len) -} - -// ReadConnectionID reads a connection ID of length len from the given io.Reader. -// It returns io.EOF if there are not enough bytes to read. -func ReadConnectionID(r io.Reader, len int) (ConnectionID, error) { - if len == 0 { - return nil, nil - } - c := make(ConnectionID, len) - _, err := io.ReadFull(r, c) - if err == io.ErrUnexpectedEOF { - return nil, io.EOF - } - return c, err -} - -// Equal says if two connection IDs are equal -func (c ConnectionID) Equal(other ConnectionID) bool { - return bytes.Equal(c, other) -} - -// Len returns the length of the connection ID in bytes -func (c ConnectionID) Len() int { - return len(c) -} - -// Bytes returns the byte representation -func (c ConnectionID) Bytes() []byte { - return []byte(c) -} - -func (c ConnectionID) String() string { - if c.Len() == 0 { - return "(empty)" - } - return fmt.Sprintf("%#x", c.Bytes()) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/encryption_level.go b/vendor/github.com/lucas-clemente/quic-go/internal/protocol/encryption_level.go deleted file mode 100644 index 4b059b3a8..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/encryption_level.go +++ /dev/null @@ -1,28 +0,0 @@ -package protocol - -// EncryptionLevel is the encryption level -// Default value is Unencrypted -type EncryptionLevel int - -const ( - // EncryptionUnspecified is a not specified encryption level - EncryptionUnspecified EncryptionLevel = iota - // EncryptionInitial is the Initial encryption level - EncryptionInitial - // EncryptionHandshake is the Handshake encryption level - EncryptionHandshake - // Encryption1RTT is the 1-RTT encryption level - Encryption1RTT -) - -func (e 
EncryptionLevel) String() string { - switch e { - case EncryptionInitial: - return "Initial" - case EncryptionHandshake: - return "Handshake" - case Encryption1RTT: - return "1-RTT" - } - return "unknown" -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/packet_number.go b/vendor/github.com/lucas-clemente/quic-go/internal/protocol/packet_number.go deleted file mode 100644 index 405a07ac7..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/packet_number.go +++ /dev/null @@ -1,85 +0,0 @@ -package protocol - -// PacketNumberLen is the length of the packet number in bytes -type PacketNumberLen uint8 - -const ( - // PacketNumberLenInvalid is the default value and not a valid length for a packet number - PacketNumberLenInvalid PacketNumberLen = 0 - // PacketNumberLen1 is a packet number length of 1 byte - PacketNumberLen1 PacketNumberLen = 1 - // PacketNumberLen2 is a packet number length of 2 bytes - PacketNumberLen2 PacketNumberLen = 2 - // PacketNumberLen3 is a packet number length of 3 bytes - PacketNumberLen3 PacketNumberLen = 3 - // PacketNumberLen4 is a packet number length of 4 bytes - PacketNumberLen4 PacketNumberLen = 4 -) - -// DecodePacketNumber calculates the packet number based on the received packet number, its length and the last seen packet number -func DecodePacketNumber( - packetNumberLength PacketNumberLen, - lastPacketNumber PacketNumber, - wirePacketNumber PacketNumber, -) PacketNumber { - var epochDelta PacketNumber - switch packetNumberLength { - case PacketNumberLen1: - epochDelta = PacketNumber(1) << 8 - case PacketNumberLen2: - epochDelta = PacketNumber(1) << 16 - case PacketNumberLen3: - epochDelta = PacketNumber(1) << 24 - case PacketNumberLen4: - epochDelta = PacketNumber(1) << 32 - } - epoch := lastPacketNumber & ^(epochDelta - 1) - prevEpochBegin := epoch - epochDelta - nextEpochBegin := epoch + epochDelta - return closestTo( - lastPacketNumber+1, - epoch+wirePacketNumber, - 
closestTo(lastPacketNumber+1, prevEpochBegin+wirePacketNumber, nextEpochBegin+wirePacketNumber), - ) -} - -func closestTo(target, a, b PacketNumber) PacketNumber { - if delta(target, a) < delta(target, b) { - return a - } - return b -} - -func delta(a, b PacketNumber) PacketNumber { - if a < b { - return b - a - } - return a - b -} - -// GetPacketNumberLengthForHeader gets the length of the packet number for the public header -// it never chooses a PacketNumberLen of 1 byte, since this is too short under certain circumstances -func GetPacketNumberLengthForHeader(packetNumber, leastUnacked PacketNumber) PacketNumberLen { - diff := uint64(packetNumber - leastUnacked) - if diff < (1 << (16 - 1)) { - return PacketNumberLen2 - } - if diff < (1 << (24 - 1)) { - return PacketNumberLen3 - } - return PacketNumberLen4 -} - -// GetPacketNumberLength gets the minimum length needed to fully represent the packet number -func GetPacketNumberLength(packetNumber PacketNumber) PacketNumberLen { - if packetNumber < (1 << (uint8(PacketNumberLen1) * 8)) { - return PacketNumberLen1 - } - if packetNumber < (1 << (uint8(PacketNumberLen2) * 8)) { - return PacketNumberLen2 - } - if packetNumber < (1 << (uint8(PacketNumberLen3) * 8)) { - return PacketNumberLen3 - } - return PacketNumberLen4 -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/params.go b/vendor/github.com/lucas-clemente/quic-go/internal/protocol/params.go deleted file mode 100644 index 88e0ae458..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/params.go +++ /dev/null @@ -1,126 +0,0 @@ -package protocol - -import "time" - -// MaxPacketSizeIPv4 is the maximum packet size that we use for sending IPv4 packets. -const MaxPacketSizeIPv4 = 1252 - -// MaxPacketSizeIPv6 is the maximum packet size that we use for sending IPv6 packets. 
-const MaxPacketSizeIPv6 = 1232 - -const defaultMaxCongestionWindowPackets = 1000 - -// DefaultMaxCongestionWindow is the default for the max congestion window -const DefaultMaxCongestionWindow ByteCount = defaultMaxCongestionWindowPackets * DefaultTCPMSS - -// InitialCongestionWindow is the initial congestion window in QUIC packets -const InitialCongestionWindow ByteCount = 32 * DefaultTCPMSS - -// MaxUndecryptablePackets limits the number of undecryptable packets that are queued in the session. -const MaxUndecryptablePackets = 10 - -// ConnectionFlowControlMultiplier determines how much larger the connection flow control windows needs to be relative to any stream's flow control window -// This is the value that Chromium is using -const ConnectionFlowControlMultiplier = 1.5 - -// InitialMaxStreamData is the stream-level flow control window for receiving data -const InitialMaxStreamData = (1 << 10) * 512 // 512 kb - -// InitialMaxData is the connection-level flow control window for receiving data -const InitialMaxData = ConnectionFlowControlMultiplier * InitialMaxStreamData - -// DefaultMaxReceiveStreamFlowControlWindow is the default maximum stream-level flow control window for receiving data, for the server -const DefaultMaxReceiveStreamFlowControlWindow = 6 * (1 << 20) // 6 MB - -// DefaultMaxReceiveConnectionFlowControlWindow is the default connection-level flow control window for receiving data, for the server -const DefaultMaxReceiveConnectionFlowControlWindow = 15 * (1 << 20) // 12 MB - -// WindowUpdateThreshold is the fraction of the receive window that has to be consumed before an higher offset is advertised to the client -const WindowUpdateThreshold = 0.25 - -// DefaultMaxIncomingStreams is the maximum number of streams that a peer may open -const DefaultMaxIncomingStreams = 100 - -// DefaultMaxIncomingUniStreams is the maximum number of unidirectional streams that a peer may open -const DefaultMaxIncomingUniStreams = 100 - -// 
MaxSessionUnprocessedPackets is the max number of packets stored in each session that are not yet processed. -const MaxSessionUnprocessedPackets = defaultMaxCongestionWindowPackets - -// SkipPacketAveragePeriodLength is the average period length in which one packet number is skipped to prevent an Optimistic ACK attack -const SkipPacketAveragePeriodLength PacketNumber = 500 - -// MaxTrackedSkippedPackets is the maximum number of skipped packet numbers the SentPacketHandler keep track of for Optimistic ACK attack mitigation -const MaxTrackedSkippedPackets = 10 - -// MaxAcceptQueueSize is the maximum number of sessions that the server queues for accepting. -// If the queue is full, new connection attempts will be rejected. -const MaxAcceptQueueSize = 32 - -// CookieExpiryTime is the valid time of a cookie -const CookieExpiryTime = 24 * time.Hour - -// MaxOutstandingSentPackets is maximum number of packets saved for retransmission. -// When reached, it imposes a soft limit on sending new packets: -// Sending ACKs and retransmission is still allowed, but now new regular packets can be sent. -const MaxOutstandingSentPackets = 2 * defaultMaxCongestionWindowPackets - -// MaxTrackedSentPackets is maximum number of sent packets saved for retransmission. -// When reached, no more packets will be sent. -// This value *must* be larger than MaxOutstandingSentPackets. 
-const MaxTrackedSentPackets = MaxOutstandingSentPackets * 5 / 4 - -// MaxTrackedReceivedAckRanges is the maximum number of ACK ranges tracked -const MaxTrackedReceivedAckRanges = defaultMaxCongestionWindowPackets - -// MaxNonRetransmittableAcks is the maximum number of packets containing an ACK, but no retransmittable frames, that we send in a row -const MaxNonRetransmittableAcks = 19 - -// MaxStreamFrameSorterGaps is the maximum number of gaps between received StreamFrames -// prevents DoS attacks against the streamFrameSorter -const MaxStreamFrameSorterGaps = 1000 - -// MaxCryptoStreamOffset is the maximum offset allowed on any of the crypto streams. -// This limits the size of the ClientHello and Certificates that can be received. -const MaxCryptoStreamOffset = 16 * (1 << 10) - -// MinRemoteIdleTimeout is the minimum value that we accept for the remote idle timeout -const MinRemoteIdleTimeout = 5 * time.Second - -// DefaultIdleTimeout is the default idle timeout -const DefaultIdleTimeout = 30 * time.Second - -// DefaultHandshakeTimeout is the default timeout for a connection until the crypto handshake succeeds. -const DefaultHandshakeTimeout = 10 * time.Second - -// RetiredConnectionIDDeleteTimeout is the time we keep closed sessions around in order to retransmit the CONNECTION_CLOSE. -// after this time all information about the old connection will be deleted -const RetiredConnectionIDDeleteTimeout = 5 * time.Second - -// MinStreamFrameSize is the minimum size that has to be left in a packet, so that we add another STREAM frame. -// This avoids splitting up STREAM frames into small pieces, which has 2 advantages: -// 1. it reduces the framing overhead -// 2. it reduces the head-of-line blocking, when a packet is lost -const MinStreamFrameSize ByteCount = 128 - -// MaxPostHandshakeCryptoFrameSize is the maximum size of CRYPTO frames -// we send after the handshake completes. 
-const MaxPostHandshakeCryptoFrameSize ByteCount = 1000 - -// MaxAckFrameSize is the maximum size for an ACK frame that we write -// Due to the varint encoding, ACK frames can grow (almost) indefinitely large. -// The MaxAckFrameSize should be large enough to encode many ACK range, -// but must ensure that a maximum size ACK frame fits into one packet. -const MaxAckFrameSize ByteCount = 1000 - -// MinPacingDelay is the minimum duration that is used for packet pacing -// If the packet packing frequency is higher, multiple packets might be sent at once. -// Example: For a packet pacing delay of 20 microseconds, we would send 5 packets at once, wait for 100 microseconds, and so forth. -const MinPacingDelay time.Duration = 100 * time.Microsecond - -// DefaultConnectionIDLength is the connection ID length that is used for multiplexed connections -// if no other value is configured. -const DefaultConnectionIDLength = 4 - -// AckDelayExponent is the ack delay exponent used when sending ACKs. -const AckDelayExponent = 3 diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/perspective.go b/vendor/github.com/lucas-clemente/quic-go/internal/protocol/perspective.go deleted file mode 100644 index 43358fecb..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/perspective.go +++ /dev/null @@ -1,26 +0,0 @@ -package protocol - -// Perspective determines if we're acting as a server or a client -type Perspective int - -// the perspectives -const ( - PerspectiveServer Perspective = 1 - PerspectiveClient Perspective = 2 -) - -// Opposite returns the perspective of the peer -func (p Perspective) Opposite() Perspective { - return 3 - p -} - -func (p Perspective) String() string { - switch p { - case PerspectiveServer: - return "Server" - case PerspectiveClient: - return "Client" - default: - return "invalid perspective" - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/protocol.go 
b/vendor/github.com/lucas-clemente/quic-go/internal/protocol/protocol.go deleted file mode 100644 index 6e59afcb4..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/protocol.go +++ /dev/null @@ -1,75 +0,0 @@ -package protocol - -import ( - "fmt" -) - -// A PacketNumber in QUIC -type PacketNumber uint64 - -// The PacketType is the Long Header Type -type PacketType uint8 - -const ( - // PacketTypeInitial is the packet type of an Initial packet - PacketTypeInitial PacketType = 1 + iota - // PacketTypeRetry is the packet type of a Retry packet - PacketTypeRetry - // PacketTypeHandshake is the packet type of a Handshake packet - PacketTypeHandshake - // PacketType0RTT is the packet type of a 0-RTT packet - PacketType0RTT -) - -func (t PacketType) String() string { - switch t { - case PacketTypeInitial: - return "Initial" - case PacketTypeRetry: - return "Retry" - case PacketTypeHandshake: - return "Handshake" - case PacketType0RTT: - return "0-RTT Protected" - default: - return fmt.Sprintf("unknown packet type: %d", t) - } -} - -// A ByteCount in QUIC -type ByteCount uint64 - -// MaxByteCount is the maximum value of a ByteCount -const MaxByteCount = ByteCount(1<<62 - 1) - -// An ApplicationErrorCode is an application-defined error code. -type ApplicationErrorCode uint16 - -// MaxReceivePacketSize maximum packet size of any QUIC packet, based on -// ethernet's max size, minus the IP and UDP headers. IPv6 has a 40 byte header, -// UDP adds an additional 8 bytes. This is a total overhead of 48 bytes. -// Ethernet's max packet size is 1500 bytes, 1500 - 48 = 1452. -const MaxReceivePacketSize ByteCount = 1452 - -// DefaultTCPMSS is the default maximum packet size used in the Linux TCP implementation. -// Used in QUIC for congestion window computations in bytes. -const DefaultTCPMSS ByteCount = 1460 - -// MinInitialPacketSize is the minimum size an Initial packet is required to have. 
-const MinInitialPacketSize = 1200 - -// MinStatelessResetSize is the minimum size of a stateless reset packet -const MinStatelessResetSize = 1 /* first byte */ + 22 /* random bytes */ + 16 /* token */ - -// MinConnectionIDLenInitial is the minimum length of the destination connection ID on an Initial packet. -const MinConnectionIDLenInitial = 8 - -// MaxStreamCount is the maximum stream count value that can be sent in MAX_STREAMS frames -// and as the stream count in the transport parameters -const MaxStreamCount = 1 << 60 - -// DefaultAckDelayExponent is the default ack delay exponent -const DefaultAckDelayExponent = 3 - -// MaxAckDelayExponent is the maximum ack delay exponent -const MaxAckDelayExponent = 20 diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/stream_id.go b/vendor/github.com/lucas-clemente/quic-go/internal/protocol/stream_id.go deleted file mode 100644 index b96e0c2bc..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/stream_id.go +++ /dev/null @@ -1,67 +0,0 @@ -package protocol - -// A StreamID in QUIC -type StreamID uint64 - -// StreamType encodes if this is a unidirectional or bidirectional stream -type StreamType uint8 - -const ( - // StreamTypeUni is a unidirectional stream - StreamTypeUni StreamType = iota - // StreamTypeBidi is a bidirectional stream - StreamTypeBidi -) - -// InitiatedBy says if the stream was initiated by the client or by the server -func (s StreamID) InitiatedBy() Perspective { - if s%2 == 0 { - return PerspectiveClient - } - return PerspectiveServer -} - -//Type says if this is a unidirectional or bidirectional stream -func (s StreamID) Type() StreamType { - if s%4 >= 2 { - return StreamTypeUni - } - return StreamTypeBidi -} - -// StreamNum returns how many streams in total are below this -// Example: for stream 9 it returns 3 (i.e. 
streams 1, 5 and 9) -func (s StreamID) StreamNum() uint64 { - return uint64(s/4) + 1 -} - -// MaxStreamID is the highest stream ID that a peer is allowed to open, -// when it is allowed to open numStreams. -func MaxStreamID(stype StreamType, numStreams uint64, pers Perspective) StreamID { - if numStreams == 0 { - return 0 - } - var first StreamID - switch stype { - case StreamTypeBidi: - switch pers { - case PerspectiveClient: - first = 0 - case PerspectiveServer: - first = 1 - } - case StreamTypeUni: - switch pers { - case PerspectiveClient: - first = 2 - case PerspectiveServer: - first = 3 - } - } - return first + 4*StreamID(numStreams-1) -} - -// FirstStream returns the first valid stream ID -func FirstStream(stype StreamType, pers Perspective) StreamID { - return MaxStreamID(stype, 1, pers) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/version.go b/vendor/github.com/lucas-clemente/quic-go/internal/protocol/version.go deleted file mode 100644 index 6e7d6d953..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/protocol/version.go +++ /dev/null @@ -1,119 +0,0 @@ -package protocol - -import ( - "crypto/rand" - "encoding/binary" - "fmt" - "math" -) - -// VersionNumber is a version number as int -type VersionNumber uint32 - -// gQUIC version range as defined in the wiki: https://github.com/quicwg/base-drafts/wiki/QUIC-Versions -const ( - gquicVersion0 = 0x51303030 - maxGquicVersion = 0x51303439 -) - -// The version numbers, making grepping easier -const ( - VersionTLS VersionNumber = VersionMilestone0_11_1 - VersionWhatever VersionNumber = 1 // for when the version doesn't matter - VersionUnknown VersionNumber = math.MaxUint32 - - VersionMilestone0_11_1 VersionNumber = 0xff000013 // QUIC WG draft-19 -) - -// SupportedVersions lists the versions that the server supports -// must be in sorted descending order -var SupportedVersions = []VersionNumber{VersionMilestone0_11_1} - -// IsValidVersion says if the version is known to 
quic-go -func IsValidVersion(v VersionNumber) bool { - return v == VersionTLS || IsSupportedVersion(SupportedVersions, v) -} - -func (vn VersionNumber) String() string { - switch vn { - case VersionWhatever: - return "whatever" - case VersionUnknown: - return "unknown" - case VersionMilestone0_11_1: - return "QUIC WG draft-19" - default: - if vn.isGQUIC() { - return fmt.Sprintf("gQUIC %d", vn.toGQUICVersion()) - } - return fmt.Sprintf("%#x", uint32(vn)) - } -} - -// ToAltSvc returns the representation of the version for the H2 Alt-Svc parameters -func (vn VersionNumber) ToAltSvc() string { - return fmt.Sprintf("%d", vn) -} - -func (vn VersionNumber) isGQUIC() bool { - return vn > gquicVersion0 && vn <= maxGquicVersion -} - -func (vn VersionNumber) toGQUICVersion() int { - return int(10*(vn-gquicVersion0)/0x100) + int(vn%0x10) -} - -// IsSupportedVersion returns true if the server supports this version -func IsSupportedVersion(supported []VersionNumber, v VersionNumber) bool { - for _, t := range supported { - if t == v { - return true - } - } - return false -} - -// ChooseSupportedVersion finds the best version in the overlap of ours and theirs -// ours is a slice of versions that we support, sorted by our preference (descending) -// theirs is a slice of versions offered by the peer. The order does not matter. -// The bool returned indicates if a matching version was found. -func ChooseSupportedVersion(ours, theirs []VersionNumber) (VersionNumber, bool) { - for _, ourVer := range ours { - for _, theirVer := range theirs { - if ourVer == theirVer { - return ourVer, true - } - } - } - return 0, false -} - -// generateReservedVersion generates a reserved version number (v & 0x0f0f0f0f == 0x0a0a0a0a) -func generateReservedVersion() VersionNumber { - b := make([]byte, 4) - _, _ = rand.Read(b) // ignore the error here. 
Failure to read random data doesn't break anything - return VersionNumber((binary.BigEndian.Uint32(b) | 0x0a0a0a0a) & 0xfafafafa) -} - -// GetGreasedVersions adds one reserved version number to a slice of version numbers, at a random position -func GetGreasedVersions(supported []VersionNumber) []VersionNumber { - b := make([]byte, 1) - _, _ = rand.Read(b) // ignore the error here. Failure to read random data doesn't break anything - randPos := int(b[0]) % (len(supported) + 1) - greased := make([]VersionNumber, len(supported)+1) - copy(greased, supported[:randPos]) - greased[randPos] = generateReservedVersion() - copy(greased[randPos+1:], supported[randPos:]) - return greased -} - -// StripGreasedVersions strips all greased versions from a slice of versions -func StripGreasedVersions(versions []VersionNumber) []VersionNumber { - realVersions := make([]VersionNumber, 0, len(versions)) - for _, v := range versions { - if v&0x0f0f0f0f != 0x0a0a0a0a { - realVersions = append(realVersions, v) - } - } - return realVersions -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qerr/error_codes.go b/vendor/github.com/lucas-clemente/quic-go/internal/qerr/error_codes.go deleted file mode 100644 index 091de1ca3..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/qerr/error_codes.go +++ /dev/null @@ -1,71 +0,0 @@ -package qerr - -import ( - "fmt" - - "github.com/marten-seemann/qtls" -) - -// ErrorCode can be used as a normal error without reason. 
-type ErrorCode uint16 - -// The error codes defined by QUIC -const ( - NoError ErrorCode = 0x0 - InternalError ErrorCode = 0x1 - ServerBusy ErrorCode = 0x2 - FlowControlError ErrorCode = 0x3 - StreamLimitError ErrorCode = 0x4 - StreamStateError ErrorCode = 0x5 - FinalSizeError ErrorCode = 0x6 - FrameEncodingError ErrorCode = 0x7 - TransportParameterError ErrorCode = 0x8 - VersionNegotiationError ErrorCode = 0x9 - ProtocolViolation ErrorCode = 0xa - InvalidMigration ErrorCode = 0xc -) - -func (e ErrorCode) isCryptoError() bool { - return e >= 0x100 && e < 0x200 -} - -func (e ErrorCode) Error() string { - if e.isCryptoError() { - return fmt.Sprintf("%s: %s", e.String(), qtls.Alert(e-0x100).Error()) - } - return e.String() -} - -func (e ErrorCode) String() string { - switch e { - case NoError: - return "NO_ERROR" - case InternalError: - return "INTERNAL_ERROR" - case ServerBusy: - return "SERVER_BUSY" - case FlowControlError: - return "FLOW_CONTROL_ERROR" - case StreamLimitError: - return "STREAM_LIMIT_ERROR" - case StreamStateError: - return "STREAM_STATE_ERROR" - case FinalSizeError: - return "FINAL_SIZE_ERROR" - case FrameEncodingError: - return "FRAME_ENCODING_ERROR" - case TransportParameterError: - return "TRANSPORT_PARAMETER_ERROR" - case VersionNegotiationError: - return "VERSION_NEGOTIATION_ERROR" - case ProtocolViolation: - return "PROTOCOL_VIOLATION" - case InvalidMigration: - return "INVALID_MIGRATION" - default: - if e.isCryptoError() { - return "CRYPTO_ERROR" - } - return fmt.Sprintf("unknown error code: %#x", uint16(e)) - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/qerr/quic_error.go b/vendor/github.com/lucas-clemente/quic-go/internal/qerr/quic_error.go deleted file mode 100644 index 3a1718b02..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/qerr/quic_error.go +++ /dev/null @@ -1,73 +0,0 @@ -package qerr - -import ( - "fmt" - "net" -) - -// A QuicError consists of an error code plus a error reason -type 
QuicError struct { - ErrorCode ErrorCode - ErrorMessage string - isTimeout bool -} - -var _ net.Error = &QuicError{} - -// Error creates a new QuicError instance -func Error(errorCode ErrorCode, errorMessage string) *QuicError { - return &QuicError{ - ErrorCode: errorCode, - ErrorMessage: errorMessage, - } -} - -// TimeoutError creates a new QuicError instance for a timeout error -func TimeoutError(errorMessage string) *QuicError { - return &QuicError{ - ErrorMessage: errorMessage, - isTimeout: true, - } -} - -// CryptoError create a new QuicError instance for a crypto error -func CryptoError(tlsAlert uint8, errorMessage string) *QuicError { - return &QuicError{ - ErrorCode: 0x100 + ErrorCode(tlsAlert), - ErrorMessage: errorMessage, - } -} - -func (e *QuicError) Error() string { - if len(e.ErrorMessage) == 0 { - return e.ErrorCode.Error() - } - return fmt.Sprintf("%s: %s", e.ErrorCode.String(), e.ErrorMessage) -} - -// IsCryptoError says if this error is a crypto error -func (e *QuicError) IsCryptoError() bool { - return e.ErrorCode.isCryptoError() -} - -// Temporary says if the error is temporary. -func (e *QuicError) Temporary() bool { - return false -} - -// Timeout says if this error is a timeout. -func (e *QuicError) Timeout() bool { - return e.isTimeout -} - -// ToQuicError converts an arbitrary error to a QuicError. It leaves QuicErrors -// unchanged, and properly handles `ErrorCode`s. 
-func ToQuicError(err error) *QuicError { - switch e := err.(type) { - case *QuicError: - return e - case ErrorCode: - return Error(e, "") - } - return Error(InternalError, err.Error()) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/atomic_bool.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/atomic_bool.go deleted file mode 100644 index cf4642504..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/atomic_bool.go +++ /dev/null @@ -1,22 +0,0 @@ -package utils - -import "sync/atomic" - -// An AtomicBool is an atomic bool -type AtomicBool struct { - v int32 -} - -// Set sets the value -func (a *AtomicBool) Set(value bool) { - var n int32 - if value { - n = 1 - } - atomic.StoreInt32(&a.v, n) -} - -// Get gets the value -func (a *AtomicBool) Get() bool { - return atomic.LoadInt32(&a.v) != 0 -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteinterval_linkedlist.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteinterval_linkedlist.go deleted file mode 100644 index 096023ef2..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteinterval_linkedlist.go +++ /dev/null @@ -1,217 +0,0 @@ -// This file was automatically generated by genny. -// Any changes will be lost if this file is regenerated. -// see https://github.com/cheekybits/genny - -package utils - -// Linked list implementation from the Go standard library. - -// ByteIntervalElement is an element of a linked list. -type ByteIntervalElement struct { - // Next and previous pointers in the doubly-linked list of elements. - // To simplify the implementation, internally a list l is implemented - // as a ring, such that &l.root is both the next element of the last - // list element (l.Back()) and the previous element of the first list - // element (l.Front()). - next, prev *ByteIntervalElement - - // The list to which this element belongs. - list *ByteIntervalList - - // The value stored with this element. 
- Value ByteInterval -} - -// Next returns the next list element or nil. -func (e *ByteIntervalElement) Next() *ByteIntervalElement { - if p := e.next; e.list != nil && p != &e.list.root { - return p - } - return nil -} - -// Prev returns the previous list element or nil. -func (e *ByteIntervalElement) Prev() *ByteIntervalElement { - if p := e.prev; e.list != nil && p != &e.list.root { - return p - } - return nil -} - -// ByteIntervalList is a linked list of ByteIntervals. -type ByteIntervalList struct { - root ByteIntervalElement // sentinel list element, only &root, root.prev, and root.next are used - len int // current list length excluding (this) sentinel element -} - -// Init initializes or clears list l. -func (l *ByteIntervalList) Init() *ByteIntervalList { - l.root.next = &l.root - l.root.prev = &l.root - l.len = 0 - return l -} - -// NewByteIntervalList returns an initialized list. -func NewByteIntervalList() *ByteIntervalList { return new(ByteIntervalList).Init() } - -// Len returns the number of elements of list l. -// The complexity is O(1). -func (l *ByteIntervalList) Len() int { return l.len } - -// Front returns the first element of list l or nil if the list is empty. -func (l *ByteIntervalList) Front() *ByteIntervalElement { - if l.len == 0 { - return nil - } - return l.root.next -} - -// Back returns the last element of list l or nil if the list is empty. -func (l *ByteIntervalList) Back() *ByteIntervalElement { - if l.len == 0 { - return nil - } - return l.root.prev -} - -// lazyInit lazily initializes a zero List value. -func (l *ByteIntervalList) lazyInit() { - if l.root.next == nil { - l.Init() - } -} - -// insert inserts e after at, increments l.len, and returns e. -func (l *ByteIntervalList) insert(e, at *ByteIntervalElement) *ByteIntervalElement { - n := at.next - at.next = e - e.prev = at - e.next = n - n.prev = e - e.list = l - l.len++ - return e -} - -// insertValue is a convenience wrapper for insert(&Element{Value: v}, at). 
-func (l *ByteIntervalList) insertValue(v ByteInterval, at *ByteIntervalElement) *ByteIntervalElement { - return l.insert(&ByteIntervalElement{Value: v}, at) -} - -// remove removes e from its list, decrements l.len, and returns e. -func (l *ByteIntervalList) remove(e *ByteIntervalElement) *ByteIntervalElement { - e.prev.next = e.next - e.next.prev = e.prev - e.next = nil // avoid memory leaks - e.prev = nil // avoid memory leaks - e.list = nil - l.len-- - return e -} - -// Remove removes e from l if e is an element of list l. -// It returns the element value e.Value. -// The element must not be nil. -func (l *ByteIntervalList) Remove(e *ByteIntervalElement) ByteInterval { - if e.list == l { - // if e.list == l, l must have been initialized when e was inserted - // in l or l == nil (e is a zero Element) and l.remove will crash - l.remove(e) - } - return e.Value -} - -// PushFront inserts a new element e with value v at the front of list l and returns e. -func (l *ByteIntervalList) PushFront(v ByteInterval) *ByteIntervalElement { - l.lazyInit() - return l.insertValue(v, &l.root) -} - -// PushBack inserts a new element e with value v at the back of list l and returns e. -func (l *ByteIntervalList) PushBack(v ByteInterval) *ByteIntervalElement { - l.lazyInit() - return l.insertValue(v, l.root.prev) -} - -// InsertBefore inserts a new element e with value v immediately before mark and returns e. -// If mark is not an element of l, the list is not modified. -// The mark must not be nil. -func (l *ByteIntervalList) InsertBefore(v ByteInterval, mark *ByteIntervalElement) *ByteIntervalElement { - if mark.list != l { - return nil - } - // see comment in List.Remove about initialization of l - return l.insertValue(v, mark.prev) -} - -// InsertAfter inserts a new element e with value v immediately after mark and returns e. -// If mark is not an element of l, the list is not modified. -// The mark must not be nil. 
-func (l *ByteIntervalList) InsertAfter(v ByteInterval, mark *ByteIntervalElement) *ByteIntervalElement { - if mark.list != l { - return nil - } - // see comment in List.Remove about initialization of l - return l.insertValue(v, mark) -} - -// MoveToFront moves element e to the front of list l. -// If e is not an element of l, the list is not modified. -// The element must not be nil. -func (l *ByteIntervalList) MoveToFront(e *ByteIntervalElement) { - if e.list != l || l.root.next == e { - return - } - // see comment in List.Remove about initialization of l - l.insert(l.remove(e), &l.root) -} - -// MoveToBack moves element e to the back of list l. -// If e is not an element of l, the list is not modified. -// The element must not be nil. -func (l *ByteIntervalList) MoveToBack(e *ByteIntervalElement) { - if e.list != l || l.root.prev == e { - return - } - // see comment in List.Remove about initialization of l - l.insert(l.remove(e), l.root.prev) -} - -// MoveBefore moves element e to its new position before mark. -// If e or mark is not an element of l, or e == mark, the list is not modified. -// The element and mark must not be nil. -func (l *ByteIntervalList) MoveBefore(e, mark *ByteIntervalElement) { - if e.list != l || e == mark || mark.list != l { - return - } - l.insert(l.remove(e), mark.prev) -} - -// MoveAfter moves element e to its new position after mark. -// If e or mark is not an element of l, or e == mark, the list is not modified. -// The element and mark must not be nil. -func (l *ByteIntervalList) MoveAfter(e, mark *ByteIntervalElement) { - if e.list != l || e == mark || mark.list != l { - return - } - l.insert(l.remove(e), mark) -} - -// PushBackList inserts a copy of an other list at the back of list l. -// The lists l and other may be the same. They must not be nil. 
-func (l *ByteIntervalList) PushBackList(other *ByteIntervalList) { - l.lazyInit() - for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() { - l.insertValue(e.Value, l.root.prev) - } -} - -// PushFrontList inserts a copy of an other list at the front of list l. -// The lists l and other may be the same. They must not be nil. -func (l *ByteIntervalList) PushFrontList(other *ByteIntervalList) { - l.lazyInit() - for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() { - l.insertValue(e.Value, &l.root) - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder.go deleted file mode 100644 index 6b92cfa26..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder.go +++ /dev/null @@ -1,17 +0,0 @@ -package utils - -import ( - "bytes" - "io" -) - -// A ByteOrder specifies how to convert byte sequences into 16-, 32-, or 64-bit unsigned integers. -type ByteOrder interface { - ReadUintN(b io.ByteReader, length uint8) (uint64, error) - ReadUint32(io.ByteReader) (uint32, error) - ReadUint16(io.ByteReader) (uint16, error) - - WriteUintN(b *bytes.Buffer, length uint8, value uint64) - WriteUint32(*bytes.Buffer, uint32) - WriteUint16(*bytes.Buffer, uint16) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder_big_endian.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder_big_endian.go deleted file mode 100644 index eede9cd72..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/byteorder_big_endian.go +++ /dev/null @@ -1,74 +0,0 @@ -package utils - -import ( - "bytes" - "io" -) - -// BigEndian is the big-endian implementation of ByteOrder. 
-var BigEndian ByteOrder = bigEndian{} - -type bigEndian struct{} - -var _ ByteOrder = &bigEndian{} - -// ReadUintN reads N bytes -func (bigEndian) ReadUintN(b io.ByteReader, length uint8) (uint64, error) { - var res uint64 - for i := uint8(0); i < length; i++ { - bt, err := b.ReadByte() - if err != nil { - return 0, err - } - res ^= uint64(bt) << ((length - 1 - i) * 8) - } - return res, nil -} - -// ReadUint32 reads a uint32 -func (bigEndian) ReadUint32(b io.ByteReader) (uint32, error) { - var b1, b2, b3, b4 uint8 - var err error - if b4, err = b.ReadByte(); err != nil { - return 0, err - } - if b3, err = b.ReadByte(); err != nil { - return 0, err - } - if b2, err = b.ReadByte(); err != nil { - return 0, err - } - if b1, err = b.ReadByte(); err != nil { - return 0, err - } - return uint32(b1) + uint32(b2)<<8 + uint32(b3)<<16 + uint32(b4)<<24, nil -} - -// ReadUint16 reads a uint16 -func (bigEndian) ReadUint16(b io.ByteReader) (uint16, error) { - var b1, b2 uint8 - var err error - if b2, err = b.ReadByte(); err != nil { - return 0, err - } - if b1, err = b.ReadByte(); err != nil { - return 0, err - } - return uint16(b1) + uint16(b2)<<8, nil -} - -func (bigEndian) WriteUintN(b *bytes.Buffer, length uint8, i uint64) { - for j := length; j > 0; j-- { - b.WriteByte(uint8(i >> (8 * (j - 1)))) - } -} - -// WriteUint32 writes a uint32 -func (bigEndian) WriteUint32(b *bytes.Buffer, i uint32) { - b.Write([]byte{uint8(i >> 24), uint8(i >> 16), uint8(i >> 8), uint8(i)}) -} - -// WriteUint16 writes a uint16 -func (bigEndian) WriteUint16(b *bytes.Buffer, i uint16) { - b.Write([]byte{uint8(i >> 8), uint8(i)}) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/gen.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/gen.go deleted file mode 100644 index bb839be66..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/gen.go +++ /dev/null @@ -1,4 +0,0 @@ -package utils - -//go:generate genny -pkg utils -in linkedlist/linkedlist.go 
-out byteinterval_linkedlist.go gen Item=ByteInterval -//go:generate genny -pkg utils -in linkedlist/linkedlist.go -out packetinterval_linkedlist.go gen Item=PacketInterval diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/host.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/host.go deleted file mode 100644 index a1d6453b0..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/host.go +++ /dev/null @@ -1,27 +0,0 @@ -package utils - -import ( - "net/url" - "strings" -) - -// HostnameFromAddr determines the hostname in an address string -func HostnameFromAddr(addr string) (string, error) { - p, err := url.Parse(addr) - if err != nil { - return "", err - } - h := p.Host - - // copied from https://golang.org/src/net/http/transport.go - if hasPort(h) { - h = h[:strings.LastIndex(h, ":")] - } - - return h, nil -} - -// copied from https://golang.org/src/net/http/http.go -func hasPort(s string) bool { - return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/log.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/log.go deleted file mode 100644 index e27f01b4a..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/log.go +++ /dev/null @@ -1,131 +0,0 @@ -package utils - -import ( - "fmt" - "log" - "os" - "strings" - "time" -) - -// LogLevel of quic-go -type LogLevel uint8 - -const ( - // LogLevelNothing disables - LogLevelNothing LogLevel = iota - // LogLevelError enables err logs - LogLevelError - // LogLevelInfo enables info logs (e.g. packets) - LogLevelInfo - // LogLevelDebug enables debug logs (e.g. packet contents) - LogLevelDebug -) - -const logEnv = "QUIC_GO_LOG_LEVEL" - -// A Logger logs. 
-type Logger interface { - SetLogLevel(LogLevel) - SetLogTimeFormat(format string) - WithPrefix(prefix string) Logger - Debug() bool - - Errorf(format string, args ...interface{}) - Infof(format string, args ...interface{}) - Debugf(format string, args ...interface{}) -} - -// DefaultLogger is used by quic-go for logging. -var DefaultLogger Logger - -type defaultLogger struct { - prefix string - - logLevel LogLevel - timeFormat string -} - -var _ Logger = &defaultLogger{} - -// SetLogLevel sets the log level -func (l *defaultLogger) SetLogLevel(level LogLevel) { - l.logLevel = level -} - -// SetLogTimeFormat sets the format of the timestamp -// an empty string disables the logging of timestamps -func (l *defaultLogger) SetLogTimeFormat(format string) { - log.SetFlags(0) // disable timestamp logging done by the log package - l.timeFormat = format -} - -// Debugf logs something -func (l *defaultLogger) Debugf(format string, args ...interface{}) { - if l.logLevel == LogLevelDebug { - l.logMessage(format, args...) - } -} - -// Infof logs something -func (l *defaultLogger) Infof(format string, args ...interface{}) { - if l.logLevel >= LogLevelInfo { - l.logMessage(format, args...) - } -} - -// Errorf logs something -func (l *defaultLogger) Errorf(format string, args ...interface{}) { - if l.logLevel >= LogLevelError { - l.logMessage(format, args...) - } -} - -func (l *defaultLogger) logMessage(format string, args ...interface{}) { - var pre string - - if len(l.timeFormat) > 0 { - pre = time.Now().Format(l.timeFormat) + " " - } - if len(l.prefix) > 0 { - pre += l.prefix + " " - } - log.Printf(pre+format, args...) 
-} - -func (l *defaultLogger) WithPrefix(prefix string) Logger { - if len(l.prefix) > 0 { - prefix = l.prefix + " " + prefix - } - return &defaultLogger{ - logLevel: l.logLevel, - timeFormat: l.timeFormat, - prefix: prefix, - } -} - -// Debug returns true if the log level is LogLevelDebug -func (l *defaultLogger) Debug() bool { - return l.logLevel == LogLevelDebug -} - -func init() { - DefaultLogger = &defaultLogger{} - DefaultLogger.SetLogLevel(readLoggingEnv()) -} - -func readLoggingEnv() LogLevel { - switch strings.ToLower(os.Getenv(logEnv)) { - case "": - return LogLevelNothing - case "debug": - return LogLevelDebug - case "info": - return LogLevelInfo - case "error": - return LogLevelError - default: - fmt.Fprintln(os.Stderr, "invalid quic-go log level, see https://github.com/lucas-clemente/quic-go/wiki/Logging") - return LogLevelNothing - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/minmax.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/minmax.go deleted file mode 100644 index 84cbec7b9..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/minmax.go +++ /dev/null @@ -1,159 +0,0 @@ -package utils - -import ( - "math" - "time" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -// InfDuration is a duration of infinite length -const InfDuration = time.Duration(math.MaxInt64) - -// Max returns the maximum of two Ints -func Max(a, b int) int { - if a < b { - return b - } - return a -} - -// MaxUint32 returns the maximum of two uint32 -func MaxUint32(a, b uint32) uint32 { - if a < b { - return b - } - return a -} - -// MaxUint64 returns the maximum of two uint64 -func MaxUint64(a, b uint64) uint64 { - if a < b { - return b - } - return a -} - -// MinUint64 returns the maximum of two uint64 -func MinUint64(a, b uint64) uint64 { - if a < b { - return a - } - return b -} - -// Min returns the minimum of two Ints -func Min(a, b int) int { - if a < b { - return a - } - return b -} - -// MinUint32 
returns the maximum of two uint32 -func MinUint32(a, b uint32) uint32 { - if a < b { - return a - } - return b -} - -// MinInt64 returns the minimum of two int64 -func MinInt64(a, b int64) int64 { - if a < b { - return a - } - return b -} - -// MaxInt64 returns the minimum of two int64 -func MaxInt64(a, b int64) int64 { - if a > b { - return a - } - return b -} - -// MinByteCount returns the minimum of two ByteCounts -func MinByteCount(a, b protocol.ByteCount) protocol.ByteCount { - if a < b { - return a - } - return b -} - -// MaxByteCount returns the maximum of two ByteCounts -func MaxByteCount(a, b protocol.ByteCount) protocol.ByteCount { - if a < b { - return b - } - return a -} - -// MaxDuration returns the max duration -func MaxDuration(a, b time.Duration) time.Duration { - if a > b { - return a - } - return b -} - -// MinDuration returns the minimum duration -func MinDuration(a, b time.Duration) time.Duration { - if a > b { - return b - } - return a -} - -// AbsDuration returns the absolute value of a time duration -func AbsDuration(d time.Duration) time.Duration { - if d >= 0 { - return d - } - return -d -} - -// MinTime returns the earlier time -func MinTime(a, b time.Time) time.Time { - if a.After(b) { - return b - } - return a -} - -// MinNonZeroTime returns the earlist time that is not time.Time{} -// If both a and b are time.Time{}, it returns time.Time{} -func MinNonZeroTime(a, b time.Time) time.Time { - if a.IsZero() { - return b - } - if b.IsZero() { - return a - } - return MinTime(a, b) -} - -// MaxTime returns the later time -func MaxTime(a, b time.Time) time.Time { - if a.After(b) { - return a - } - return b -} - -// MaxPacketNumber returns the max packet number -func MaxPacketNumber(a, b protocol.PacketNumber) protocol.PacketNumber { - if a > b { - return a - } - return b -} - -// MinPacketNumber returns the min packet number -func MinPacketNumber(a, b protocol.PacketNumber) protocol.PacketNumber { - if a < b { - return a - } - return b -} diff 
--git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/packet_interval.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/packet_interval.go deleted file mode 100644 index 62cc8b9cb..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/packet_interval.go +++ /dev/null @@ -1,9 +0,0 @@ -package utils - -import "github.com/lucas-clemente/quic-go/internal/protocol" - -// PacketInterval is an interval from one PacketNumber to the other -type PacketInterval struct { - Start protocol.PacketNumber - End protocol.PacketNumber -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/packetinterval_linkedlist.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/packetinterval_linkedlist.go deleted file mode 100644 index b461e85a9..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/packetinterval_linkedlist.go +++ /dev/null @@ -1,217 +0,0 @@ -// This file was automatically generated by genny. -// Any changes will be lost if this file is regenerated. -// see https://github.com/cheekybits/genny - -package utils - -// Linked list implementation from the Go standard library. - -// PacketIntervalElement is an element of a linked list. -type PacketIntervalElement struct { - // Next and previous pointers in the doubly-linked list of elements. - // To simplify the implementation, internally a list l is implemented - // as a ring, such that &l.root is both the next element of the last - // list element (l.Back()) and the previous element of the first list - // element (l.Front()). - next, prev *PacketIntervalElement - - // The list to which this element belongs. - list *PacketIntervalList - - // The value stored with this element. - Value PacketInterval -} - -// Next returns the next list element or nil. -func (e *PacketIntervalElement) Next() *PacketIntervalElement { - if p := e.next; e.list != nil && p != &e.list.root { - return p - } - return nil -} - -// Prev returns the previous list element or nil. 
-func (e *PacketIntervalElement) Prev() *PacketIntervalElement { - if p := e.prev; e.list != nil && p != &e.list.root { - return p - } - return nil -} - -// PacketIntervalList is a linked list of PacketIntervals. -type PacketIntervalList struct { - root PacketIntervalElement // sentinel list element, only &root, root.prev, and root.next are used - len int // current list length excluding (this) sentinel element -} - -// Init initializes or clears list l. -func (l *PacketIntervalList) Init() *PacketIntervalList { - l.root.next = &l.root - l.root.prev = &l.root - l.len = 0 - return l -} - -// NewPacketIntervalList returns an initialized list. -func NewPacketIntervalList() *PacketIntervalList { return new(PacketIntervalList).Init() } - -// Len returns the number of elements of list l. -// The complexity is O(1). -func (l *PacketIntervalList) Len() int { return l.len } - -// Front returns the first element of list l or nil if the list is empty. -func (l *PacketIntervalList) Front() *PacketIntervalElement { - if l.len == 0 { - return nil - } - return l.root.next -} - -// Back returns the last element of list l or nil if the list is empty. -func (l *PacketIntervalList) Back() *PacketIntervalElement { - if l.len == 0 { - return nil - } - return l.root.prev -} - -// lazyInit lazily initializes a zero List value. -func (l *PacketIntervalList) lazyInit() { - if l.root.next == nil { - l.Init() - } -} - -// insert inserts e after at, increments l.len, and returns e. -func (l *PacketIntervalList) insert(e, at *PacketIntervalElement) *PacketIntervalElement { - n := at.next - at.next = e - e.prev = at - e.next = n - n.prev = e - e.list = l - l.len++ - return e -} - -// insertValue is a convenience wrapper for insert(&Element{Value: v}, at). -func (l *PacketIntervalList) insertValue(v PacketInterval, at *PacketIntervalElement) *PacketIntervalElement { - return l.insert(&PacketIntervalElement{Value: v}, at) -} - -// remove removes e from its list, decrements l.len, and returns e. 
-func (l *PacketIntervalList) remove(e *PacketIntervalElement) *PacketIntervalElement { - e.prev.next = e.next - e.next.prev = e.prev - e.next = nil // avoid memory leaks - e.prev = nil // avoid memory leaks - e.list = nil - l.len-- - return e -} - -// Remove removes e from l if e is an element of list l. -// It returns the element value e.Value. -// The element must not be nil. -func (l *PacketIntervalList) Remove(e *PacketIntervalElement) PacketInterval { - if e.list == l { - // if e.list == l, l must have been initialized when e was inserted - // in l or l == nil (e is a zero Element) and l.remove will crash - l.remove(e) - } - return e.Value -} - -// PushFront inserts a new element e with value v at the front of list l and returns e. -func (l *PacketIntervalList) PushFront(v PacketInterval) *PacketIntervalElement { - l.lazyInit() - return l.insertValue(v, &l.root) -} - -// PushBack inserts a new element e with value v at the back of list l and returns e. -func (l *PacketIntervalList) PushBack(v PacketInterval) *PacketIntervalElement { - l.lazyInit() - return l.insertValue(v, l.root.prev) -} - -// InsertBefore inserts a new element e with value v immediately before mark and returns e. -// If mark is not an element of l, the list is not modified. -// The mark must not be nil. -func (l *PacketIntervalList) InsertBefore(v PacketInterval, mark *PacketIntervalElement) *PacketIntervalElement { - if mark.list != l { - return nil - } - // see comment in List.Remove about initialization of l - return l.insertValue(v, mark.prev) -} - -// InsertAfter inserts a new element e with value v immediately after mark and returns e. -// If mark is not an element of l, the list is not modified. -// The mark must not be nil. 
-func (l *PacketIntervalList) InsertAfter(v PacketInterval, mark *PacketIntervalElement) *PacketIntervalElement { - if mark.list != l { - return nil - } - // see comment in List.Remove about initialization of l - return l.insertValue(v, mark) -} - -// MoveToFront moves element e to the front of list l. -// If e is not an element of l, the list is not modified. -// The element must not be nil. -func (l *PacketIntervalList) MoveToFront(e *PacketIntervalElement) { - if e.list != l || l.root.next == e { - return - } - // see comment in List.Remove about initialization of l - l.insert(l.remove(e), &l.root) -} - -// MoveToBack moves element e to the back of list l. -// If e is not an element of l, the list is not modified. -// The element must not be nil. -func (l *PacketIntervalList) MoveToBack(e *PacketIntervalElement) { - if e.list != l || l.root.prev == e { - return - } - // see comment in List.Remove about initialization of l - l.insert(l.remove(e), l.root.prev) -} - -// MoveBefore moves element e to its new position before mark. -// If e or mark is not an element of l, or e == mark, the list is not modified. -// The element and mark must not be nil. -func (l *PacketIntervalList) MoveBefore(e, mark *PacketIntervalElement) { - if e.list != l || e == mark || mark.list != l { - return - } - l.insert(l.remove(e), mark.prev) -} - -// MoveAfter moves element e to its new position after mark. -// If e or mark is not an element of l, or e == mark, the list is not modified. -// The element and mark must not be nil. -func (l *PacketIntervalList) MoveAfter(e, mark *PacketIntervalElement) { - if e.list != l || e == mark || mark.list != l { - return - } - l.insert(l.remove(e), mark) -} - -// PushBackList inserts a copy of an other list at the back of list l. -// The lists l and other may be the same. They must not be nil. 
-func (l *PacketIntervalList) PushBackList(other *PacketIntervalList) { - l.lazyInit() - for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() { - l.insertValue(e.Value, l.root.prev) - } -} - -// PushFrontList inserts a copy of an other list at the front of list l. -// The lists l and other may be the same. They must not be nil. -func (l *PacketIntervalList) PushFrontList(other *PacketIntervalList) { - l.lazyInit() - for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() { - l.insertValue(e.Value, &l.root) - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/streamframe_interval.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/streamframe_interval.go deleted file mode 100644 index ec16d251b..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/streamframe_interval.go +++ /dev/null @@ -1,9 +0,0 @@ -package utils - -import "github.com/lucas-clemente/quic-go/internal/protocol" - -// ByteInterval is an interval from one ByteCount to the other -type ByteInterval struct { - Start protocol.ByteCount - End protocol.ByteCount -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/timer.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/timer.go deleted file mode 100644 index 1fefc6ec8..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/timer.go +++ /dev/null @@ -1,48 +0,0 @@ -package utils - -import ( - "math" - "time" -) - -// A Timer wrapper that behaves correctly when resetting -type Timer struct { - t *time.Timer - read bool - deadline time.Time -} - -// NewTimer creates a new timer that is not set -func NewTimer() *Timer { - return &Timer{t: time.NewTimer(time.Duration(math.MaxInt64))} -} - -// Chan returns the channel of the wrapped timer -func (t *Timer) Chan() <-chan time.Time { - return t.t.C -} - -// Reset the timer, no matter whether the value was read or not -func (t *Timer) Reset(deadline time.Time) { - if deadline.Equal(t.deadline) && 
!t.read { - // No need to reset the timer - return - } - - // We need to drain the timer if the value from its channel was not read yet. - // See https://groups.google.com/forum/#!topic/golang-dev/c9UUfASVPoU - if !t.t.Stop() && !t.read { - <-t.t.C - } - if !deadline.IsZero() { - t.t.Reset(time.Until(deadline)) - } - - t.read = false - t.deadline = deadline -} - -// SetRead should be called after the value from the chan was read -func (t *Timer) SetRead() { - t.read = true -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/utils/varint.go b/vendor/github.com/lucas-clemente/quic-go/internal/utils/varint.go deleted file mode 100644 index 35e8674e2..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/utils/varint.go +++ /dev/null @@ -1,101 +0,0 @@ -package utils - -import ( - "bytes" - "fmt" - "io" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -// taken from the QUIC draft -const ( - maxVarInt1 = 63 - maxVarInt2 = 16383 - maxVarInt4 = 1073741823 - maxVarInt8 = 4611686018427387903 -) - -// ReadVarInt reads a number in the QUIC varint format -func ReadVarInt(b io.ByteReader) (uint64, error) { - firstByte, err := b.ReadByte() - if err != nil { - return 0, err - } - // the first two bits of the first byte encode the length - len := 1 << ((firstByte & 0xc0) >> 6) - b1 := firstByte & (0xff - 0xc0) - if len == 1 { - return uint64(b1), nil - } - b2, err := b.ReadByte() - if err != nil { - return 0, err - } - if len == 2 { - return uint64(b2) + uint64(b1)<<8, nil - } - b3, err := b.ReadByte() - if err != nil { - return 0, err - } - b4, err := b.ReadByte() - if err != nil { - return 0, err - } - if len == 4 { - return uint64(b4) + uint64(b3)<<8 + uint64(b2)<<16 + uint64(b1)<<24, nil - } - b5, err := b.ReadByte() - if err != nil { - return 0, err - } - b6, err := b.ReadByte() - if err != nil { - return 0, err - } - b7, err := b.ReadByte() - if err != nil { - return 0, err - } - b8, err := b.ReadByte() - if err != nil { - return 0, 
err - } - return uint64(b8) + uint64(b7)<<8 + uint64(b6)<<16 + uint64(b5)<<24 + uint64(b4)<<32 + uint64(b3)<<40 + uint64(b2)<<48 + uint64(b1)<<56, nil -} - -// WriteVarInt writes a number in the QUIC varint format -func WriteVarInt(b *bytes.Buffer, i uint64) { - if i <= maxVarInt1 { - b.WriteByte(uint8(i)) - } else if i <= maxVarInt2 { - b.Write([]byte{uint8(i>>8) | 0x40, uint8(i)}) - } else if i <= maxVarInt4 { - b.Write([]byte{uint8(i>>24) | 0x80, uint8(i >> 16), uint8(i >> 8), uint8(i)}) - } else if i <= maxVarInt8 { - b.Write([]byte{ - uint8(i>>56) | 0xc0, uint8(i >> 48), uint8(i >> 40), uint8(i >> 32), - uint8(i >> 24), uint8(i >> 16), uint8(i >> 8), uint8(i), - }) - } else { - panic(fmt.Sprintf("%#x doesn't fit into 62 bits", i)) - } -} - -// VarIntLen determines the number of bytes that will be needed to write a number -func VarIntLen(i uint64) protocol.ByteCount { - if i <= maxVarInt1 { - return 1 - } - if i <= maxVarInt2 { - return 2 - } - if i <= maxVarInt4 { - return 4 - } - if i <= maxVarInt8 { - return 8 - } - panic(fmt.Sprintf("%#x doesn't fit into 62 bits", i)) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_frame.go deleted file mode 100644 index 7909bffa8..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_frame.go +++ /dev/null @@ -1,226 +0,0 @@ -package wire - -import ( - "bytes" - "errors" - "sort" - "time" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -var errInvalidAckRanges = errors.New("AckFrame: ACK frame contains invalid ACK ranges") - -// An AckFrame is an ACK frame -type AckFrame struct { - AckRanges []AckRange // has to be ordered. 
The highest ACK range goes first, the lowest ACK range goes last - DelayTime time.Duration -} - -// parseAckFrame reads an ACK frame -func parseAckFrame(r *bytes.Reader, ackDelayExponent uint8, version protocol.VersionNumber) (*AckFrame, error) { - typeByte, err := r.ReadByte() - if err != nil { - return nil, err - } - ecn := typeByte&0x1 > 0 - - frame := &AckFrame{} - - la, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - largestAcked := protocol.PacketNumber(la) - delay, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - frame.DelayTime = time.Duration(delay*1< largestAcked { - return nil, errors.New("invalid first ACK range") - } - smallest := largestAcked - ackBlock - - // read all the other ACK ranges - frame.AckRanges = append(frame.AckRanges, AckRange{Smallest: smallest, Largest: largestAcked}) - for i := uint64(0); i < numBlocks; i++ { - g, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - gap := protocol.PacketNumber(g) - if smallest < gap+2 { - return nil, errInvalidAckRanges - } - largest := smallest - gap - 2 - - ab, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - ackBlock := protocol.PacketNumber(ab) - - if ackBlock > largest { - return nil, errInvalidAckRanges - } - smallest = largest - ackBlock - frame.AckRanges = append(frame.AckRanges, AckRange{Smallest: smallest, Largest: largest}) - } - - if !frame.validateAckRanges() { - return nil, errInvalidAckRanges - } - - // parse (and skip) the ECN section - if ecn { - for i := 0; i < 3; i++ { - if _, err := utils.ReadVarInt(r); err != nil { - return nil, err - } - } - } - - return frame, nil -} - -// Write writes an ACK frame. 
-func (f *AckFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - b.WriteByte(0x2) - utils.WriteVarInt(b, uint64(f.LargestAcked())) - utils.WriteVarInt(b, encodeAckDelay(f.DelayTime)) - - numRanges := f.numEncodableAckRanges() - utils.WriteVarInt(b, uint64(numRanges-1)) - - // write the first range - _, firstRange := f.encodeAckRange(0) - utils.WriteVarInt(b, firstRange) - - // write all the other range - for i := 1; i < numRanges; i++ { - gap, len := f.encodeAckRange(i) - utils.WriteVarInt(b, gap) - utils.WriteVarInt(b, len) - } - return nil -} - -// Length of a written frame -func (f *AckFrame) Length(version protocol.VersionNumber) protocol.ByteCount { - largestAcked := f.AckRanges[0].Largest - numRanges := f.numEncodableAckRanges() - - length := 1 + utils.VarIntLen(uint64(largestAcked)) + utils.VarIntLen(encodeAckDelay(f.DelayTime)) - - length += utils.VarIntLen(uint64(numRanges - 1)) - lowestInFirstRange := f.AckRanges[0].Smallest - length += utils.VarIntLen(uint64(largestAcked - lowestInFirstRange)) - - for i := 1; i < numRanges; i++ { - gap, len := f.encodeAckRange(i) - length += utils.VarIntLen(gap) - length += utils.VarIntLen(len) - } - return length -} - -// gets the number of ACK ranges that can be encoded -// such that the resulting frame is smaller than the maximum ACK frame size -func (f *AckFrame) numEncodableAckRanges() int { - length := 1 + utils.VarIntLen(uint64(f.LargestAcked())) + utils.VarIntLen(encodeAckDelay(f.DelayTime)) - length += 2 // assume that the number of ranges will consume 2 bytes - for i := 1; i < len(f.AckRanges); i++ { - gap, len := f.encodeAckRange(i) - rangeLen := utils.VarIntLen(gap) + utils.VarIntLen(len) - if length+rangeLen > protocol.MaxAckFrameSize { - // Writing range i would exceed the MaxAckFrameSize. - // So encode one range less than that. 
- return i - 1 - } - length += rangeLen - } - return len(f.AckRanges) -} - -func (f *AckFrame) encodeAckRange(i int) (uint64 /* gap */, uint64 /* length */) { - if i == 0 { - return 0, uint64(f.AckRanges[0].Largest - f.AckRanges[0].Smallest) - } - return uint64(f.AckRanges[i-1].Smallest - f.AckRanges[i].Largest - 2), - uint64(f.AckRanges[i].Largest - f.AckRanges[i].Smallest) -} - -// HasMissingRanges returns if this frame reports any missing packets -func (f *AckFrame) HasMissingRanges() bool { - return len(f.AckRanges) > 1 -} - -func (f *AckFrame) validateAckRanges() bool { - if len(f.AckRanges) == 0 { - return false - } - - // check the validity of every single ACK range - for _, ackRange := range f.AckRanges { - if ackRange.Smallest > ackRange.Largest { - return false - } - } - - // check the consistency for ACK with multiple NACK ranges - for i, ackRange := range f.AckRanges { - if i == 0 { - continue - } - lastAckRange := f.AckRanges[i-1] - if lastAckRange.Smallest <= ackRange.Smallest { - return false - } - if lastAckRange.Smallest <= ackRange.Largest+1 { - return false - } - } - - return true -} - -// LargestAcked is the largest acked packet number -func (f *AckFrame) LargestAcked() protocol.PacketNumber { - return f.AckRanges[0].Largest -} - -// LowestAcked is the lowest acked packet number -func (f *AckFrame) LowestAcked() protocol.PacketNumber { - return f.AckRanges[len(f.AckRanges)-1].Smallest -} - -// AcksPacket determines if this ACK frame acks a certain packet number -func (f *AckFrame) AcksPacket(p protocol.PacketNumber) bool { - if p < f.LowestAcked() || p > f.LargestAcked() { - return false - } - - i := sort.Search(len(f.AckRanges), func(i int) bool { - return p >= f.AckRanges[i].Smallest - }) - // i will always be < len(f.AckRanges), since we checked above that p is not bigger than the largest acked - return p <= f.AckRanges[i].Largest -} - -func encodeAckDelay(delay time.Duration) uint64 { - return uint64(delay.Nanoseconds() / (1000 * (1 << 
protocol.AckDelayExponent))) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_range.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_range.go deleted file mode 100644 index 0f4185801..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ack_range.go +++ /dev/null @@ -1,14 +0,0 @@ -package wire - -import "github.com/lucas-clemente/quic-go/internal/protocol" - -// AckRange is an ACK range -type AckRange struct { - Smallest protocol.PacketNumber - Largest protocol.PacketNumber -} - -// Len returns the number of packets contained in this ACK range -func (r AckRange) Len() protocol.PacketNumber { - return r.Largest - r.Smallest + 1 -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/connection_close_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/connection_close_frame.go deleted file mode 100644 index 60378a5d8..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/connection_close_frame.go +++ /dev/null @@ -1,81 +0,0 @@ -package wire - -import ( - "bytes" - "io" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// A ConnectionCloseFrame is a CONNECTION_CLOSE frame -type ConnectionCloseFrame struct { - IsApplicationError bool - ErrorCode qerr.ErrorCode - ReasonPhrase string -} - -func parseConnectionCloseFrame(r *bytes.Reader, version protocol.VersionNumber) (*ConnectionCloseFrame, error) { - typeByte, err := r.ReadByte() - if err != nil { - return nil, err - } - - f := &ConnectionCloseFrame{IsApplicationError: typeByte == 0x1d} - ec, err := utils.BigEndian.ReadUint16(r) - if err != nil { - return nil, err - } - f.ErrorCode = qerr.ErrorCode(ec) - // read the Frame Type, if this is not an application error - if !f.IsApplicationError { - if _, err := utils.ReadVarInt(r); err != nil { - return nil, err - } - } - var reasonPhraseLen uint64 - 
reasonPhraseLen, err = utils.ReadVarInt(r) - if err != nil { - return nil, err - } - // shortcut to prevent the unnecessary allocation of dataLen bytes - // if the dataLen is larger than the remaining length of the packet - // reading the whole reason phrase would result in EOF when attempting to READ - if int(reasonPhraseLen) > r.Len() { - return nil, io.EOF - } - - reasonPhrase := make([]byte, reasonPhraseLen) - if _, err := io.ReadFull(r, reasonPhrase); err != nil { - // this should never happen, since we already checked the reasonPhraseLen earlier - return nil, err - } - f.ReasonPhrase = string(reasonPhrase) - return f, nil -} - -// Length of a written frame -func (f *ConnectionCloseFrame) Length(version protocol.VersionNumber) protocol.ByteCount { - length := 1 + 2 + utils.VarIntLen(uint64(len(f.ReasonPhrase))) + protocol.ByteCount(len(f.ReasonPhrase)) - if !f.IsApplicationError { - length++ // for the frame type - } - return length -} - -func (f *ConnectionCloseFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - if f.IsApplicationError { - b.WriteByte(0x1d) - } else { - b.WriteByte(0x1c) - } - - utils.BigEndian.WriteUint16(b, uint16(f.ErrorCode)) - if !f.IsApplicationError { - utils.WriteVarInt(b, 0) - } - utils.WriteVarInt(b, uint64(len(f.ReasonPhrase))) - b.WriteString(f.ReasonPhrase) - return nil -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/crypto_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/crypto_frame.go deleted file mode 100644 index eeafea974..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/crypto_frame.go +++ /dev/null @@ -1,71 +0,0 @@ -package wire - -import ( - "bytes" - "io" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// A CryptoFrame is a CRYPTO frame -type CryptoFrame struct { - Offset protocol.ByteCount - Data []byte -} - -func parseCryptoFrame(r *bytes.Reader, _ 
protocol.VersionNumber) (*CryptoFrame, error) { - if _, err := r.ReadByte(); err != nil { - return nil, err - } - - frame := &CryptoFrame{} - offset, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - frame.Offset = protocol.ByteCount(offset) - dataLen, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - if dataLen > uint64(r.Len()) { - return nil, io.EOF - } - if dataLen != 0 { - frame.Data = make([]byte, dataLen) - if _, err := io.ReadFull(r, frame.Data); err != nil { - // this should never happen, since we already checked the dataLen earlier - return nil, err - } - } - return frame, nil -} - -func (f *CryptoFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x6) - utils.WriteVarInt(b, uint64(f.Offset)) - utils.WriteVarInt(b, uint64(len(f.Data))) - b.Write(f.Data) - return nil -} - -// Length of a written frame -func (f *CryptoFrame) Length(_ protocol.VersionNumber) protocol.ByteCount { - return 1 + utils.VarIntLen(uint64(f.Offset)) + utils.VarIntLen(uint64(len(f.Data))) + protocol.ByteCount(len(f.Data)) -} - -// MaxDataLen returns the maximum data length -func (f *CryptoFrame) MaxDataLen(maxSize protocol.ByteCount) protocol.ByteCount { - // pretend that the data size will be 1 bytes - // if it turns out that varint encoding the length will consume 2 bytes, we need to adjust the data length afterwards - headerLen := 1 + utils.VarIntLen(uint64(f.Offset)) + 1 - if headerLen > maxSize { - return 0 - } - maxDataLen := maxSize - headerLen - if utils.VarIntLen(uint64(maxDataLen)) != 1 { - maxDataLen-- - } - return maxDataLen -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/data_blocked_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/data_blocked_frame.go deleted file mode 100644 index 91c05ccfb..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/data_blocked_frame.go +++ /dev/null @@ -1,38 +0,0 @@ -package wire - -import ( - "bytes" - - 
"github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// A DataBlockedFrame is a DATA_BLOCKED frame -type DataBlockedFrame struct { - DataLimit protocol.ByteCount -} - -func parseDataBlockedFrame(r *bytes.Reader, _ protocol.VersionNumber) (*DataBlockedFrame, error) { - if _, err := r.ReadByte(); err != nil { - return nil, err - } - offset, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - return &DataBlockedFrame{ - DataLimit: protocol.ByteCount(offset), - }, nil -} - -func (f *DataBlockedFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - typeByte := uint8(0x14) - b.WriteByte(typeByte) - utils.WriteVarInt(b, uint64(f.DataLimit)) - return nil -} - -// Length of a written frame -func (f *DataBlockedFrame) Length(version protocol.VersionNumber) protocol.ByteCount { - return 1 + utils.VarIntLen(uint64(f.DataLimit)) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/extended_header.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/extended_header.go deleted file mode 100644 index 19a0b064e..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/extended_header.go +++ /dev/null @@ -1,204 +0,0 @@ -package wire - -import ( - "bytes" - "errors" - "fmt" - "io" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// ExtendedHeader is the header of a QUIC packet. 
-type ExtendedHeader struct { - Header - - typeByte byte - - PacketNumberLen protocol.PacketNumberLen - PacketNumber protocol.PacketNumber - - KeyPhase int -} - -func (h *ExtendedHeader) parse(b *bytes.Reader, v protocol.VersionNumber) (*ExtendedHeader, error) { - // read the (now unencrypted) first byte - var err error - h.typeByte, err = b.ReadByte() - if err != nil { - return nil, err - } - if _, err := b.Seek(int64(h.ParsedLen())-1, io.SeekCurrent); err != nil { - return nil, err - } - if h.IsLongHeader { - return h.parseLongHeader(b, v) - } - return h.parseShortHeader(b, v) -} - -func (h *ExtendedHeader) parseLongHeader(b *bytes.Reader, v protocol.VersionNumber) (*ExtendedHeader, error) { - if h.typeByte&0xc != 0 { - return nil, errors.New("5th and 6th bit must be 0") - } - if err := h.readPacketNumber(b); err != nil { - return nil, err - } - return h, nil -} - -func (h *ExtendedHeader) parseShortHeader(b *bytes.Reader, v protocol.VersionNumber) (*ExtendedHeader, error) { - if h.typeByte&0x18 != 0 { - return nil, errors.New("4th and 5th bit must be 0") - } - - h.KeyPhase = int(h.typeByte&0x4) >> 2 - - if err := h.readPacketNumber(b); err != nil { - return nil, err - } - return h, nil -} - -func (h *ExtendedHeader) readPacketNumber(b *bytes.Reader) error { - h.PacketNumberLen = protocol.PacketNumberLen(h.typeByte&0x3) + 1 - pn, err := utils.BigEndian.ReadUintN(b, uint8(h.PacketNumberLen)) - if err != nil { - return err - } - h.PacketNumber = protocol.PacketNumber(pn) - return nil -} - -// Write writes the Header. 
-func (h *ExtendedHeader) Write(b *bytes.Buffer, ver protocol.VersionNumber) error { - if h.IsLongHeader { - return h.writeLongHeader(b, ver) - } - return h.writeShortHeader(b, ver) -} - -func (h *ExtendedHeader) writeLongHeader(b *bytes.Buffer, v protocol.VersionNumber) error { - var packetType uint8 - switch h.Type { - case protocol.PacketTypeInitial: - packetType = 0x0 - case protocol.PacketType0RTT: - packetType = 0x1 - case protocol.PacketTypeHandshake: - packetType = 0x2 - case protocol.PacketTypeRetry: - packetType = 0x3 - } - firstByte := 0xc0 | packetType<<4 - if h.Type == protocol.PacketTypeRetry { - odcil, err := encodeSingleConnIDLen(h.OrigDestConnectionID) - if err != nil { - return err - } - firstByte |= odcil - } else { // Retry packets don't have a packet number - firstByte |= uint8(h.PacketNumberLen - 1) - } - - b.WriteByte(firstByte) - utils.BigEndian.WriteUint32(b, uint32(h.Version)) - connIDLen, err := encodeConnIDLen(h.DestConnectionID, h.SrcConnectionID) - if err != nil { - return err - } - b.WriteByte(connIDLen) - b.Write(h.DestConnectionID.Bytes()) - b.Write(h.SrcConnectionID.Bytes()) - - switch h.Type { - case protocol.PacketTypeRetry: - b.Write(h.OrigDestConnectionID.Bytes()) - b.Write(h.Token) - return nil - case protocol.PacketTypeInitial: - utils.WriteVarInt(b, uint64(len(h.Token))) - b.Write(h.Token) - } - - utils.WriteVarInt(b, uint64(h.Length)) - return h.writePacketNumber(b) -} - -// TODO: add support for the key phase -func (h *ExtendedHeader) writeShortHeader(b *bytes.Buffer, v protocol.VersionNumber) error { - typeByte := 0x40 | uint8(h.PacketNumberLen-1) - typeByte |= byte(h.KeyPhase << 2) - - b.WriteByte(typeByte) - b.Write(h.DestConnectionID.Bytes()) - return h.writePacketNumber(b) -} - -func (h *ExtendedHeader) writePacketNumber(b *bytes.Buffer) error { - if h.PacketNumberLen == protocol.PacketNumberLenInvalid || h.PacketNumberLen > protocol.PacketNumberLen4 { - return fmt.Errorf("invalid packet number length: %d", 
h.PacketNumberLen) - } - utils.BigEndian.WriteUintN(b, uint8(h.PacketNumberLen), uint64(h.PacketNumber)) - return nil -} - -// GetLength determines the length of the Header. -func (h *ExtendedHeader) GetLength(v protocol.VersionNumber) protocol.ByteCount { - if h.IsLongHeader { - length := 1 /* type byte */ + 4 /* version */ + 1 /* conn id len byte */ + protocol.ByteCount(h.DestConnectionID.Len()+h.SrcConnectionID.Len()) + protocol.ByteCount(h.PacketNumberLen) + utils.VarIntLen(uint64(h.Length)) - if h.Type == protocol.PacketTypeInitial { - length += utils.VarIntLen(uint64(len(h.Token))) + protocol.ByteCount(len(h.Token)) - } - return length - } - - length := protocol.ByteCount(1 /* type byte */ + h.DestConnectionID.Len()) - length += protocol.ByteCount(h.PacketNumberLen) - return length -} - -// Log logs the Header -func (h *ExtendedHeader) Log(logger utils.Logger) { - if h.IsLongHeader { - var token string - if h.Type == protocol.PacketTypeInitial || h.Type == protocol.PacketTypeRetry { - if len(h.Token) == 0 { - token = "Token: (empty), " - } else { - token = fmt.Sprintf("Token: %#x, ", h.Token) - } - if h.Type == protocol.PacketTypeRetry { - logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sOrigDestConnectionID: %s, Version: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.OrigDestConnectionID, h.Version) - return - } - } - logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sPacketNumber: %#x, PacketNumberLen: %d, Length: %d, Version: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.PacketNumber, h.PacketNumberLen, h.Length, h.Version) - } else { - logger.Debugf("\tShort Header{DestConnectionID: %s, PacketNumber: %#x, PacketNumberLen: %d, KeyPhase: %d}", h.DestConnectionID, h.PacketNumber, h.PacketNumberLen, h.KeyPhase) - } -} - -func encodeConnIDLen(dest, src protocol.ConnectionID) (byte, error) { - dcil, err := encodeSingleConnIDLen(dest) - if err != nil { - return 0, 
err - } - scil, err := encodeSingleConnIDLen(src) - if err != nil { - return 0, err - } - return scil | dcil<<4, nil -} - -func encodeSingleConnIDLen(id protocol.ConnectionID) (byte, error) { - len := id.Len() - if len == 0 { - return 0, nil - } - if len < 4 || len > 18 { - return 0, fmt.Errorf("invalid connection ID length: %d bytes", len) - } - return byte(len - 3), nil -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/frame_parser.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/frame_parser.go deleted file mode 100644 index e9d3d6a59..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/frame_parser.go +++ /dev/null @@ -1,97 +0,0 @@ -package wire - -import ( - "bytes" - "fmt" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" -) - -type frameParser struct { - ackDelayExponent uint8 - - version protocol.VersionNumber -} - -// NewFrameParser creates a new frame parser. -func NewFrameParser(v protocol.VersionNumber) FrameParser { - return &frameParser{version: v} -} - -// ParseNextFrame parses the next frame -// It skips PADDING frames. 
-func (p *frameParser) ParseNext(r *bytes.Reader, encLevel protocol.EncryptionLevel) (Frame, error) { - for r.Len() != 0 { - typeByte, _ := r.ReadByte() - if typeByte == 0x0 { // PADDING frame - continue - } - r.UnreadByte() - - return p.parseFrame(r, typeByte, encLevel) - } - return nil, nil -} - -func (p *frameParser) parseFrame(r *bytes.Reader, typeByte byte, encLevel protocol.EncryptionLevel) (Frame, error) { - var frame Frame - var err error - if typeByte&0xf8 == 0x8 { - frame, err = parseStreamFrame(r, p.version) - if err != nil { - return nil, qerr.Error(qerr.FrameEncodingError, err.Error()) - } - return frame, nil - } - switch typeByte { - case 0x1: - frame, err = parsePingFrame(r, p.version) - case 0x2, 0x3: - ackDelayExponent := p.ackDelayExponent - if encLevel != protocol.Encryption1RTT { - ackDelayExponent = protocol.DefaultAckDelayExponent - } - frame, err = parseAckFrame(r, ackDelayExponent, p.version) - case 0x4: - frame, err = parseResetStreamFrame(r, p.version) - case 0x5: - frame, err = parseStopSendingFrame(r, p.version) - case 0x6: - frame, err = parseCryptoFrame(r, p.version) - case 0x7: - frame, err = parseNewTokenFrame(r, p.version) - case 0x10: - frame, err = parseMaxDataFrame(r, p.version) - case 0x11: - frame, err = parseMaxStreamDataFrame(r, p.version) - case 0x12, 0x13: - frame, err = parseMaxStreamsFrame(r, p.version) - case 0x14: - frame, err = parseDataBlockedFrame(r, p.version) - case 0x15: - frame, err = parseStreamDataBlockedFrame(r, p.version) - case 0x16, 0x17: - frame, err = parseStreamsBlockedFrame(r, p.version) - case 0x18: - frame, err = parseNewConnectionIDFrame(r, p.version) - case 0x19: - frame, err = parseRetireConnectionIDFrame(r, p.version) - case 0x1a: - frame, err = parsePathChallengeFrame(r, p.version) - case 0x1b: - frame, err = parsePathResponseFrame(r, p.version) - case 0x1c, 0x1d: - frame, err = parseConnectionCloseFrame(r, p.version) - default: - err = fmt.Errorf("unknown type byte 0x%x", typeByte) - } - if err 
!= nil { - return nil, qerr.Error(qerr.FrameEncodingError, err.Error()) - } - return frame, nil -} - -func (p *frameParser) SetAckDelayExponent(exp uint8) { - p.ackDelayExponent = exp -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/header.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/header.go deleted file mode 100644 index 21eb784c3..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/header.go +++ /dev/null @@ -1,251 +0,0 @@ -package wire - -import ( - "bytes" - "errors" - "fmt" - "io" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// ParseConnectionID parses the destination connection ID of a packet. -// It uses the data slice for the connection ID. -// That means that the connection ID must not be used after the packet buffer is released. -func ParseConnectionID(data []byte, shortHeaderConnIDLen int) (protocol.ConnectionID, error) { - if len(data) == 0 { - return nil, io.EOF - } - isLongHeader := data[0]&0x80 > 0 - if !isLongHeader { - if len(data) < shortHeaderConnIDLen+1 { - return nil, io.EOF - } - return protocol.ConnectionID(data[1 : 1+shortHeaderConnIDLen]), nil - } - if len(data) < 6 { - return nil, io.EOF - } - destConnIDLen, _ := decodeConnIDLen(data[5]) - if len(data) < 6+destConnIDLen { - return nil, io.EOF - } - return protocol.ConnectionID(data[6 : 6+destConnIDLen]), nil -} - -// IsVersionNegotiationPacket says if this is a version negotiation packet -func IsVersionNegotiationPacket(b []byte) bool { - if len(b) < 5 { - return false - } - return b[0]&0x80 > 0 && b[1] == 0 && b[2] == 0 && b[3] == 0 && b[4] == 0 -} - -var errUnsupportedVersion = errors.New("unsupported version") - -// The Header is the version independent part of the header -type Header struct { - Version protocol.VersionNumber - SrcConnectionID protocol.ConnectionID - DestConnectionID protocol.ConnectionID - - IsLongHeader bool - Type protocol.PacketType - 
Length protocol.ByteCount - - Token []byte - SupportedVersions []protocol.VersionNumber // sent in a Version Negotiation Packet - OrigDestConnectionID protocol.ConnectionID // sent in the Retry packet - - typeByte byte - parsedLen protocol.ByteCount // how many bytes were read while parsing this header -} - -// ParsePacket parses a packet. -// If the packet has a long header, the packet is cut according to the length field. -// If we understand the version, the packet is header up unto the packet number. -// Otherwise, only the invariant part of the header is parsed. -func ParsePacket(data []byte, shortHeaderConnIDLen int) (*Header, []byte /* packet data */, []byte /* rest */, error) { - hdr, err := parseHeader(bytes.NewReader(data), shortHeaderConnIDLen) - if err != nil { - if err == errUnsupportedVersion { - return hdr, nil, nil, nil - } - return nil, nil, nil, err - } - var rest []byte - if hdr.IsLongHeader { - if protocol.ByteCount(len(data)) < hdr.ParsedLen()+hdr.Length { - return nil, nil, nil, fmt.Errorf("packet length (%d bytes) is smaller than the expected length (%d bytes)", len(data)-int(hdr.ParsedLen()), hdr.Length) - } - packetLen := int(hdr.ParsedLen() + hdr.Length) - rest = data[packetLen:] - data = data[:packetLen] - } - return hdr, data, rest, nil -} - -// ParseHeader parses the header. -// For short header packets: up to the packet number. 
-// For long header packets: -// * if we understand the version: up to the packet number -// * if not, only the invariant part of the header -func parseHeader(b *bytes.Reader, shortHeaderConnIDLen int) (*Header, error) { - startLen := b.Len() - h, err := parseHeaderImpl(b, shortHeaderConnIDLen) - if err != nil { - return h, err - } - h.parsedLen = protocol.ByteCount(startLen - b.Len()) - return h, err -} - -func parseHeaderImpl(b *bytes.Reader, shortHeaderConnIDLen int) (*Header, error) { - typeByte, err := b.ReadByte() - if err != nil { - return nil, err - } - - h := &Header{ - typeByte: typeByte, - IsLongHeader: typeByte&0x80 > 0, - } - - if !h.IsLongHeader { - if h.typeByte&0x40 == 0 { - return nil, errors.New("not a QUIC packet") - } - if err := h.parseShortHeader(b, shortHeaderConnIDLen); err != nil { - return nil, err - } - return h, nil - } - return h, h.parseLongHeader(b) -} - -func (h *Header) parseShortHeader(b *bytes.Reader, shortHeaderConnIDLen int) error { - var err error - h.DestConnectionID, err = protocol.ReadConnectionID(b, shortHeaderConnIDLen) - return err -} - -func (h *Header) parseLongHeader(b *bytes.Reader) error { - v, err := utils.BigEndian.ReadUint32(b) - if err != nil { - return err - } - h.Version = protocol.VersionNumber(v) - if h.Version != 0 && h.typeByte&0x40 == 0 { - return errors.New("not a QUIC packet") - } - connIDLenByte, err := b.ReadByte() - if err != nil { - return err - } - dcil, scil := decodeConnIDLen(connIDLenByte) - h.DestConnectionID, err = protocol.ReadConnectionID(b, dcil) - if err != nil { - return err - } - h.SrcConnectionID, err = protocol.ReadConnectionID(b, scil) - if err != nil { - return err - } - if h.Version == 0 { - return h.parseVersionNegotiationPacket(b) - } - // If we don't understand the version, we have no idea how to interpret the rest of the bytes - if !protocol.IsSupportedVersion(protocol.SupportedVersions, h.Version) { - return errUnsupportedVersion - } - - switch (h.typeByte & 0x30) >> 4 { - case 
0x0: - h.Type = protocol.PacketTypeInitial - case 0x1: - h.Type = protocol.PacketType0RTT - case 0x2: - h.Type = protocol.PacketTypeHandshake - case 0x3: - h.Type = protocol.PacketTypeRetry - } - - if h.Type == protocol.PacketTypeRetry { - odcil := decodeSingleConnIDLen(h.typeByte & 0xf) - h.OrigDestConnectionID, err = protocol.ReadConnectionID(b, odcil) - if err != nil { - return err - } - h.Token = make([]byte, b.Len()) - if _, err := io.ReadFull(b, h.Token); err != nil { - return err - } - return nil - } - - if h.Type == protocol.PacketTypeInitial { - tokenLen, err := utils.ReadVarInt(b) - if err != nil { - return err - } - if tokenLen > uint64(b.Len()) { - return io.EOF - } - h.Token = make([]byte, tokenLen) - if _, err := io.ReadFull(b, h.Token); err != nil { - return err - } - } - - pl, err := utils.ReadVarInt(b) - if err != nil { - return err - } - h.Length = protocol.ByteCount(pl) - return nil -} - -func (h *Header) parseVersionNegotiationPacket(b *bytes.Reader) error { - if b.Len() == 0 { - return errors.New("Version Negoation packet has empty version list") - } - if b.Len()%4 != 0 { - return errors.New("Version Negotation packet has a version list with an invalid length") - } - h.SupportedVersions = make([]protocol.VersionNumber, b.Len()/4) - for i := 0; b.Len() > 0; i++ { - v, err := utils.BigEndian.ReadUint32(b) - if err != nil { - return err - } - h.SupportedVersions[i] = protocol.VersionNumber(v) - } - return nil -} - -// ParsedLen returns the number of bytes that were consumed when parsing the header -func (h *Header) ParsedLen() protocol.ByteCount { - return h.parsedLen -} - -// ParseExtended parses the version dependent part of the header. -// The Reader has to be set such that it points to the first byte of the header. 
-func (h *Header) ParseExtended(b *bytes.Reader, ver protocol.VersionNumber) (*ExtendedHeader, error) { - return h.toExtendedHeader().parse(b, ver) -} - -func (h *Header) toExtendedHeader() *ExtendedHeader { - return &ExtendedHeader{Header: *h} -} - -func decodeConnIDLen(enc byte) (int /*dest conn id len*/, int /*src conn id len*/) { - return decodeSingleConnIDLen(enc >> 4), decodeSingleConnIDLen(enc & 0xf) -} - -func decodeSingleConnIDLen(enc uint8) int { - if enc == 0 { - return 0 - } - return int(enc) + 3 -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/interface.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/interface.go deleted file mode 100644 index 99fdc80fb..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/interface.go +++ /dev/null @@ -1,19 +0,0 @@ -package wire - -import ( - "bytes" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -// A Frame in QUIC -type Frame interface { - Write(b *bytes.Buffer, version protocol.VersionNumber) error - Length(version protocol.VersionNumber) protocol.ByteCount -} - -// A FrameParser parses QUIC frames, one by one. 
-type FrameParser interface { - ParseNext(*bytes.Reader, protocol.EncryptionLevel) (Frame, error) - SetAckDelayExponent(uint8) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/log.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/log.go deleted file mode 100644 index 3c56ac458..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/log.go +++ /dev/null @@ -1,43 +0,0 @@ -package wire - -import ( - "fmt" - "strings" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// LogFrame logs a frame, either sent or received -func LogFrame(logger utils.Logger, frame Frame, sent bool) { - if !logger.Debug() { - return - } - dir := "<-" - if sent { - dir = "->" - } - switch f := frame.(type) { - case *CryptoFrame: - dataLen := protocol.ByteCount(len(f.Data)) - logger.Debugf("\t%s &wire.CryptoFrame{Offset: 0x%x, Data length: 0x%x, Offset + Data length: 0x%x}", dir, f.Offset, dataLen, f.Offset+dataLen) - case *StreamFrame: - logger.Debugf("\t%s &wire.StreamFrame{StreamID: %d, FinBit: %t, Offset: 0x%x, Data length: 0x%x, Offset + Data length: 0x%x}", dir, f.StreamID, f.FinBit, f.Offset, f.DataLen(), f.Offset+f.DataLen()) - case *AckFrame: - if len(f.AckRanges) > 1 { - ackRanges := make([]string, len(f.AckRanges)) - for i, r := range f.AckRanges { - ackRanges[i] = fmt.Sprintf("{Largest: %#x, Smallest: %#x}", r.Largest, r.Smallest) - } - logger.Debugf("\t%s &wire.AckFrame{LargestAcked: %#x, LowestAcked: %#x, AckRanges: {%s}, DelayTime: %s}", dir, f.LargestAcked(), f.LowestAcked(), strings.Join(ackRanges, ", "), f.DelayTime.String()) - } else { - logger.Debugf("\t%s &wire.AckFrame{LargestAcked: %#x, LowestAcked: %#x, DelayTime: %s}", dir, f.LargestAcked(), f.LowestAcked(), f.DelayTime.String()) - } - case *NewConnectionIDFrame: - logger.Debugf("\t%s &wire.NewConnectionIDFrame{SequenceNumber: %d, ConnectionID: %s, StatelessResetToken: %#x}", dir, f.SequenceNumber, 
f.ConnectionID, f.StatelessResetToken) - case *NewTokenFrame: - logger.Debugf("\t%s &wire.NewTokenFrame{Token: %#x}", dir, f.Token) - default: - logger.Debugf("\t%s %#v", dir, frame) - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_data_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_data_frame.go deleted file mode 100644 index c4a9be0df..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_data_frame.go +++ /dev/null @@ -1,40 +0,0 @@ -package wire - -import ( - "bytes" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// A MaxDataFrame carries flow control information for the connection -type MaxDataFrame struct { - ByteOffset protocol.ByteCount -} - -// parseMaxDataFrame parses a MAX_DATA frame -func parseMaxDataFrame(r *bytes.Reader, version protocol.VersionNumber) (*MaxDataFrame, error) { - if _, err := r.ReadByte(); err != nil { - return nil, err - } - - frame := &MaxDataFrame{} - byteOffset, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - frame.ByteOffset = protocol.ByteCount(byteOffset) - return frame, nil -} - -//Write writes a MAX_STREAM_DATA frame -func (f *MaxDataFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - b.WriteByte(0x10) - utils.WriteVarInt(b, uint64(f.ByteOffset)) - return nil -} - -// Length of a written frame -func (f *MaxDataFrame) Length(version protocol.VersionNumber) protocol.ByteCount { - return 1 + utils.VarIntLen(uint64(f.ByteOffset)) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_stream_data_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_stream_data_frame.go deleted file mode 100644 index 2566f1c9b..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_stream_data_frame.go +++ /dev/null @@ -1,46 +0,0 @@ -package wire - -import ( - "bytes" - - 
"github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// A MaxStreamDataFrame is a MAX_STREAM_DATA frame -type MaxStreamDataFrame struct { - StreamID protocol.StreamID - ByteOffset protocol.ByteCount -} - -func parseMaxStreamDataFrame(r *bytes.Reader, version protocol.VersionNumber) (*MaxStreamDataFrame, error) { - if _, err := r.ReadByte(); err != nil { - return nil, err - } - - sid, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - offset, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - - return &MaxStreamDataFrame{ - StreamID: protocol.StreamID(sid), - ByteOffset: protocol.ByteCount(offset), - }, nil -} - -func (f *MaxStreamDataFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - b.WriteByte(0x11) - utils.WriteVarInt(b, uint64(f.StreamID)) - utils.WriteVarInt(b, uint64(f.ByteOffset)) - return nil -} - -// Length of a written frame -func (f *MaxStreamDataFrame) Length(version protocol.VersionNumber) protocol.ByteCount { - return 1 + utils.VarIntLen(uint64(f.StreamID)) + utils.VarIntLen(uint64(f.ByteOffset)) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_streams_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_streams_frame.go deleted file mode 100644 index c7a3cd41c..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/max_streams_frame.go +++ /dev/null @@ -1,51 +0,0 @@ -package wire - -import ( - "bytes" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// A MaxStreamsFrame is a MAX_STREAMS frame -type MaxStreamsFrame struct { - Type protocol.StreamType - MaxStreams uint64 -} - -func parseMaxStreamsFrame(r *bytes.Reader, _ protocol.VersionNumber) (*MaxStreamsFrame, error) { - typeByte, err := r.ReadByte() - if err != nil { - return nil, err - } - - f := &MaxStreamsFrame{} - switch typeByte { - case 0x12: - f.Type = 
protocol.StreamTypeBidi - case 0x13: - f.Type = protocol.StreamTypeUni - } - streamID, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - f.MaxStreams = streamID - return f, nil -} - -func (f *MaxStreamsFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - switch f.Type { - case protocol.StreamTypeBidi: - b.WriteByte(0x12) - case protocol.StreamTypeUni: - b.WriteByte(0x13) - } - utils.WriteVarInt(b, f.MaxStreams) - return nil -} - -// Length of a written frame -func (f *MaxStreamsFrame) Length(protocol.VersionNumber) protocol.ByteCount { - return 1 + utils.VarIntLen(f.MaxStreams) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_connection_id_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_connection_id_frame.go deleted file mode 100644 index 9a612aa6c..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_connection_id_frame.go +++ /dev/null @@ -1,70 +0,0 @@ -package wire - -import ( - "bytes" - "fmt" - "io" - - "github.com/lucas-clemente/quic-go/internal/utils" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -// A NewConnectionIDFrame is a NEW_CONNECTION_ID frame -type NewConnectionIDFrame struct { - SequenceNumber uint64 - ConnectionID protocol.ConnectionID - StatelessResetToken [16]byte -} - -func parseNewConnectionIDFrame(r *bytes.Reader, _ protocol.VersionNumber) (*NewConnectionIDFrame, error) { - if _, err := r.ReadByte(); err != nil { - return nil, err - } - - seq, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - connIDLen, err := r.ReadByte() - if err != nil { - return nil, err - } - if connIDLen < 4 || connIDLen > 18 { - return nil, fmt.Errorf("invalid connection ID length: %d", connIDLen) - } - connID, err := protocol.ReadConnectionID(r, int(connIDLen)) - if err != nil { - return nil, err - } - frame := &NewConnectionIDFrame{ - SequenceNumber: seq, - ConnectionID: connID, - } - if _, err := io.ReadFull(r, 
frame.StatelessResetToken[:]); err != nil { - if err == io.ErrUnexpectedEOF { - return nil, io.EOF - } - return nil, err - } - - return frame, nil -} - -func (f *NewConnectionIDFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x18) - utils.WriteVarInt(b, f.SequenceNumber) - connIDLen := f.ConnectionID.Len() - if connIDLen < 4 || connIDLen > 18 { - return fmt.Errorf("invalid connection ID length: %d", connIDLen) - } - b.WriteByte(uint8(connIDLen)) - b.Write(f.ConnectionID.Bytes()) - b.Write(f.StatelessResetToken[:]) - return nil -} - -// Length of a written frame -func (f *NewConnectionIDFrame) Length(protocol.VersionNumber) protocol.ByteCount { - return 1 + utils.VarIntLen(f.SequenceNumber) + 1 /* connection ID length */ + protocol.ByteCount(f.ConnectionID.Len()) + 16 -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_token_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_token_frame.go deleted file mode 100644 index 2cf6fce5e..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/new_token_frame.go +++ /dev/null @@ -1,44 +0,0 @@ -package wire - -import ( - "bytes" - "io" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// A NewTokenFrame is a NEW_TOKEN frame -type NewTokenFrame struct { - Token []byte -} - -func parseNewTokenFrame(r *bytes.Reader, _ protocol.VersionNumber) (*NewTokenFrame, error) { - if _, err := r.ReadByte(); err != nil { - return nil, err - } - tokenLen, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - if uint64(r.Len()) < tokenLen { - return nil, io.EOF - } - token := make([]byte, int(tokenLen)) - if _, err := io.ReadFull(r, token); err != nil { - return nil, err - } - return &NewTokenFrame{Token: token}, nil -} - -func (f *NewTokenFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x7) - utils.WriteVarInt(b, uint64(len(f.Token))) - 
b.Write(f.Token) - return nil -} - -// Length of a written frame -func (f *NewTokenFrame) Length(protocol.VersionNumber) protocol.ByteCount { - return 1 + utils.VarIntLen(uint64(len(f.Token))) + protocol.ByteCount(len(f.Token)) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_challenge_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_challenge_frame.go deleted file mode 100644 index d35ee3b54..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_challenge_frame.go +++ /dev/null @@ -1,38 +0,0 @@ -package wire - -import ( - "bytes" - "io" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -// A PathChallengeFrame is a PATH_CHALLENGE frame -type PathChallengeFrame struct { - Data [8]byte -} - -func parsePathChallengeFrame(r *bytes.Reader, version protocol.VersionNumber) (*PathChallengeFrame, error) { - if _, err := r.ReadByte(); err != nil { - return nil, err - } - frame := &PathChallengeFrame{} - if _, err := io.ReadFull(r, frame.Data[:]); err != nil { - if err == io.ErrUnexpectedEOF { - return nil, io.EOF - } - return nil, err - } - return frame, nil -} - -func (f *PathChallengeFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x1a) - b.Write(f.Data[:]) - return nil -} - -// Length of a written frame -func (f *PathChallengeFrame) Length(_ protocol.VersionNumber) protocol.ByteCount { - return 1 + 8 -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_response_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_response_frame.go deleted file mode 100644 index 20d8fd721..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/path_response_frame.go +++ /dev/null @@ -1,38 +0,0 @@ -package wire - -import ( - "bytes" - "io" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -// A PathResponseFrame is a PATH_RESPONSE frame -type PathResponseFrame struct { - Data [8]byte -} - -func 
parsePathResponseFrame(r *bytes.Reader, version protocol.VersionNumber) (*PathResponseFrame, error) { - if _, err := r.ReadByte(); err != nil { - return nil, err - } - frame := &PathResponseFrame{} - if _, err := io.ReadFull(r, frame.Data[:]); err != nil { - if err == io.ErrUnexpectedEOF { - return nil, io.EOF - } - return nil, err - } - return frame, nil -} - -func (f *PathResponseFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x1b) - b.Write(f.Data[:]) - return nil -} - -// Length of a written frame -func (f *PathResponseFrame) Length(_ protocol.VersionNumber) protocol.ByteCount { - return 1 + 8 -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ping_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/ping_frame.go deleted file mode 100644 index aed6857b5..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/ping_frame.go +++ /dev/null @@ -1,27 +0,0 @@ -package wire - -import ( - "bytes" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -// A PingFrame is a PING frame -type PingFrame struct{} - -func parsePingFrame(r *bytes.Reader, version protocol.VersionNumber) (*PingFrame, error) { - if _, err := r.ReadByte(); err != nil { - return nil, err - } - return &PingFrame{}, nil -} - -func (f *PingFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - b.WriteByte(0x1) - return nil -} - -// Length of a written frame -func (f *PingFrame) Length(version protocol.VersionNumber) protocol.ByteCount { - return 1 -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/reset_stream_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/reset_stream_frame.go deleted file mode 100644 index d3a40dcf9..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/reset_stream_frame.go +++ /dev/null @@ -1,58 +0,0 @@ -package wire - -import ( - "bytes" - - "github.com/lucas-clemente/quic-go/internal/protocol" - 
"github.com/lucas-clemente/quic-go/internal/utils" -) - -// A ResetStreamFrame is a RESET_STREAM frame in QUIC -type ResetStreamFrame struct { - StreamID protocol.StreamID - ErrorCode protocol.ApplicationErrorCode - ByteOffset protocol.ByteCount -} - -func parseResetStreamFrame(r *bytes.Reader, version protocol.VersionNumber) (*ResetStreamFrame, error) { - if _, err := r.ReadByte(); err != nil { // read the TypeByte - return nil, err - } - - var streamID protocol.StreamID - var errorCode uint16 - var byteOffset protocol.ByteCount - sid, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - streamID = protocol.StreamID(sid) - errorCode, err = utils.BigEndian.ReadUint16(r) - if err != nil { - return nil, err - } - bo, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - byteOffset = protocol.ByteCount(bo) - - return &ResetStreamFrame{ - StreamID: streamID, - ErrorCode: protocol.ApplicationErrorCode(errorCode), - ByteOffset: byteOffset, - }, nil -} - -func (f *ResetStreamFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - b.WriteByte(0x4) - utils.WriteVarInt(b, uint64(f.StreamID)) - utils.BigEndian.WriteUint16(b, uint16(f.ErrorCode)) - utils.WriteVarInt(b, uint64(f.ByteOffset)) - return nil -} - -// Length of a written frame -func (f *ResetStreamFrame) Length(version protocol.VersionNumber) protocol.ByteCount { - return 1 + utils.VarIntLen(uint64(f.StreamID)) + 2 + utils.VarIntLen(uint64(f.ByteOffset)) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/retire_connection_id_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/retire_connection_id_frame.go deleted file mode 100644 index 9a715a4c4..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/retire_connection_id_frame.go +++ /dev/null @@ -1,36 +0,0 @@ -package wire - -import ( - "bytes" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// A 
RetireConnectionIDFrame is a RETIRE_CONNECTION_ID frame -type RetireConnectionIDFrame struct { - SequenceNumber uint64 -} - -func parseRetireConnectionIDFrame(r *bytes.Reader, _ protocol.VersionNumber) (*RetireConnectionIDFrame, error) { - if _, err := r.ReadByte(); err != nil { - return nil, err - } - - seq, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - return &RetireConnectionIDFrame{SequenceNumber: seq}, nil -} - -func (f *RetireConnectionIDFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x19) - utils.WriteVarInt(b, f.SequenceNumber) - return nil -} - -// Length of a written frame -func (f *RetireConnectionIDFrame) Length(protocol.VersionNumber) protocol.ByteCount { - return 1 + utils.VarIntLen(f.SequenceNumber) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stop_sending_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/stop_sending_frame.go deleted file mode 100644 index f9a5d60b0..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stop_sending_frame.go +++ /dev/null @@ -1,47 +0,0 @@ -package wire - -import ( - "bytes" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// A StopSendingFrame is a STOP_SENDING frame -type StopSendingFrame struct { - StreamID protocol.StreamID - ErrorCode protocol.ApplicationErrorCode -} - -// parseStopSendingFrame parses a STOP_SENDING frame -func parseStopSendingFrame(r *bytes.Reader, _ protocol.VersionNumber) (*StopSendingFrame, error) { - if _, err := r.ReadByte(); err != nil { - return nil, err - } - - streamID, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - errorCode, err := utils.BigEndian.ReadUint16(r) - if err != nil { - return nil, err - } - - return &StopSendingFrame{ - StreamID: protocol.StreamID(streamID), - ErrorCode: protocol.ApplicationErrorCode(errorCode), - }, nil -} - -// Length of a written frame -func (f 
*StopSendingFrame) Length(_ protocol.VersionNumber) protocol.ByteCount { - return 1 + utils.VarIntLen(uint64(f.StreamID)) + 2 -} - -func (f *StopSendingFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - b.WriteByte(0x5) - utils.WriteVarInt(b, uint64(f.StreamID)) - utils.BigEndian.WriteUint16(b, uint16(f.ErrorCode)) - return nil -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_data_blocked_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_data_blocked_frame.go deleted file mode 100644 index 9f2e90be0..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_data_blocked_frame.go +++ /dev/null @@ -1,46 +0,0 @@ -package wire - -import ( - "bytes" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// A StreamDataBlockedFrame is a STREAM_DATA_BLOCKED frame -type StreamDataBlockedFrame struct { - StreamID protocol.StreamID - DataLimit protocol.ByteCount -} - -func parseStreamDataBlockedFrame(r *bytes.Reader, _ protocol.VersionNumber) (*StreamDataBlockedFrame, error) { - if _, err := r.ReadByte(); err != nil { - return nil, err - } - - sid, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - offset, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - - return &StreamDataBlockedFrame{ - StreamID: protocol.StreamID(sid), - DataLimit: protocol.ByteCount(offset), - }, nil -} - -func (f *StreamDataBlockedFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - b.WriteByte(0x15) - utils.WriteVarInt(b, uint64(f.StreamID)) - utils.WriteVarInt(b, uint64(f.DataLimit)) - return nil -} - -// Length of a written frame -func (f *StreamDataBlockedFrame) Length(version protocol.VersionNumber) protocol.ByteCount { - return 1 + utils.VarIntLen(uint64(f.StreamID)) + utils.VarIntLen(uint64(f.DataLimit)) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_frame.go 
b/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_frame.go deleted file mode 100644 index dfc1d1adf..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/stream_frame.go +++ /dev/null @@ -1,168 +0,0 @@ -package wire - -import ( - "bytes" - "errors" - "io" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// A StreamFrame of QUIC -type StreamFrame struct { - StreamID protocol.StreamID - FinBit bool - DataLenPresent bool - Offset protocol.ByteCount - Data []byte -} - -func parseStreamFrame(r *bytes.Reader, version protocol.VersionNumber) (*StreamFrame, error) { - typeByte, err := r.ReadByte() - if err != nil { - return nil, err - } - - hasOffset := typeByte&0x4 > 0 - frame := &StreamFrame{ - FinBit: typeByte&0x1 > 0, - DataLenPresent: typeByte&0x2 > 0, - } - - streamID, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - frame.StreamID = protocol.StreamID(streamID) - if hasOffset { - offset, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - frame.Offset = protocol.ByteCount(offset) - } - - var dataLen uint64 - if frame.DataLenPresent { - var err error - dataLen, err = utils.ReadVarInt(r) - if err != nil { - return nil, err - } - // shortcut to prevent the unnecessary allocation of dataLen bytes - // if the dataLen is larger than the remaining length of the packet - // reading the packet contents would result in EOF when attempting to READ - if dataLen > uint64(r.Len()) { - return nil, io.EOF - } - } else { - // The rest of the packet is data - dataLen = uint64(r.Len()) - } - if dataLen != 0 { - frame.Data = make([]byte, dataLen) - if _, err := io.ReadFull(r, frame.Data); err != nil { - // this should never happen, since we already checked the dataLen earlier - return nil, err - } - } - if frame.Offset+frame.DataLen() > protocol.MaxByteCount { - return nil, 
qerr.Error(qerr.FrameEncodingError, "stream data overflows maximum offset") - } - return frame, nil -} - -// Write writes a STREAM frame -func (f *StreamFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error { - if len(f.Data) == 0 && !f.FinBit { - return errors.New("StreamFrame: attempting to write empty frame without FIN") - } - - typeByte := byte(0x8) - if f.FinBit { - typeByte ^= 0x1 - } - hasOffset := f.Offset != 0 - if f.DataLenPresent { - typeByte ^= 0x2 - } - if hasOffset { - typeByte ^= 0x4 - } - b.WriteByte(typeByte) - utils.WriteVarInt(b, uint64(f.StreamID)) - if hasOffset { - utils.WriteVarInt(b, uint64(f.Offset)) - } - if f.DataLenPresent { - utils.WriteVarInt(b, uint64(f.DataLen())) - } - b.Write(f.Data) - return nil -} - -// Length returns the total length of the STREAM frame -func (f *StreamFrame) Length(version protocol.VersionNumber) protocol.ByteCount { - length := 1 + utils.VarIntLen(uint64(f.StreamID)) - if f.Offset != 0 { - length += utils.VarIntLen(uint64(f.Offset)) - } - if f.DataLenPresent { - length += utils.VarIntLen(uint64(f.DataLen())) - } - return length + f.DataLen() -} - -// DataLen gives the length of data in bytes -func (f *StreamFrame) DataLen() protocol.ByteCount { - return protocol.ByteCount(len(f.Data)) -} - -// MaxDataLen returns the maximum data length -// If 0 is returned, writing will fail (a STREAM frame must contain at least 1 byte of data). 
-func (f *StreamFrame) MaxDataLen(maxSize protocol.ByteCount, version protocol.VersionNumber) protocol.ByteCount { - headerLen := 1 + utils.VarIntLen(uint64(f.StreamID)) - if f.Offset != 0 { - headerLen += utils.VarIntLen(uint64(f.Offset)) - } - if f.DataLenPresent { - // pretend that the data size will be 1 bytes - // if it turns out that varint encoding the length will consume 2 bytes, we need to adjust the data length afterwards - headerLen++ - } - if headerLen > maxSize { - return 0 - } - maxDataLen := maxSize - headerLen - if f.DataLenPresent && utils.VarIntLen(uint64(maxDataLen)) != 1 { - maxDataLen-- - } - return maxDataLen -} - -// MaybeSplitOffFrame splits a frame such that it is not bigger than n bytes. -// If n >= len(frame), nil is returned and nothing is modified. -func (f *StreamFrame) MaybeSplitOffFrame(maxSize protocol.ByteCount, version protocol.VersionNumber) (*StreamFrame, error) { - if maxSize >= f.Length(version) { - return nil, nil - } - - n := f.MaxDataLen(maxSize, version) - if n == 0 { - return nil, errors.New("too small") - } - newFrame := &StreamFrame{ - FinBit: false, - StreamID: f.StreamID, - Offset: f.Offset, - Data: f.Data[:n], - DataLenPresent: f.DataLenPresent, - } - - f.Data = f.Data[n:] - f.Offset += n - - return newFrame, nil -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/streams_blocked_frame.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/streams_blocked_frame.go deleted file mode 100644 index b41d68f1b..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/streams_blocked_frame.go +++ /dev/null @@ -1,52 +0,0 @@ -package wire - -import ( - "bytes" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// A StreamsBlockedFrame is a STREAMS_BLOCKED frame -type StreamsBlockedFrame struct { - Type protocol.StreamType - StreamLimit uint64 -} - -func parseStreamsBlockedFrame(r *bytes.Reader, _ protocol.VersionNumber) 
(*StreamsBlockedFrame, error) { - typeByte, err := r.ReadByte() - if err != nil { - return nil, err - } - - f := &StreamsBlockedFrame{} - switch typeByte { - case 0x16: - f.Type = protocol.StreamTypeBidi - case 0x17: - f.Type = protocol.StreamTypeUni - } - streamLimit, err := utils.ReadVarInt(r) - if err != nil { - return nil, err - } - f.StreamLimit = streamLimit - - return f, nil -} - -func (f *StreamsBlockedFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error { - switch f.Type { - case protocol.StreamTypeBidi: - b.WriteByte(0x16) - case protocol.StreamTypeUni: - b.WriteByte(0x17) - } - utils.WriteVarInt(b, f.StreamLimit) - return nil -} - -// Length of a written frame -func (f *StreamsBlockedFrame) Length(_ protocol.VersionNumber) protocol.ByteCount { - return 1 + utils.VarIntLen(f.StreamLimit) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/internal/wire/version_negotiation.go b/vendor/github.com/lucas-clemente/quic-go/internal/wire/version_negotiation.go deleted file mode 100644 index bfc27af77..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/internal/wire/version_negotiation.go +++ /dev/null @@ -1,31 +0,0 @@ -package wire - -import ( - "bytes" - "crypto/rand" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" -) - -// ComposeVersionNegotiation composes a Version Negotiation -func ComposeVersionNegotiation(destConnID, srcConnID protocol.ConnectionID, versions []protocol.VersionNumber) ([]byte, error) { - greasedVersions := protocol.GetGreasedVersions(versions) - expectedLen := 1 /* type byte */ + 4 /* version field */ + 1 /* connection ID length field */ + destConnID.Len() + srcConnID.Len() + len(greasedVersions)*4 - buf := bytes.NewBuffer(make([]byte, 0, expectedLen)) - r := make([]byte, 1) - _, _ = rand.Read(r) // ignore the error here. It is not critical to have perfect random here. 
- buf.WriteByte(r[0] | 0xc0) - utils.BigEndian.WriteUint32(buf, 0) // version 0 - connIDLen, err := encodeConnIDLen(destConnID, srcConnID) - if err != nil { - return nil, err - } - buf.WriteByte(connIDLen) - buf.Write(destConnID) - buf.Write(srcConnID) - for _, v := range greasedVersions { - utils.BigEndian.WriteUint32(buf, uint32(v)) - } - return buf.Bytes(), nil -} diff --git a/vendor/github.com/lucas-clemente/quic-go/mockgen.go b/vendor/github.com/lucas-clemente/quic-go/mockgen.go deleted file mode 100644 index 1a882c358..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/mockgen.go +++ /dev/null @@ -1,21 +0,0 @@ -package quic - -//go:generate sh -c "./mockgen_private.sh quic mock_stream_internal_test.go github.com/lucas-clemente/quic-go streamI" -//go:generate sh -c "./mockgen_private.sh quic mock_crypto_stream_test.go github.com/lucas-clemente/quic-go cryptoStream" -//go:generate sh -c "./mockgen_private.sh quic mock_receive_stream_internal_test.go github.com/lucas-clemente/quic-go receiveStreamI" -//go:generate sh -c "./mockgen_private.sh quic mock_send_stream_internal_test.go github.com/lucas-clemente/quic-go sendStreamI" -//go:generate sh -c "./mockgen_private.sh quic mock_stream_sender_test.go github.com/lucas-clemente/quic-go streamSender" -//go:generate sh -c "./mockgen_private.sh quic mock_stream_getter_test.go github.com/lucas-clemente/quic-go streamGetter" -//go:generate sh -c "./mockgen_private.sh quic mock_crypto_data_handler_test.go github.com/lucas-clemente/quic-go cryptoDataHandler" -//go:generate sh -c "./mockgen_private.sh quic mock_frame_source_test.go github.com/lucas-clemente/quic-go frameSource" -//go:generate sh -c "./mockgen_private.sh quic mock_ack_frame_source_test.go github.com/lucas-clemente/quic-go ackFrameSource" -//go:generate sh -c "./mockgen_private.sh quic mock_stream_manager_test.go github.com/lucas-clemente/quic-go streamManager" -//go:generate sh -c "./mockgen_private.sh quic mock_sealing_manager_test.go 
github.com/lucas-clemente/quic-go sealingManager" -//go:generate sh -c "./mockgen_private.sh quic mock_unpacker_test.go github.com/lucas-clemente/quic-go unpacker" -//go:generate sh -c "./mockgen_private.sh quic mock_packer_test.go github.com/lucas-clemente/quic-go packer" -//go:generate sh -c "./mockgen_private.sh quic mock_session_runner_test.go github.com/lucas-clemente/quic-go sessionRunner" -//go:generate sh -c "./mockgen_private.sh quic mock_quic_session_test.go github.com/lucas-clemente/quic-go quicSession" -//go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_test.go github.com/lucas-clemente/quic-go packetHandler" -//go:generate sh -c "./mockgen_private.sh quic mock_unknown_packet_handler_test.go github.com/lucas-clemente/quic-go unknownPacketHandler" -//go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_manager_test.go github.com/lucas-clemente/quic-go packetHandlerManager" -//go:generate sh -c "./mockgen_private.sh quic mock_multiplexer_test.go github.com/lucas-clemente/quic-go multiplexer" diff --git a/vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh b/vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh deleted file mode 100644 index 0ba5f64ef..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/mockgen_private.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -# Mockgen refuses to generate mocks private types. -# This script copies the quic package to a temporary directory, and adds an public alias for the private type. -# It then creates a mock for this public (alias) type. 
- -TEMP_DIR=$(mktemp -d) -mkdir -p $TEMP_DIR/src/github.com/lucas-clemente/quic-go/ - -# uppercase the name of the interface -INTERFACE_NAME="$(tr '[:lower:]' '[:upper:]' <<< ${4:0:1})${4:1}" - -# copy all .go files to a temporary directory -rsync -r --exclude 'vendor' --include='*.go' --include '*/' --exclude '*' $GOPATH/src/github.com/lucas-clemente/quic-go/ $TEMP_DIR/src/github.com/lucas-clemente/quic-go/ - -# create a public alias for the interface, so that mockgen can process it -echo -e "package $1\n" > $TEMP_DIR/src/github.com/lucas-clemente/quic-go/mockgen_interface.go -echo "type $INTERFACE_NAME = $4" >> $TEMP_DIR/src/github.com/lucas-clemente/quic-go/mockgen_interface.go - -export GOPATH="$TEMP_DIR:$GOPATH" - -mockgen -package $1 -self_package $1 -destination $2 $3 $INTERFACE_NAME - -# mockgen imports quic-go as 'import quic_go github.com/lucas_clemente/quic-go' -sed -i '' 's/quic_go.//g' $2 -goimports -w $2 - -rm -r "$TEMP_DIR" diff --git a/vendor/github.com/lucas-clemente/quic-go/multiplexer.go b/vendor/github.com/lucas-clemente/quic-go/multiplexer.go deleted file mode 100644 index eeffca53b..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/multiplexer.go +++ /dev/null @@ -1,89 +0,0 @@ -package quic - -import ( - "bytes" - "fmt" - "net" - "sync" - - "github.com/lucas-clemente/quic-go/internal/utils" -) - -var ( - connMuxerOnce sync.Once - connMuxer multiplexer -) - -type multiplexer interface { - AddConn(c net.PacketConn, connIDLen int, statelessResetKey []byte) (packetHandlerManager, error) - RemoveConn(net.PacketConn) error -} - -type connManager struct { - connIDLen int - statelessResetKey []byte - manager packetHandlerManager -} - -// The connMultiplexer listens on multiple net.PacketConns and dispatches -// incoming packets to the session handler. 
-type connMultiplexer struct { - mutex sync.Mutex - - conns map[net.PacketConn]connManager - newPacketHandlerManager func(net.PacketConn, int, []byte, utils.Logger) packetHandlerManager // so it can be replaced in the tests - - logger utils.Logger -} - -var _ multiplexer = &connMultiplexer{} - -func getMultiplexer() multiplexer { - connMuxerOnce.Do(func() { - connMuxer = &connMultiplexer{ - conns: make(map[net.PacketConn]connManager), - logger: utils.DefaultLogger.WithPrefix("muxer"), - newPacketHandlerManager: newPacketHandlerMap, - } - }) - return connMuxer -} - -func (m *connMultiplexer) AddConn( - c net.PacketConn, - connIDLen int, - statelessResetKey []byte, -) (packetHandlerManager, error) { - m.mutex.Lock() - defer m.mutex.Unlock() - - p, ok := m.conns[c] - if !ok { - manager := m.newPacketHandlerManager(c, connIDLen, statelessResetKey, m.logger) - p = connManager{ - connIDLen: connIDLen, - statelessResetKey: statelessResetKey, - manager: manager, - } - m.conns[c] = p - } - if p.connIDLen != connIDLen { - return nil, fmt.Errorf("cannot use %d byte connection IDs on a connection that is already using %d byte connction IDs", connIDLen, p.connIDLen) - } - if statelessResetKey != nil && !bytes.Equal(p.statelessResetKey, statelessResetKey) { - return nil, fmt.Errorf("cannot use different stateless reset keys on the same packet conn") - } - return p.manager, nil -} - -func (m *connMultiplexer) RemoveConn(c net.PacketConn) error { - m.mutex.Lock() - defer m.mutex.Unlock() - - if _, ok := m.conns[c]; !ok { - return fmt.Errorf("cannote remove connection, connection is unknown") - } - - delete(m.conns, c) - return nil -} diff --git a/vendor/github.com/lucas-clemente/quic-go/packet_handler_map.go b/vendor/github.com/lucas-clemente/quic-go/packet_handler_map.go deleted file mode 100644 index 0af6a4074..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/packet_handler_map.go +++ /dev/null @@ -1,274 +0,0 @@ -package quic - -import ( - "crypto/hmac" - "crypto/rand" - 
"crypto/sha256" - "errors" - "hash" - "net" - "sync" - "time" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -// The packetHandlerMap stores packetHandlers, identified by connection ID. -// It is used: -// * by the server to store sessions -// * when multiplexing outgoing connections to store clients -type packetHandlerMap struct { - mutex sync.RWMutex - - conn net.PacketConn - connIDLen int - - handlers map[string] /* string(ConnectionID)*/ packetHandler - resetTokens map[[16]byte] /* stateless reset token */ packetHandler - server unknownPacketHandler - - listening chan struct{} // is closed when listen returns - closed bool - - deleteRetiredSessionsAfter time.Duration - - statelessResetEnabled bool - statelessResetHasher hash.Hash - - logger utils.Logger -} - -var _ packetHandlerManager = &packetHandlerMap{} - -func newPacketHandlerMap( - conn net.PacketConn, - connIDLen int, - statelessResetKey []byte, - logger utils.Logger, -) packetHandlerManager { - m := &packetHandlerMap{ - conn: conn, - connIDLen: connIDLen, - listening: make(chan struct{}), - handlers: make(map[string]packetHandler), - resetTokens: make(map[[16]byte]packetHandler), - deleteRetiredSessionsAfter: protocol.RetiredConnectionIDDeleteTimeout, - statelessResetEnabled: len(statelessResetKey) > 0, - statelessResetHasher: hmac.New(sha256.New, statelessResetKey), - logger: logger, - } - go m.listen() - return m -} - -func (h *packetHandlerMap) Add(id protocol.ConnectionID, handler packetHandler) { - h.mutex.Lock() - h.handlers[string(id)] = handler - h.mutex.Unlock() -} - -func (h *packetHandlerMap) Remove(id protocol.ConnectionID) { - h.removeByConnectionIDAsString(string(id)) -} - -func (h *packetHandlerMap) removeByConnectionIDAsString(id string) { - h.mutex.Lock() - delete(h.handlers, id) - h.mutex.Unlock() -} - -func (h *packetHandlerMap) Retire(id protocol.ConnectionID) { - 
h.retireByConnectionIDAsString(string(id)) -} - -func (h *packetHandlerMap) retireByConnectionIDAsString(id string) { - time.AfterFunc(h.deleteRetiredSessionsAfter, func() { - h.removeByConnectionIDAsString(id) - }) -} - -func (h *packetHandlerMap) AddResetToken(token [16]byte, handler packetHandler) { - h.mutex.Lock() - h.resetTokens[token] = handler - h.mutex.Unlock() -} - -func (h *packetHandlerMap) RemoveResetToken(token [16]byte) { - h.mutex.Lock() - delete(h.resetTokens, token) - h.mutex.Unlock() -} - -func (h *packetHandlerMap) SetServer(s unknownPacketHandler) { - h.mutex.Lock() - h.server = s - h.mutex.Unlock() -} - -func (h *packetHandlerMap) CloseServer() { - h.mutex.Lock() - h.server = nil - var wg sync.WaitGroup - for id, handler := range h.handlers { - if handler.getPerspective() == protocol.PerspectiveServer { - wg.Add(1) - go func(id string, handler packetHandler) { - // session.Close() blocks until the CONNECTION_CLOSE has been sent and the run-loop has stopped - _ = handler.Close() - h.retireByConnectionIDAsString(id) - wg.Done() - }(id, handler) - } - } - h.mutex.Unlock() - wg.Wait() -} - -// Close the underlying connection and wait until listen() has returned. 
-func (h *packetHandlerMap) Close() error { - if err := h.conn.Close(); err != nil { - return err - } - <-h.listening // wait until listening returns - return nil -} - -func (h *packetHandlerMap) close(e error) error { - h.mutex.Lock() - if h.closed { - h.mutex.Unlock() - return nil - } - h.closed = true - - var wg sync.WaitGroup - for _, handler := range h.handlers { - wg.Add(1) - go func(handler packetHandler) { - handler.destroy(e) - wg.Done() - }(handler) - } - - if h.server != nil { - h.server.closeWithError(e) - } - h.mutex.Unlock() - wg.Wait() - return getMultiplexer().RemoveConn(h.conn) -} - -func (h *packetHandlerMap) listen() { - defer close(h.listening) - for { - buffer := getPacketBuffer() - data := buffer.Slice - // The packet size should not exceed protocol.MaxReceivePacketSize bytes - // If it does, we only read a truncated packet, which will then end up undecryptable - n, addr, err := h.conn.ReadFrom(data) - if err != nil { - h.close(err) - return - } - h.handlePacket(addr, buffer, data[:n]) - } -} - -func (h *packetHandlerMap) handlePacket( - addr net.Addr, - buffer *packetBuffer, - data []byte, -) { - connID, err := wire.ParseConnectionID(data, h.connIDLen) - if err != nil { - h.logger.Debugf("error parsing connection ID on packet from %s: %s", addr, err) - return - } - rcvTime := time.Now() - - h.mutex.RLock() - defer h.mutex.RUnlock() - - if isStatelessReset := h.maybeHandleStatelessReset(data); isStatelessReset { - return - } - - handler, handlerFound := h.handlers[string(connID)] - - p := &receivedPacket{ - remoteAddr: addr, - rcvTime: rcvTime, - buffer: buffer, - data: data, - } - if handlerFound { // existing session - handler.handlePacket(p) - return - } - if data[0]&0x80 == 0 { - go h.maybeSendStatelessReset(p, connID) - return - } - if h.server == nil { // no server set - h.logger.Debugf("received a packet with an unexpected connection ID %s", connID) - return - } - h.server.handlePacket(p) -} - -func (h *packetHandlerMap) 
maybeHandleStatelessReset(data []byte) bool { - // stateless resets are always short header packets - if data[0]&0x80 != 0 { - return false - } - if len(data) < protocol.MinStatelessResetSize { - return false - } - - var token [16]byte - copy(token[:], data[len(data)-16:]) - if sess, ok := h.resetTokens[token]; ok { - h.logger.Debugf("Received a stateless retry with token %#x. Closing session.", token) - go sess.destroy(errors.New("received a stateless reset")) - return true - } - return false -} - -func (h *packetHandlerMap) GetStatelessResetToken(connID protocol.ConnectionID) [16]byte { - var token [16]byte - if !h.statelessResetEnabled { - // Return a random stateless reset token. - // This token will be sent in the server's transport parameters. - // By using a random token, an off-path attacker won't be able to disrupt the connection. - rand.Read(token[:]) - return token - } - h.statelessResetHasher.Write(connID.Bytes()) - copy(token[:], h.statelessResetHasher.Sum(nil)) - h.statelessResetHasher.Reset() - return token -} - -func (h *packetHandlerMap) maybeSendStatelessReset(p *receivedPacket, connID protocol.ConnectionID) { - defer p.buffer.Release() - if !h.statelessResetEnabled { - return - } - // Don't send a stateless reset in response to very small packets. - // This includes packets that could be stateless resets. - if len(p.data) <= protocol.MinStatelessResetSize { - return - } - token := h.GetStatelessResetToken(connID) - h.logger.Debugf("Sending stateless reset to %s (connection ID: %s). Token: %#x", p.remoteAddr, connID, token) - data := make([]byte, 23) - rand.Read(data) - data[0] = (data[0] & 0x7f) | 0x40 - data = append(data, token[:]...) 
- if _, err := h.conn.WriteTo(data, p.remoteAddr); err != nil { - h.logger.Debugf("Error sending Stateless Reset: %s", err) - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/packet_packer.go b/vendor/github.com/lucas-clemente/quic-go/packet_packer.go deleted file mode 100644 index 7fae340e6..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/packet_packer.go +++ /dev/null @@ -1,491 +0,0 @@ -package quic - -import ( - "bytes" - "errors" - "fmt" - "net" - "time" - - "github.com/lucas-clemente/quic-go/internal/ackhandler" - "github.com/lucas-clemente/quic-go/internal/handshake" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type packer interface { - PackPacket() (*packedPacket, error) - MaybePackAckPacket() (*packedPacket, error) - PackRetransmission(packet *ackhandler.Packet) ([]*packedPacket, error) - PackConnectionClose(*wire.ConnectionCloseFrame) (*packedPacket, error) - - HandleTransportParameters(*handshake.TransportParameters) - SetToken([]byte) - ChangeDestConnectionID(protocol.ConnectionID) -} - -type packedPacket struct { - header *wire.ExtendedHeader - raw []byte - frames []wire.Frame - - buffer *packetBuffer -} - -func (p *packedPacket) EncryptionLevel() protocol.EncryptionLevel { - if !p.header.IsLongHeader { - return protocol.Encryption1RTT - } - switch p.header.Type { - case protocol.PacketTypeInitial: - return protocol.EncryptionInitial - case protocol.PacketTypeHandshake: - return protocol.EncryptionHandshake - default: - return protocol.EncryptionUnspecified - } -} - -func (p *packedPacket) IsRetransmittable() bool { - return ackhandler.HasRetransmittableFrames(p.frames) -} - -func (p *packedPacket) ToAckHandlerPacket() *ackhandler.Packet { - return &ackhandler.Packet{ - PacketNumber: p.header.PacketNumber, - PacketType: p.header.Type, - Frames: p.frames, - Length: protocol.ByteCount(len(p.raw)), - EncryptionLevel: 
p.EncryptionLevel(), - SendTime: time.Now(), - } -} - -func getMaxPacketSize(addr net.Addr) protocol.ByteCount { - maxSize := protocol.ByteCount(protocol.MinInitialPacketSize) - // If this is not a UDP address, we don't know anything about the MTU. - // Use the minimum size of an Initial packet as the max packet size. - if udpAddr, ok := addr.(*net.UDPAddr); ok { - // If ip is not an IPv4 address, To4 returns nil. - // Note that there might be some corner cases, where this is not correct. - // See https://stackoverflow.com/questions/22751035/golang-distinguish-ipv4-ipv6. - if udpAddr.IP.To4() == nil { - maxSize = protocol.MaxPacketSizeIPv6 - } else { - maxSize = protocol.MaxPacketSizeIPv4 - } - } - return maxSize -} - -type packetNumberManager interface { - PeekPacketNumber(protocol.EncryptionLevel) (protocol.PacketNumber, protocol.PacketNumberLen) - PopPacketNumber(protocol.EncryptionLevel) protocol.PacketNumber -} - -type sealingManager interface { - GetSealer() (protocol.EncryptionLevel, handshake.Sealer) - GetSealerWithEncryptionLevel(protocol.EncryptionLevel) (handshake.Sealer, error) -} - -type frameSource interface { - AppendStreamFrames([]wire.Frame, protocol.ByteCount) []wire.Frame - AppendControlFrames([]wire.Frame, protocol.ByteCount) ([]wire.Frame, protocol.ByteCount) -} - -type ackFrameSource interface { - GetAckFrame(protocol.EncryptionLevel) *wire.AckFrame -} - -type packetPacker struct { - destConnID protocol.ConnectionID - srcConnID protocol.ConnectionID - - perspective protocol.Perspective - version protocol.VersionNumber - cryptoSetup sealingManager - - initialStream cryptoStream - handshakeStream cryptoStream - - token []byte - - pnManager packetNumberManager - framer frameSource - acks ackFrameSource - - maxPacketSize protocol.ByteCount - numNonRetransmittableAcks int -} - -var _ packer = &packetPacker{} - -func newPacketPacker( - destConnID protocol.ConnectionID, - srcConnID protocol.ConnectionID, - initialStream cryptoStream, - 
handshakeStream cryptoStream, - packetNumberManager packetNumberManager, - remoteAddr net.Addr, // only used for determining the max packet size - cryptoSetup sealingManager, - framer frameSource, - acks ackFrameSource, - perspective protocol.Perspective, - version protocol.VersionNumber, -) *packetPacker { - return &packetPacker{ - cryptoSetup: cryptoSetup, - destConnID: destConnID, - srcConnID: srcConnID, - initialStream: initialStream, - handshakeStream: handshakeStream, - perspective: perspective, - version: version, - framer: framer, - acks: acks, - pnManager: packetNumberManager, - maxPacketSize: getMaxPacketSize(remoteAddr), - } -} - -// PackConnectionClose packs a packet that ONLY contains a ConnectionCloseFrame -func (p *packetPacker) PackConnectionClose(ccf *wire.ConnectionCloseFrame) (*packedPacket, error) { - frames := []wire.Frame{ccf} - encLevel, sealer := p.cryptoSetup.GetSealer() - header := p.getHeader(encLevel) - return p.writeAndSealPacket(header, frames, encLevel, sealer) -} - -func (p *packetPacker) MaybePackAckPacket() (*packedPacket, error) { - ack := p.acks.GetAckFrame(protocol.Encryption1RTT) - if ack == nil { - return nil, nil - } - // TODO(#1534): only pack ACKs with the right encryption level - encLevel, sealer := p.cryptoSetup.GetSealer() - header := p.getHeader(encLevel) - frames := []wire.Frame{ack} - return p.writeAndSealPacket(header, frames, encLevel, sealer) -} - -// PackRetransmission packs a retransmission -// For packets sent after completion of the handshake, it might happen that 2 packets have to be sent. -// This can happen e.g. when a longer packet number is used in the header. -func (p *packetPacker) PackRetransmission(packet *ackhandler.Packet) ([]*packedPacket, error) { - var controlFrames []wire.Frame - var streamFrames []*wire.StreamFrame - for _, f := range packet.Frames { - // CRYPTO frames are treated as control frames here. 
- // Since we're making sure that the header can never be larger for a retransmission, - // we never have to split CRYPTO frames. - if sf, ok := f.(*wire.StreamFrame); ok { - sf.DataLenPresent = true - streamFrames = append(streamFrames, sf) - } else { - controlFrames = append(controlFrames, f) - } - } - - var packets []*packedPacket - encLevel := packet.EncryptionLevel - sealer, err := p.cryptoSetup.GetSealerWithEncryptionLevel(encLevel) - if err != nil { - return nil, err - } - for len(controlFrames) > 0 || len(streamFrames) > 0 { - var frames []wire.Frame - var length protocol.ByteCount - - header := p.getHeader(encLevel) - headerLen := header.GetLength(p.version) - maxSize := p.maxPacketSize - protocol.ByteCount(sealer.Overhead()) - headerLen - - for len(controlFrames) > 0 { - frame := controlFrames[0] - frameLen := frame.Length(p.version) - if length+frameLen > maxSize { - break - } - length += frameLen - frames = append(frames, frame) - controlFrames = controlFrames[1:] - } - - for len(streamFrames) > 0 && length+protocol.MinStreamFrameSize < maxSize { - frame := streamFrames[0] - frame.DataLenPresent = false - frameToAdd := frame - - sf, err := frame.MaybeSplitOffFrame(maxSize-length, p.version) - if err != nil { - return nil, err - } - if sf != nil { - frameToAdd = sf - } else { - streamFrames = streamFrames[1:] - } - frame.DataLenPresent = true - length += frameToAdd.Length(p.version) - frames = append(frames, frameToAdd) - } - if sf, ok := frames[len(frames)-1].(*wire.StreamFrame); ok { - sf.DataLenPresent = false - } - p, err := p.writeAndSealPacket(header, frames, encLevel, sealer) - if err != nil { - return nil, err - } - packets = append(packets, p) - } - return packets, nil -} - -// PackPacket packs a new packet -// the other controlFrames are sent in the next packet, but might be queued and sent in the next packet if the packet would overflow MaxPacketSize otherwise -func (p *packetPacker) PackPacket() (*packedPacket, error) { - packet, err := 
p.maybePackCryptoPacket() - if err != nil { - return nil, err - } - if packet != nil { - return packet, nil - } - - encLevel, sealer := p.cryptoSetup.GetSealer() - header := p.getHeader(encLevel) - headerLen := header.GetLength(p.version) - if err != nil { - return nil, err - } - - maxSize := p.maxPacketSize - protocol.ByteCount(sealer.Overhead()) - headerLen - frames, err := p.composeNextPacket(maxSize) - if err != nil { - return nil, err - } - - // Check if we have enough frames to send - if len(frames) == 0 { - return nil, nil - } - // check if this packet only contains an ACK - if !ackhandler.HasRetransmittableFrames(frames) { - if p.numNonRetransmittableAcks >= protocol.MaxNonRetransmittableAcks { - frames = append(frames, &wire.PingFrame{}) - p.numNonRetransmittableAcks = 0 - } else { - p.numNonRetransmittableAcks++ - } - } else { - p.numNonRetransmittableAcks = 0 - } - - return p.writeAndSealPacket(header, frames, encLevel, sealer) -} - -func (p *packetPacker) maybePackCryptoPacket() (*packedPacket, error) { - var s cryptoStream - var encLevel protocol.EncryptionLevel - - hasData := p.initialStream.HasData() - ack := p.acks.GetAckFrame(protocol.EncryptionInitial) - if hasData || ack != nil { - s = p.initialStream - encLevel = protocol.EncryptionInitial - } else { - hasData = p.handshakeStream.HasData() - ack = p.acks.GetAckFrame(protocol.EncryptionHandshake) - if hasData || ack != nil { - s = p.handshakeStream - encLevel = protocol.EncryptionHandshake - } - } - if s == nil { - return nil, nil - } - sealer, err := p.cryptoSetup.GetSealerWithEncryptionLevel(encLevel) - if err != nil { - // The sealer - return nil, err - } - - hdr := p.getHeader(encLevel) - hdrLen := hdr.GetLength(p.version) - var length protocol.ByteCount - frames := make([]wire.Frame, 0, 2) - if ack != nil { - frames = append(frames, ack) - length += ack.Length(p.version) - } - if hasData { - cf := s.PopCryptoFrame(p.maxPacketSize - hdrLen - protocol.ByteCount(sealer.Overhead()) - length) - 
frames = append(frames, cf) - } - return p.writeAndSealPacket(hdr, frames, encLevel, sealer) -} - -func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount) ([]wire.Frame, error) { - var length protocol.ByteCount - var frames []wire.Frame - - // ACKs need to go first, so that the sentPacketHandler will recognize them - if ack := p.acks.GetAckFrame(protocol.Encryption1RTT); ack != nil { - frames = append(frames, ack) - length += ack.Length(p.version) - } - - var lengthAdded protocol.ByteCount - frames, lengthAdded = p.framer.AppendControlFrames(frames, maxFrameSize-length) - length += lengthAdded - - // temporarily increase the maxFrameSize by the (minimum) length of the DataLen field - // this leads to a properly sized packet in all cases, since we do all the packet length calculations with STREAM frames that have the DataLen set - // however, for the last STREAM frame in the packet, we can omit the DataLen, thus yielding a packet of exactly the correct size - // the length is encoded to either 1 or 2 bytes - maxFrameSize++ - - frames = p.framer.AppendStreamFrames(frames, maxFrameSize-length) - if len(frames) > 0 { - lastFrame := frames[len(frames)-1] - if sf, ok := lastFrame.(*wire.StreamFrame); ok { - sf.DataLenPresent = false - } - } - return frames, nil -} - -func (p *packetPacker) getHeader(encLevel protocol.EncryptionLevel) *wire.ExtendedHeader { - pn, pnLen := p.pnManager.PeekPacketNumber(encLevel) - header := &wire.ExtendedHeader{} - header.PacketNumber = pn - header.PacketNumberLen = pnLen - header.Version = p.version - header.DestConnectionID = p.destConnID - - if encLevel != protocol.Encryption1RTT { - header.IsLongHeader = true - // Always send Initial and Handshake packets with the maximum packet number length. - // This simplifies retransmissions: Since the header can't get any larger, - // we don't need to split CRYPTO frames. 
- header.PacketNumberLen = protocol.PacketNumberLen4 - header.SrcConnectionID = p.srcConnID - // Set the length to the maximum packet size. - // Since it is encoded as a varint, this guarantees us that the header will end up at most as big as GetLength() returns. - header.Length = p.maxPacketSize - switch encLevel { - case protocol.EncryptionInitial: - header.Type = protocol.PacketTypeInitial - case protocol.EncryptionHandshake: - header.Type = protocol.PacketTypeHandshake - } - } - - return header -} - -func (p *packetPacker) writeAndSealPacket( - header *wire.ExtendedHeader, - frames []wire.Frame, - encLevel protocol.EncryptionLevel, - sealer handshake.Sealer, -) (*packedPacket, error) { - packetBuffer := getPacketBuffer() - buffer := bytes.NewBuffer(packetBuffer.Slice[:0]) - - addPaddingForInitial := p.perspective == protocol.PerspectiveClient && header.Type == protocol.PacketTypeInitial - - if header.IsLongHeader { - if p.perspective == protocol.PerspectiveClient && header.Type == protocol.PacketTypeInitial { - header.Token = p.token - } - if addPaddingForInitial { - headerLen := header.GetLength(p.version) - header.Length = protocol.ByteCount(header.PacketNumberLen) + protocol.MinInitialPacketSize - headerLen - } else { - // long header packets always use 4 byte packet number, so we never need to pad short payloads - length := protocol.ByteCount(sealer.Overhead()) + protocol.ByteCount(header.PacketNumberLen) - for _, frame := range frames { - length += frame.Length(p.version) - } - header.Length = length - } - } - - if err := header.Write(buffer, p.version); err != nil { - return nil, err - } - payloadOffset := buffer.Len() - - // write all frames but the last one - for _, frame := range frames[:len(frames)-1] { - if err := frame.Write(buffer, p.version); err != nil { - return nil, err - } - } - lastFrame := frames[len(frames)-1] - if addPaddingForInitial { - // when appending padding, we need to make sure that the last STREAM frames has the data length set - 
if sf, ok := lastFrame.(*wire.StreamFrame); ok { - sf.DataLenPresent = true - } - } else { - payloadLen := buffer.Len() - payloadOffset + int(lastFrame.Length(p.version)) - if paddingLen := 4 - int(header.PacketNumberLen) - payloadLen; paddingLen > 0 { - // Pad the packet such that packet number length + payload length is 4 bytes. - // This is needed to enable the peer to get a 16 byte sample for header protection. - buffer.Write(bytes.Repeat([]byte{0}, paddingLen)) - } - } - if err := lastFrame.Write(buffer, p.version); err != nil { - return nil, err - } - - if addPaddingForInitial { - paddingLen := protocol.MinInitialPacketSize - sealer.Overhead() - buffer.Len() - if paddingLen > 0 { - buffer.Write(bytes.Repeat([]byte{0}, paddingLen)) - } - } - - if size := protocol.ByteCount(buffer.Len() + sealer.Overhead()); size > p.maxPacketSize { - return nil, fmt.Errorf("PacketPacker BUG: packet too large (%d bytes, allowed %d bytes)", size, p.maxPacketSize) - } - - raw := buffer.Bytes() - _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], header.PacketNumber, raw[:payloadOffset]) - raw = raw[0 : buffer.Len()+sealer.Overhead()] - - pnOffset := payloadOffset - int(header.PacketNumberLen) - sealer.EncryptHeader( - raw[pnOffset+4:pnOffset+4+16], - &raw[0], - raw[pnOffset:payloadOffset], - ) - - num := p.pnManager.PopPacketNumber(encLevel) - if num != header.PacketNumber { - return nil, errors.New("packetPacker BUG: Peeked and Popped packet numbers do not match") - } - return &packedPacket{ - header: header, - raw: raw, - frames: frames, - buffer: packetBuffer, - }, nil -} - -func (p *packetPacker) ChangeDestConnectionID(connID protocol.ConnectionID) { - p.destConnID = connID -} - -func (p *packetPacker) SetToken(token []byte) { - p.token = token -} - -func (p *packetPacker) HandleTransportParameters(params *handshake.TransportParameters) { - if params.MaxPacketSize != 0 { - p.maxPacketSize = utils.MinByteCount(p.maxPacketSize, params.MaxPacketSize) - } -} 
diff --git a/vendor/github.com/lucas-clemente/quic-go/packet_unpacker.go b/vendor/github.com/lucas-clemente/quic-go/packet_unpacker.go deleted file mode 100644 index f3b5a2f77..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/packet_unpacker.go +++ /dev/null @@ -1,102 +0,0 @@ -package quic - -import ( - "bytes" - "fmt" - - "github.com/lucas-clemente/quic-go/internal/handshake" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type unpackedPacket struct { - packetNumber protocol.PacketNumber // the decoded packet number - hdr *wire.ExtendedHeader - encryptionLevel protocol.EncryptionLevel - data []byte -} - -// The packetUnpacker unpacks QUIC packets. -type packetUnpacker struct { - cs handshake.CryptoSetup - - largestRcvdPacketNumber protocol.PacketNumber - - version protocol.VersionNumber -} - -var _ unpacker = &packetUnpacker{} - -func newPacketUnpacker(cs handshake.CryptoSetup, version protocol.VersionNumber) unpacker { - return &packetUnpacker{ - cs: cs, - version: version, - } -} - -func (u *packetUnpacker) Unpack(hdr *wire.Header, data []byte) (*unpackedPacket, error) { - r := bytes.NewReader(data) - - var encLevel protocol.EncryptionLevel - switch hdr.Type { - case protocol.PacketTypeInitial: - encLevel = protocol.EncryptionInitial - case protocol.PacketTypeHandshake: - encLevel = protocol.EncryptionHandshake - default: - if hdr.IsLongHeader { - return nil, fmt.Errorf("unknown packet type: %s", hdr.Type) - } - encLevel = protocol.Encryption1RTT - } - opener, err := u.cs.GetOpener(encLevel) - if err != nil { - return nil, err - } - hdrLen := int(hdr.ParsedLen()) - if len(data) < hdrLen+4+16 { - return nil, fmt.Errorf("Packet too small. Expected at least 20 bytes after the header, got %d", len(data)-hdrLen) - } - // The packet number can be up to 4 bytes long, but we won't know the length until we decrypt it. - // 1. 
save a copy of the 4 bytes - origPNBytes := make([]byte, 4) - copy(origPNBytes, data[hdrLen:hdrLen+4]) - // 2. decrypt the header, assuming a 4 byte packet number - opener.DecryptHeader( - data[hdrLen+4:hdrLen+4+16], - &data[0], - data[hdrLen:hdrLen+4], - ) - // 3. parse the header (and learn the actual length of the packet number) - extHdr, err := hdr.ParseExtended(r, u.version) - if err != nil { - return nil, fmt.Errorf("error parsing extended header: %s", err) - } - extHdrLen := hdrLen + int(extHdr.PacketNumberLen) - // 4. if the packet number is shorter than 4 bytes, replace the remaining bytes with the copy we saved earlier - if extHdr.PacketNumberLen != protocol.PacketNumberLen4 { - copy(data[extHdrLen:hdrLen+4], origPNBytes[int(extHdr.PacketNumberLen):]) - } - - pn := protocol.DecodePacketNumber( - extHdr.PacketNumberLen, - u.largestRcvdPacketNumber, - extHdr.PacketNumber, - ) - - decrypted, err := opener.Open(data[extHdrLen:extHdrLen], data[extHdrLen:], pn, data[:extHdrLen]) - if err != nil { - return nil, err - } - - // Only do this after decrypting, so we are sure the packet is not attacker-controlled - u.largestRcvdPacketNumber = utils.MaxPacketNumber(u.largestRcvdPacketNumber, pn) - - return &unpackedPacket{ - hdr: extHdr, - packetNumber: pn, - encryptionLevel: encLevel, - data: decrypted, - }, nil -} diff --git a/vendor/github.com/lucas-clemente/quic-go/receive_stream.go b/vendor/github.com/lucas-clemente/quic-go/receive_stream.go deleted file mode 100644 index de76335e7..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/receive_stream.go +++ /dev/null @@ -1,324 +0,0 @@ -package quic - -import ( - "fmt" - "io" - "sync" - "time" - - "github.com/lucas-clemente/quic-go/internal/flowcontrol" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type receiveStreamI interface { - ReceiveStream - - handleStreamFrame(*wire.StreamFrame) error 
- handleResetStreamFrame(*wire.ResetStreamFrame) error - closeForShutdown(error) - getWindowUpdate() protocol.ByteCount -} - -type receiveStream struct { - mutex sync.Mutex - - streamID protocol.StreamID - - sender streamSender - - frameQueue *frameSorter - readOffset protocol.ByteCount - finalOffset protocol.ByteCount - - currentFrame []byte - currentFrameIsLast bool // is the currentFrame the last frame on this stream - readPosInFrame int - - closeForShutdownErr error - cancelReadErr error - resetRemotelyErr StreamError - - closedForShutdown bool // set when CloseForShutdown() is called - finRead bool // set once we read a frame with a FinBit - canceledRead bool // set when CancelRead() is called - resetRemotely bool // set when HandleResetStreamFrame() is called - - readChan chan struct{} - deadline time.Time - - flowController flowcontrol.StreamFlowController - version protocol.VersionNumber -} - -var _ ReceiveStream = &receiveStream{} -var _ receiveStreamI = &receiveStream{} - -func newReceiveStream( - streamID protocol.StreamID, - sender streamSender, - flowController flowcontrol.StreamFlowController, - version protocol.VersionNumber, -) *receiveStream { - return &receiveStream{ - streamID: streamID, - sender: sender, - flowController: flowController, - frameQueue: newFrameSorter(), - readChan: make(chan struct{}, 1), - finalOffset: protocol.MaxByteCount, - version: version, - } -} - -func (s *receiveStream) StreamID() protocol.StreamID { - return s.streamID -} - -// Read implements io.Reader. It is not thread safe! 
-func (s *receiveStream) Read(p []byte) (int, error) { - s.mutex.Lock() - completed, n, err := s.readImpl(p) - s.mutex.Unlock() - - if completed { - s.streamCompleted() - } - return n, err -} - -func (s *receiveStream) readImpl(p []byte) (bool /*stream completed */, int, error) { - if s.finRead { - return false, 0, io.EOF - } - if s.canceledRead { - return false, 0, s.cancelReadErr - } - if s.resetRemotely { - return false, 0, s.resetRemotelyErr - } - if s.closedForShutdown { - return false, 0, s.closeForShutdownErr - } - - bytesRead := 0 - for bytesRead < len(p) { - if s.currentFrame == nil || s.readPosInFrame >= len(s.currentFrame) { - s.dequeueNextFrame() - } - if s.currentFrame == nil && bytesRead > 0 { - return false, bytesRead, s.closeForShutdownErr - } - - var deadlineTimer *utils.Timer - for { - // Stop waiting on errors - if s.closedForShutdown { - return false, bytesRead, s.closeForShutdownErr - } - if s.canceledRead { - return false, bytesRead, s.cancelReadErr - } - if s.resetRemotely { - return false, bytesRead, s.resetRemotelyErr - } - - deadline := s.deadline - if !deadline.IsZero() { - if !time.Now().Before(deadline) { - return false, bytesRead, errDeadline - } - if deadlineTimer == nil { - deadlineTimer = utils.NewTimer() - } - deadlineTimer.Reset(deadline) - } - - if s.currentFrame != nil || s.currentFrameIsLast { - break - } - - s.mutex.Unlock() - if deadline.IsZero() { - <-s.readChan - } else { - select { - case <-s.readChan: - case <-deadlineTimer.Chan(): - deadlineTimer.SetRead() - } - } - s.mutex.Lock() - if s.currentFrame == nil { - s.dequeueNextFrame() - } - } - - if bytesRead > len(p) { - return false, bytesRead, fmt.Errorf("BUG: bytesRead (%d) > len(p) (%d) in stream.Read", bytesRead, len(p)) - } - if s.readPosInFrame > len(s.currentFrame) { - return false, bytesRead, fmt.Errorf("BUG: readPosInFrame (%d) > frame.DataLen (%d) in stream.Read", s.readPosInFrame, len(s.currentFrame)) - } - - s.mutex.Unlock() - - m := copy(p[bytesRead:], 
s.currentFrame[s.readPosInFrame:]) - s.readPosInFrame += m - bytesRead += m - s.readOffset += protocol.ByteCount(m) - - s.mutex.Lock() - // when a RESET_STREAM was received, the was already informed about the final byteOffset for this stream - if !s.resetRemotely { - s.flowController.AddBytesRead(protocol.ByteCount(m)) - } - - if s.readPosInFrame >= len(s.currentFrame) && s.currentFrameIsLast { - s.finRead = true - return true, bytesRead, io.EOF - } - } - return false, bytesRead, nil -} - -func (s *receiveStream) dequeueNextFrame() { - var offset protocol.ByteCount - offset, s.currentFrame = s.frameQueue.Pop() - s.currentFrameIsLast = offset+protocol.ByteCount(len(s.currentFrame)) >= s.finalOffset - s.readPosInFrame = 0 -} - -func (s *receiveStream) CancelRead(errorCode protocol.ApplicationErrorCode) { - s.mutex.Lock() - completed := s.cancelReadImpl(errorCode) - s.mutex.Unlock() - - if completed { - s.streamCompleted() - } -} - -func (s *receiveStream) cancelReadImpl(errorCode protocol.ApplicationErrorCode) bool /* completed */ { - if s.finRead || s.canceledRead || s.resetRemotely { - return false - } - s.canceledRead = true - s.cancelReadErr = fmt.Errorf("Read on stream %d canceled with error code %d", s.streamID, errorCode) - s.signalRead() - s.sender.queueControlFrame(&wire.StopSendingFrame{ - StreamID: s.streamID, - ErrorCode: errorCode, - }) - // We're done with this stream if the final offset was already received. 
- return s.finalOffset != protocol.MaxByteCount -} - -func (s *receiveStream) handleStreamFrame(frame *wire.StreamFrame) error { - s.mutex.Lock() - completed, err := s.handleStreamFrameImpl(frame) - s.mutex.Unlock() - - if completed { - s.streamCompleted() - } - return err -} - -func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame) (bool /* completed */, error) { - maxOffset := frame.Offset + frame.DataLen() - if err := s.flowController.UpdateHighestReceived(maxOffset, frame.FinBit); err != nil { - return false, err - } - if frame.FinBit { - s.finalOffset = maxOffset - } - if s.canceledRead { - return frame.FinBit, nil - } - if err := s.frameQueue.Push(frame.Data, frame.Offset); err != nil { - return false, err - } - s.signalRead() - return false, nil -} - -func (s *receiveStream) handleResetStreamFrame(frame *wire.ResetStreamFrame) error { - s.mutex.Lock() - completed, err := s.handleResetStreamFrameImpl(frame) - s.mutex.Unlock() - - if completed { - s.streamCompleted() - } - return err -} - -func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame) (bool /*completed */, error) { - if s.closedForShutdown { - return false, nil - } - if err := s.flowController.UpdateHighestReceived(frame.ByteOffset, true); err != nil { - return false, err - } - s.finalOffset = frame.ByteOffset - - // ignore duplicate RESET_STREAM frames for this stream (after checking their final offset) - if s.resetRemotely { - return false, nil - } - s.resetRemotely = true - s.resetRemotelyErr = streamCanceledError{ - errorCode: frame.ErrorCode, - error: fmt.Errorf("Stream %d was reset with error code %d", s.streamID, frame.ErrorCode), - } - s.signalRead() - return true, nil -} - -func (s *receiveStream) CloseRemote(offset protocol.ByteCount) { - s.handleStreamFrame(&wire.StreamFrame{FinBit: true, Offset: offset}) -} - -func (s *receiveStream) SetReadDeadline(t time.Time) error { - s.mutex.Lock() - s.deadline = t - s.mutex.Unlock() - s.signalRead() - return 
nil -} - -// CloseForShutdown closes a stream abruptly. -// It makes Read unblock (and return the error) immediately. -// The peer will NOT be informed about this: the stream is closed without sending a FIN or RESET. -func (s *receiveStream) closeForShutdown(err error) { - s.mutex.Lock() - s.closedForShutdown = true - s.closeForShutdownErr = err - s.mutex.Unlock() - s.signalRead() -} - -func (s *receiveStream) getWindowUpdate() protocol.ByteCount { - return s.flowController.GetWindowUpdate() -} - -func (s *receiveStream) streamCompleted() { - s.mutex.Lock() - finRead := s.finRead - s.mutex.Unlock() - - if !finRead { - s.flowController.Abandon() - } - s.sender.onStreamCompleted(s.streamID) -} - -// signalRead performs a non-blocking send on the readChan -func (s *receiveStream) signalRead() { - select { - case s.readChan <- struct{}{}: - default: - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/send_stream.go b/vendor/github.com/lucas-clemente/quic-go/send_stream.go deleted file mode 100644 index 46f24c4be..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/send_stream.go +++ /dev/null @@ -1,332 +0,0 @@ -package quic - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/lucas-clemente/quic-go/internal/flowcontrol" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type sendStreamI interface { - SendStream - handleStopSendingFrame(*wire.StopSendingFrame) - hasData() bool - popStreamFrame(maxBytes protocol.ByteCount) (*wire.StreamFrame, bool) - closeForShutdown(error) - handleMaxStreamDataFrame(*wire.MaxStreamDataFrame) -} - -type sendStream struct { - mutex sync.Mutex - - ctx context.Context - ctxCancel context.CancelFunc - - streamID protocol.StreamID - sender streamSender - - writeOffset protocol.ByteCount - - cancelWriteErr error - closeForShutdownErr error - - closedForShutdown bool // set when CloseForShutdown() is 
called - finishedWriting bool // set once Close() is called - canceledWrite bool // set when CancelWrite() is called, or a STOP_SENDING frame is received - finSent bool // set when a STREAM_FRAME with FIN bit has b - - dataForWriting []byte - - writeChan chan struct{} - deadline time.Time - - flowController flowcontrol.StreamFlowController - - version protocol.VersionNumber -} - -var _ SendStream = &sendStream{} -var _ sendStreamI = &sendStream{} - -func newSendStream( - streamID protocol.StreamID, - sender streamSender, - flowController flowcontrol.StreamFlowController, - version protocol.VersionNumber, -) *sendStream { - s := &sendStream{ - streamID: streamID, - sender: sender, - flowController: flowController, - writeChan: make(chan struct{}, 1), - version: version, - } - s.ctx, s.ctxCancel = context.WithCancel(context.Background()) - return s -} - -func (s *sendStream) StreamID() protocol.StreamID { - return s.streamID // same for receiveStream and sendStream -} - -func (s *sendStream) Write(p []byte) (int, error) { - s.mutex.Lock() - defer s.mutex.Unlock() - - if s.finishedWriting { - return 0, fmt.Errorf("write on closed stream %d", s.streamID) - } - if s.canceledWrite { - return 0, s.cancelWriteErr - } - if s.closeForShutdownErr != nil { - return 0, s.closeForShutdownErr - } - if !s.deadline.IsZero() && !time.Now().Before(s.deadline) { - return 0, errDeadline - } - if len(p) == 0 { - return 0, nil - } - - s.dataForWriting = p - - var ( - deadlineTimer *utils.Timer - bytesWritten int - notifiedSender bool - ) - for { - bytesWritten = len(p) - len(s.dataForWriting) - deadline := s.deadline - if !deadline.IsZero() { - if !time.Now().Before(deadline) { - s.dataForWriting = nil - return bytesWritten, errDeadline - } - if deadlineTimer == nil { - deadlineTimer = utils.NewTimer() - } - deadlineTimer.Reset(deadline) - } - if s.dataForWriting == nil || s.canceledWrite || s.closedForShutdown { - break - } - - s.mutex.Unlock() - if !notifiedSender { - 
s.sender.onHasStreamData(s.streamID) // must be called without holding the mutex - notifiedSender = true - } - if deadline.IsZero() { - <-s.writeChan - } else { - select { - case <-s.writeChan: - case <-deadlineTimer.Chan(): - deadlineTimer.SetRead() - } - } - s.mutex.Lock() - } - - if s.closeForShutdownErr != nil { - return bytesWritten, s.closeForShutdownErr - } else if s.cancelWriteErr != nil { - return bytesWritten, s.cancelWriteErr - } - return bytesWritten, nil -} - -// popStreamFrame returns the next STREAM frame that is supposed to be sent on this stream -// maxBytes is the maximum length this frame (including frame header) will have. -func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount) (*wire.StreamFrame, bool /* has more data to send */) { - s.mutex.Lock() - completed, frame, hasMoreData := s.popStreamFrameImpl(maxBytes) - s.mutex.Unlock() - - if completed { - s.sender.onStreamCompleted(s.streamID) - } - return frame, hasMoreData -} - -func (s *sendStream) popStreamFrameImpl(maxBytes protocol.ByteCount) (bool /* completed */, *wire.StreamFrame, bool /* has more data to send */) { - if s.canceledWrite || s.closeForShutdownErr != nil { - return false, nil, false - } - - frame := &wire.StreamFrame{ - StreamID: s.streamID, - Offset: s.writeOffset, - DataLenPresent: true, - } - maxDataLen := frame.MaxDataLen(maxBytes, s.version) - if maxDataLen == 0 { // a STREAM frame must have at least one byte of data - return false, nil, s.dataForWriting != nil - } - frame.Data, frame.FinBit = s.getDataForWriting(maxDataLen) - if len(frame.Data) == 0 && !frame.FinBit { - // this can happen if: - // - popStreamFrame is called but there's no data for writing - // - there's data for writing, but the stream is stream-level flow control blocked - // - there's data for writing, but the stream is connection-level flow control blocked - if s.dataForWriting == nil { - return false, nil, false - } - if isBlocked, offset := s.flowController.IsNewlyBlocked(); isBlocked { 
- s.sender.queueControlFrame(&wire.StreamDataBlockedFrame{ - StreamID: s.streamID, - DataLimit: offset, - }) - return false, nil, false - } - return false, nil, true - } - if frame.FinBit { - s.finSent = true - } - return frame.FinBit, frame, s.dataForWriting != nil -} - -func (s *sendStream) hasData() bool { - s.mutex.Lock() - hasData := len(s.dataForWriting) > 0 - s.mutex.Unlock() - return hasData -} - -func (s *sendStream) getDataForWriting(maxBytes protocol.ByteCount) ([]byte, bool /* should send FIN */) { - if s.dataForWriting == nil { - return nil, s.finishedWriting && !s.finSent - } - - maxBytes = utils.MinByteCount(maxBytes, s.flowController.SendWindowSize()) - if maxBytes == 0 { - return nil, false - } - - var ret []byte - if protocol.ByteCount(len(s.dataForWriting)) > maxBytes { - ret = make([]byte, int(maxBytes)) - copy(ret, s.dataForWriting[:maxBytes]) - s.dataForWriting = s.dataForWriting[maxBytes:] - } else { - ret = make([]byte, len(s.dataForWriting)) - copy(ret, s.dataForWriting) - s.dataForWriting = nil - s.signalWrite() - } - s.writeOffset += protocol.ByteCount(len(ret)) - s.flowController.AddBytesSent(protocol.ByteCount(len(ret))) - return ret, s.finishedWriting && s.dataForWriting == nil && !s.finSent -} - -func (s *sendStream) Close() error { - s.mutex.Lock() - if s.canceledWrite { - s.mutex.Unlock() - return fmt.Errorf("Close called for canceled stream %d", s.streamID) - } - s.finishedWriting = true - s.mutex.Unlock() - - s.sender.onHasStreamData(s.streamID) // need to send the FIN, must be called without holding the mutex - s.ctxCancel() - return nil -} - -func (s *sendStream) CancelWrite(errorCode protocol.ApplicationErrorCode) { - s.mutex.Lock() - completed := s.cancelWriteImpl(errorCode, fmt.Errorf("Write on stream %d canceled with error code %d", s.streamID, errorCode)) - s.mutex.Unlock() - - if completed { - s.sender.onStreamCompleted(s.streamID) // must be called without holding the mutex - } -} - -// must be called after locking the 
mutex -func (s *sendStream) cancelWriteImpl(errorCode protocol.ApplicationErrorCode, writeErr error) bool /*completed */ { - if s.canceledWrite || s.finishedWriting { - return false - } - s.canceledWrite = true - s.cancelWriteErr = writeErr - s.signalWrite() - s.sender.queueControlFrame(&wire.ResetStreamFrame{ - StreamID: s.streamID, - ByteOffset: s.writeOffset, - ErrorCode: errorCode, - }) - // TODO(#991): cancel retransmissions for this stream - s.ctxCancel() - return true -} - -func (s *sendStream) handleMaxStreamDataFrame(frame *wire.MaxStreamDataFrame) { - s.mutex.Lock() - hasStreamData := s.dataForWriting != nil - s.mutex.Unlock() - - s.flowController.UpdateSendWindow(frame.ByteOffset) - if hasStreamData { - s.sender.onHasStreamData(s.streamID) - } -} - -func (s *sendStream) handleStopSendingFrame(frame *wire.StopSendingFrame) { - s.mutex.Lock() - completed := s.handleStopSendingFrameImpl(frame) - s.mutex.Unlock() - - if completed { - s.sender.onStreamCompleted(s.streamID) - } -} - -// must be called after locking the mutex -func (s *sendStream) handleStopSendingFrameImpl(frame *wire.StopSendingFrame) bool /*completed*/ { - writeErr := streamCanceledError{ - errorCode: frame.ErrorCode, - error: fmt.Errorf("Stream %d was reset with error code %d", s.streamID, frame.ErrorCode), - } - return s.cancelWriteImpl(errorCodeStopping, writeErr) -} - -func (s *sendStream) Context() context.Context { - return s.ctx -} - -func (s *sendStream) SetWriteDeadline(t time.Time) error { - s.mutex.Lock() - s.deadline = t - s.mutex.Unlock() - s.signalWrite() - return nil -} - -// CloseForShutdown closes a stream abruptly. -// It makes Write unblock (and return the error) immediately. -// The peer will NOT be informed about this: the stream is closed without sending a FIN or RST. 
-func (s *sendStream) closeForShutdown(err error) { - s.mutex.Lock() - s.closedForShutdown = true - s.closeForShutdownErr = err - s.mutex.Unlock() - s.signalWrite() - s.ctxCancel() -} - -// signalWrite performs a non-blocking send on the writeChan -func (s *sendStream) signalWrite() { - select { - case s.writeChan <- struct{}{}: - default: - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/server.go b/vendor/github.com/lucas-clemente/quic-go/server.go deleted file mode 100644 index fe2f9388a..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/server.go +++ /dev/null @@ -1,558 +0,0 @@ -package quic - -import ( - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/lucas-clemente/quic-go/internal/handshake" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -// packetHandler handles packets -type packetHandler interface { - handlePacket(*receivedPacket) - io.Closer - destroy(error) - getPerspective() protocol.Perspective -} - -type unknownPacketHandler interface { - handlePacket(*receivedPacket) - closeWithError(error) error -} - -type packetHandlerManager interface { - io.Closer - Add(protocol.ConnectionID, packetHandler) - Retire(protocol.ConnectionID) - Remove(protocol.ConnectionID) - AddResetToken([16]byte, packetHandler) - RemoveResetToken([16]byte) - GetStatelessResetToken(protocol.ConnectionID) [16]byte - SetServer(unknownPacketHandler) - CloseServer() -} - -type quicSession interface { - Session - handlePacket(*receivedPacket) - GetVersion() protocol.VersionNumber - getPerspective() protocol.Perspective - run() error - destroy(error) - closeForRecreating() protocol.PacketNumber - closeRemote(error) -} - -type sessionRunner interface { - OnHandshakeComplete(Session) - Retire(protocol.ConnectionID) - 
Remove(protocol.ConnectionID) - AddResetToken([16]byte, packetHandler) - RemoveResetToken([16]byte) -} - -type runner struct { - packetHandlerManager - - onHandshakeCompleteImpl func(Session) -} - -func (r *runner) OnHandshakeComplete(s Session) { r.onHandshakeCompleteImpl(s) } - -var _ sessionRunner = &runner{} - -// A Listener of QUIC -type server struct { - mutex sync.Mutex - - tlsConf *tls.Config - config *Config - - conn net.PacketConn - // If the server is started with ListenAddr, we create a packet conn. - // If it is started with Listen, we take a packet conn as a parameter. - createdPacketConn bool - - cookieGenerator *handshake.CookieGenerator - - sessionHandler packetHandlerManager - - // set as a member, so they can be set in the tests - newSession func(connection, sessionRunner, protocol.ConnectionID /* original connection ID */, protocol.ConnectionID /* destination connection ID */, protocol.ConnectionID /* source connection ID */, *Config, *tls.Config, *handshake.TransportParameters, utils.Logger, protocol.VersionNumber) (quicSession, error) - - serverError error - errorChan chan struct{} - closed bool - - sessionQueue chan Session - sessionQueueLen int32 // to be used as an atomic - - sessionRunner sessionRunner - - logger utils.Logger -} - -var _ Listener = &server{} -var _ unknownPacketHandler = &server{} - -// ListenAddr creates a QUIC server listening on a given address. -// The tls.Config must not be nil and must contain a certificate configuration. -// The quic.Config may be nil, in that case the default values will be used. 
-func ListenAddr(addr string, tlsConf *tls.Config, config *Config) (Listener, error) { - udpAddr, err := net.ResolveUDPAddr("udp", addr) - if err != nil { - return nil, err - } - conn, err := net.ListenUDP("udp", udpAddr) - if err != nil { - return nil, err - } - serv, err := listen(conn, tlsConf, config) - if err != nil { - return nil, err - } - serv.createdPacketConn = true - return serv, nil -} - -// Listen listens for QUIC connections on a given net.PacketConn. -// A single PacketConn only be used for a single call to Listen. -// The PacketConn can be used for simultaneous calls to Dial. -// QUIC connection IDs are used for demultiplexing the different connections. -// The tls.Config must not be nil and must contain a certificate configuration. -// The quic.Config may be nil, in that case the default values will be used. -func Listen(conn net.PacketConn, tlsConf *tls.Config, config *Config) (Listener, error) { - return listen(conn, tlsConf, config) -} - -func listen(conn net.PacketConn, tlsConf *tls.Config, config *Config) (*server, error) { - // TODO(#1655): only require that tls.Config.Certificates or tls.Config.GetCertificate is set - if tlsConf == nil || len(tlsConf.Certificates) == 0 { - return nil, errors.New("quic: Certificates not set in tls.Config") - } - config = populateServerConfig(config) - for _, v := range config.Versions { - if !protocol.IsValidVersion(v) { - return nil, fmt.Errorf("%s is not a valid QUIC version", v) - } - } - - sessionHandler, err := getMultiplexer().AddConn(conn, config.ConnectionIDLength, config.StatelessResetKey) - if err != nil { - return nil, err - } - s := &server{ - conn: conn, - tlsConf: tlsConf, - config: config, - sessionHandler: sessionHandler, - sessionQueue: make(chan Session), - errorChan: make(chan struct{}), - newSession: newSession, - logger: utils.DefaultLogger.WithPrefix("server"), - } - if err := s.setup(); err != nil { - return nil, err - } - sessionHandler.SetServer(s) - s.logger.Debugf("Listening for %s 
connections on %s", conn.LocalAddr().Network(), conn.LocalAddr().String()) - return s, nil -} - -func (s *server) setup() error { - s.sessionRunner = &runner{ - packetHandlerManager: s.sessionHandler, - onHandshakeCompleteImpl: func(sess Session) { - go func() { - atomic.AddInt32(&s.sessionQueueLen, 1) - defer atomic.AddInt32(&s.sessionQueueLen, -1) - select { - case s.sessionQueue <- sess: - // blocks until the session is accepted - case <-sess.Context().Done(): - // don't pass sessions that were already closed to Accept() - } - }() - }, - } - cookieGenerator, err := handshake.NewCookieGenerator() - if err != nil { - return err - } - s.cookieGenerator = cookieGenerator - return nil -} - -var defaultAcceptCookie = func(clientAddr net.Addr, cookie *Cookie) bool { - if cookie == nil { - return false - } - if time.Now().After(cookie.SentTime.Add(protocol.CookieExpiryTime)) { - return false - } - var sourceAddr string - if udpAddr, ok := clientAddr.(*net.UDPAddr); ok { - sourceAddr = udpAddr.IP.String() - } else { - sourceAddr = clientAddr.String() - } - return sourceAddr == cookie.RemoteAddr -} - -// populateServerConfig populates fields in the quic.Config with their default values, if none are set -// it may be called with nil -func populateServerConfig(config *Config) *Config { - if config == nil { - config = &Config{} - } - versions := config.Versions - if len(versions) == 0 { - versions = protocol.SupportedVersions - } - - vsa := defaultAcceptCookie - if config.AcceptCookie != nil { - vsa = config.AcceptCookie - } - - handshakeTimeout := protocol.DefaultHandshakeTimeout - if config.HandshakeTimeout != 0 { - handshakeTimeout = config.HandshakeTimeout - } - idleTimeout := protocol.DefaultIdleTimeout - if config.IdleTimeout != 0 { - idleTimeout = config.IdleTimeout - } - - maxReceiveStreamFlowControlWindow := config.MaxReceiveStreamFlowControlWindow - if maxReceiveStreamFlowControlWindow == 0 { - maxReceiveStreamFlowControlWindow = 
protocol.DefaultMaxReceiveStreamFlowControlWindow - } - maxReceiveConnectionFlowControlWindow := config.MaxReceiveConnectionFlowControlWindow - if maxReceiveConnectionFlowControlWindow == 0 { - maxReceiveConnectionFlowControlWindow = protocol.DefaultMaxReceiveConnectionFlowControlWindow - } - maxIncomingStreams := config.MaxIncomingStreams - if maxIncomingStreams == 0 { - maxIncomingStreams = protocol.DefaultMaxIncomingStreams - } else if maxIncomingStreams < 0 { - maxIncomingStreams = 0 - } - maxIncomingUniStreams := config.MaxIncomingUniStreams - if maxIncomingUniStreams == 0 { - maxIncomingUniStreams = protocol.DefaultMaxIncomingUniStreams - } else if maxIncomingUniStreams < 0 { - maxIncomingUniStreams = 0 - } - connIDLen := config.ConnectionIDLength - if connIDLen == 0 { - connIDLen = protocol.DefaultConnectionIDLength - } - - return &Config{ - Versions: versions, - HandshakeTimeout: handshakeTimeout, - IdleTimeout: idleTimeout, - AcceptCookie: vsa, - KeepAlive: config.KeepAlive, - MaxReceiveStreamFlowControlWindow: maxReceiveStreamFlowControlWindow, - MaxReceiveConnectionFlowControlWindow: maxReceiveConnectionFlowControlWindow, - MaxIncomingStreams: maxIncomingStreams, - MaxIncomingUniStreams: maxIncomingUniStreams, - ConnectionIDLength: connIDLen, - StatelessResetKey: config.StatelessResetKey, - } -} - -// Accept returns newly openend sessions -func (s *server) Accept() (Session, error) { - var sess Session - select { - case sess = <-s.sessionQueue: - return sess, nil - case <-s.errorChan: - return nil, s.serverError - } -} - -// Close the server -func (s *server) Close() error { - s.mutex.Lock() - defer s.mutex.Unlock() - if s.closed { - return nil - } - return s.closeWithMutex() -} - -func (s *server) closeWithMutex() error { - s.sessionHandler.CloseServer() - if s.serverError == nil { - s.serverError = errors.New("server closed") - } - var err error - // If the server was started with ListenAddr, we created the packet conn. 
- // We need to close it in order to make the go routine reading from that conn return. - if s.createdPacketConn { - err = s.sessionHandler.Close() - } - s.closed = true - close(s.errorChan) - return err -} - -func (s *server) closeWithError(e error) error { - s.mutex.Lock() - defer s.mutex.Unlock() - if s.closed { - return nil - } - s.serverError = e - return s.closeWithMutex() -} - -// Addr returns the server's network address -func (s *server) Addr() net.Addr { - return s.conn.LocalAddr() -} - -func (s *server) handlePacket(p *receivedPacket) { - go func() { - if shouldReleaseBuffer := s.handlePacketImpl(p); !shouldReleaseBuffer { - p.buffer.Release() - } - }() -} - -func (s *server) handlePacketImpl(p *receivedPacket) bool /* was the packet passed on to a session */ { - if len(p.data) < protocol.MinInitialPacketSize { - s.logger.Debugf("Dropping a packet that is too small to be a valid Initial (%d bytes)", len(p.data)) - return false - } - // If we're creating a new session, the packet will be passed to the session. - // The header will then be parsed again. - hdr, _, _, err := wire.ParsePacket(p.data, s.config.ConnectionIDLength) - if err != nil { - s.logger.Debugf("Error parsing packet: %s", err) - return false - } - // Short header packets should never end up here in the first place - if !hdr.IsLongHeader { - return false - } - // send a Version Negotiation Packet if the client is speaking a different protocol version - if !protocol.IsSupportedVersion(s.config.Versions, hdr.Version) { - s.sendVersionNegotiationPacket(p, hdr) - return false - } - if hdr.IsLongHeader && hdr.Type != protocol.PacketTypeInitial { - // Drop long header packets. - // There's litte point in sending a Stateless Reset, since the client - // might not have received the token yet. 
- return false - } - - s.logger.Debugf("<- Received Initial packet.") - - sess, connID, err := s.handleInitialImpl(p, hdr) - if err != nil { - s.logger.Errorf("Error occurred handling initial packet: %s", err) - return false - } - if sess == nil { // a retry was done, or the connection attempt was rejected - return false - } - // Don't put the packet buffer back if a new session was created. - // The session will handle the packet and take of that. - s.sessionHandler.Add(connID, sess) - return true -} - -func (s *server) handleInitialImpl(p *receivedPacket, hdr *wire.Header) (quicSession, protocol.ConnectionID, error) { - if len(hdr.Token) == 0 && hdr.DestConnectionID.Len() < protocol.MinConnectionIDLenInitial { - return nil, nil, errors.New("too short connection ID") - } - - var cookie *Cookie - var origDestConnectionID protocol.ConnectionID - if len(hdr.Token) > 0 { - c, err := s.cookieGenerator.DecodeToken(hdr.Token) - if err == nil { - cookie = &Cookie{ - RemoteAddr: c.RemoteAddr, - SentTime: c.SentTime, - } - origDestConnectionID = c.OriginalDestConnectionID - } - } - if !s.config.AcceptCookie(p.remoteAddr, cookie) { - // Log the Initial packet now. - // If no Retry is sent, the packet will be logged by the session. - (&wire.ExtendedHeader{Header: *hdr}).Log(s.logger) - return nil, nil, s.sendRetry(p.remoteAddr, hdr) - } - - if queueLen := atomic.LoadInt32(&s.sessionQueueLen); queueLen >= protocol.MaxAcceptQueueSize { - s.logger.Debugf("Rejecting new connection. Server currently busy. 
Accept queue length: %d (max %d)", queueLen, protocol.MaxAcceptQueueSize) - return nil, nil, s.sendServerBusy(p.remoteAddr, hdr) - } - - connID, err := protocol.GenerateConnectionID(s.config.ConnectionIDLength) - if err != nil { - return nil, nil, err - } - s.logger.Debugf("Changing connection ID to %s.", connID) - sess, err := s.createNewSession( - p.remoteAddr, - origDestConnectionID, - hdr.DestConnectionID, - hdr.SrcConnectionID, - connID, - hdr.Version, - ) - if err != nil { - return nil, nil, err - } - sess.handlePacket(p) - return sess, connID, nil -} - -func (s *server) createNewSession( - remoteAddr net.Addr, - origDestConnID protocol.ConnectionID, - clientDestConnID protocol.ConnectionID, - destConnID protocol.ConnectionID, - srcConnID protocol.ConnectionID, - version protocol.VersionNumber, -) (quicSession, error) { - token := s.sessionHandler.GetStatelessResetToken(srcConnID) - params := &handshake.TransportParameters{ - InitialMaxStreamDataBidiLocal: protocol.InitialMaxStreamData, - InitialMaxStreamDataBidiRemote: protocol.InitialMaxStreamData, - InitialMaxStreamDataUni: protocol.InitialMaxStreamData, - InitialMaxData: protocol.InitialMaxData, - IdleTimeout: s.config.IdleTimeout, - MaxBidiStreams: uint64(s.config.MaxIncomingStreams), - MaxUniStreams: uint64(s.config.MaxIncomingUniStreams), - AckDelayExponent: protocol.AckDelayExponent, - DisableMigration: true, - StatelessResetToken: &token, - OriginalConnectionID: origDestConnID, - } - sess, err := s.newSession( - &conn{pconn: s.conn, currentAddr: remoteAddr}, - s.sessionRunner, - clientDestConnID, - destConnID, - srcConnID, - s.config, - s.tlsConf, - params, - s.logger, - version, - ) - if err != nil { - return nil, err - } - go sess.run() - return sess, nil -} - -func (s *server) sendRetry(remoteAddr net.Addr, hdr *wire.Header) error { - token, err := s.cookieGenerator.NewToken(remoteAddr, hdr.DestConnectionID) - if err != nil { - return err - } - connID, err := 
protocol.GenerateConnectionID(s.config.ConnectionIDLength) - if err != nil { - return err - } - replyHdr := &wire.ExtendedHeader{} - replyHdr.IsLongHeader = true - replyHdr.Type = protocol.PacketTypeRetry - replyHdr.Version = hdr.Version - replyHdr.SrcConnectionID = connID - replyHdr.DestConnectionID = hdr.SrcConnectionID - replyHdr.OrigDestConnectionID = hdr.DestConnectionID - replyHdr.Token = token - s.logger.Debugf("Changing connection ID to %s.\n-> Sending Retry", connID) - replyHdr.Log(s.logger) - buf := &bytes.Buffer{} - if err := replyHdr.Write(buf, hdr.Version); err != nil { - return err - } - if _, err := s.conn.WriteTo(buf.Bytes(), remoteAddr); err != nil { - s.logger.Debugf("Error sending Retry: %s", err) - } - return nil -} - -func (s *server) sendServerBusy(remoteAddr net.Addr, hdr *wire.Header) error { - sealer, _, err := handshake.NewInitialAEAD(hdr.DestConnectionID, protocol.PerspectiveServer) - if err != nil { - return err - } - packetBuffer := getPacketBuffer() - defer packetBuffer.Release() - buf := bytes.NewBuffer(packetBuffer.Slice[:0]) - - ccf := &wire.ConnectionCloseFrame{ErrorCode: qerr.ServerBusy} - - replyHdr := &wire.ExtendedHeader{} - replyHdr.IsLongHeader = true - replyHdr.Type = protocol.PacketTypeInitial - replyHdr.Version = hdr.Version - replyHdr.SrcConnectionID = hdr.DestConnectionID - replyHdr.DestConnectionID = hdr.SrcConnectionID - replyHdr.PacketNumberLen = protocol.PacketNumberLen4 - replyHdr.Length = 4 /* packet number len */ + ccf.Length(hdr.Version) + protocol.ByteCount(sealer.Overhead()) - if err := replyHdr.Write(buf, hdr.Version); err != nil { - return err - } - payloadOffset := buf.Len() - - if err := ccf.Write(buf, hdr.Version); err != nil { - return err - } - - raw := buf.Bytes() - _ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], replyHdr.PacketNumber, raw[:payloadOffset]) - raw = raw[0 : buf.Len()+sealer.Overhead()] - - pnOffset := payloadOffset - int(replyHdr.PacketNumberLen) - 
sealer.EncryptHeader( - raw[pnOffset+4:pnOffset+4+16], - &raw[0], - raw[pnOffset:payloadOffset], - ) - - replyHdr.Log(s.logger) - wire.LogFrame(s.logger, ccf, true) - if _, err := s.conn.WriteTo(raw, remoteAddr); err != nil { - s.logger.Debugf("Error rejecting connection: %s", err) - } - return nil -} - -func (s *server) sendVersionNegotiationPacket(p *receivedPacket, hdr *wire.Header) { - s.logger.Debugf("Client offered version %s, sending Version Negotiation", hdr.Version) - data, err := wire.ComposeVersionNegotiation(hdr.SrcConnectionID, hdr.DestConnectionID, s.config.Versions) - if err != nil { - s.logger.Debugf("Error composing Version Negotiation: %s", err) - return - } - if _, err := s.conn.WriteTo(data, p.remoteAddr); err != nil { - s.logger.Debugf("Error sending Version Negotiation: %s", err) - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/session.go b/vendor/github.com/lucas-clemente/quic-go/session.go deleted file mode 100644 index ba1f55661..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/session.go +++ /dev/null @@ -1,1287 +0,0 @@ -package quic - -import ( - "bytes" - "context" - "crypto/tls" - "errors" - "fmt" - "io" - "net" - "reflect" - "sync" - "time" - - "github.com/lucas-clemente/quic-go/internal/ackhandler" - "github.com/lucas-clemente/quic-go/internal/congestion" - "github.com/lucas-clemente/quic-go/internal/flowcontrol" - "github.com/lucas-clemente/quic-go/internal/handshake" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/utils" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type unpacker interface { - Unpack(hdr *wire.Header, data []byte) (*unpackedPacket, error) -} - -type streamGetter interface { - GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error) - GetOrOpenSendStream(protocol.StreamID) (sendStreamI, error) -} - -type streamManager interface { - GetOrOpenSendStream(protocol.StreamID) 
(sendStreamI, error) - GetOrOpenReceiveStream(protocol.StreamID) (receiveStreamI, error) - OpenStream() (Stream, error) - OpenUniStream() (SendStream, error) - OpenStreamSync() (Stream, error) - OpenUniStreamSync() (SendStream, error) - AcceptStream() (Stream, error) - AcceptUniStream() (ReceiveStream, error) - DeleteStream(protocol.StreamID) error - UpdateLimits(*handshake.TransportParameters) error - HandleMaxStreamsFrame(*wire.MaxStreamsFrame) error - CloseWithError(error) -} - -type cryptoStreamHandler interface { - RunHandshake() error - ChangeConnectionID(protocol.ConnectionID) error - io.Closer - ConnectionState() tls.ConnectionState -} - -type receivedPacket struct { - remoteAddr net.Addr - rcvTime time.Time - data []byte - - buffer *packetBuffer -} - -func (p *receivedPacket) Clone() *receivedPacket { - return &receivedPacket{ - remoteAddr: p.remoteAddr, - rcvTime: p.rcvTime, - data: p.data, - buffer: p.buffer, - } -} - -type closeError struct { - err error - remote bool - sendClose bool -} - -var errCloseForRecreating = errors.New("closing session in order to recreate it") - -// A Session is a QUIC session -type session struct { - sessionRunner sessionRunner - - destConnID protocol.ConnectionID - origDestConnID protocol.ConnectionID // if the server sends a Retry, this is the connection ID we used initially - srcConnID protocol.ConnectionID - - perspective protocol.Perspective - initialVersion protocol.VersionNumber // if version negotiation is performed, this is the version we initially tried - version protocol.VersionNumber - config *Config - - conn connection - - streamsMap streamManager - - rttStats *congestion.RTTStats - - cryptoStreamManager *cryptoStreamManager - sentPacketHandler ackhandler.SentPacketHandler - receivedPacketHandler ackhandler.ReceivedPacketHandler - framer framer - windowUpdateQueue *windowUpdateQueue - connFlowController flowcontrol.ConnectionFlowController - - unpacker unpacker - frameParser wire.FrameParser - packer packer - - 
cryptoStreamHandler cryptoStreamHandler - - receivedPackets chan *receivedPacket - sendingScheduled chan struct{} - - closeOnce sync.Once - closed utils.AtomicBool - // closeChan is used to notify the run loop that it should terminate - closeChan chan closeError - connectionClosePacket *packedPacket - packetsReceivedAfterClose int - - ctx context.Context - ctxCancel context.CancelFunc - - undecryptablePackets []*receivedPacket - - clientHelloWritten <-chan struct{} - handshakeCompleteChan chan struct{} // is closed when the handshake completes - handshakeComplete bool - - receivedRetry bool - receivedFirstPacket bool - receivedFirstForwardSecurePacket bool - - sessionCreationTime time.Time - // The idle timeout is set based on the max of the time we received the last packet... - lastPacketReceivedTime time.Time - // ... and the time we sent a new retransmittable packet after receiving a packet. - firstRetransmittablePacketAfterIdleSentTime time.Time - // pacingDeadline is the time when the next packet should be sent - pacingDeadline time.Time - - peerParams *handshake.TransportParameters - - timer *utils.Timer - // keepAlivePingSent stores whether a Ping frame was sent to the peer or not - // it is reset as soon as we receive a packet from the peer - keepAlivePingSent bool - - logger utils.Logger -} - -var _ Session = &session{} -var _ streamSender = &session{} - -var newSession = func( - conn connection, - runner sessionRunner, - clientDestConnID protocol.ConnectionID, - destConnID protocol.ConnectionID, - srcConnID protocol.ConnectionID, - conf *Config, - tlsConf *tls.Config, - params *handshake.TransportParameters, - logger utils.Logger, - v protocol.VersionNumber, -) (quicSession, error) { - s := &session{ - conn: conn, - sessionRunner: runner, - config: conf, - srcConnID: srcConnID, - destConnID: destConnID, - perspective: protocol.PerspectiveServer, - handshakeCompleteChan: make(chan struct{}), - logger: logger, - version: v, - } - s.preSetup() - 
s.sentPacketHandler = ackhandler.NewSentPacketHandler(0, s.rttStats, s.logger) - s.streamsMap = newStreamsMap( - s, - s.newFlowController, - uint64(s.config.MaxIncomingStreams), - uint64(s.config.MaxIncomingUniStreams), - s.perspective, - s.version, - ) - s.framer = newFramer(s.streamsMap, s.version) - initialStream := newCryptoStream() - handshakeStream := newCryptoStream() - oneRTTStream := newPostHandshakeCryptoStream(s.framer) - cs, err := handshake.NewCryptoSetupServer( - initialStream, - handshakeStream, - oneRTTStream, - clientDestConnID, - conn.RemoteAddr(), - params, - s.processTransportParameters, - tlsConf, - logger, - ) - if err != nil { - return nil, err - } - s.cryptoStreamHandler = cs - s.packer = newPacketPacker( - s.destConnID, - s.srcConnID, - initialStream, - handshakeStream, - s.sentPacketHandler, - s.RemoteAddr(), - cs, - s.framer, - s.receivedPacketHandler, - s.perspective, - s.version, - ) - s.cryptoStreamManager = newCryptoStreamManager(cs, initialStream, handshakeStream, oneRTTStream) - - if err := s.postSetup(); err != nil { - return nil, err - } - s.unpacker = newPacketUnpacker(cs, s.version) - return s, nil -} - -// declare this as a variable, such that we can it mock it in the tests -var newClientSession = func( - conn connection, - runner sessionRunner, - destConnID protocol.ConnectionID, - srcConnID protocol.ConnectionID, - conf *Config, - tlsConf *tls.Config, - initialPacketNumber protocol.PacketNumber, - params *handshake.TransportParameters, - initialVersion protocol.VersionNumber, - logger utils.Logger, - v protocol.VersionNumber, -) (quicSession, error) { - s := &session{ - conn: conn, - sessionRunner: runner, - config: conf, - srcConnID: srcConnID, - destConnID: destConnID, - perspective: protocol.PerspectiveClient, - handshakeCompleteChan: make(chan struct{}), - logger: logger, - initialVersion: initialVersion, - version: v, - } - s.preSetup() - s.sentPacketHandler = ackhandler.NewSentPacketHandler(initialPacketNumber, 
s.rttStats, s.logger) - initialStream := newCryptoStream() - handshakeStream := newCryptoStream() - oneRTTStream := newPostHandshakeCryptoStream(s.framer) - cs, clientHelloWritten, err := handshake.NewCryptoSetupClient( - initialStream, - handshakeStream, - oneRTTStream, - s.destConnID, - conn.RemoteAddr(), - params, - s.processTransportParameters, - tlsConf, - logger, - ) - if err != nil { - return nil, err - } - s.clientHelloWritten = clientHelloWritten - s.cryptoStreamHandler = cs - s.cryptoStreamManager = newCryptoStreamManager(cs, initialStream, handshakeStream, oneRTTStream) - s.unpacker = newPacketUnpacker(cs, s.version) - s.streamsMap = newStreamsMap( - s, - s.newFlowController, - uint64(s.config.MaxIncomingStreams), - uint64(s.config.MaxIncomingUniStreams), - s.perspective, - s.version, - ) - s.framer = newFramer(s.streamsMap, s.version) - s.packer = newPacketPacker( - s.destConnID, - s.srcConnID, - initialStream, - handshakeStream, - s.sentPacketHandler, - s.RemoteAddr(), - cs, - s.framer, - s.receivedPacketHandler, - s.perspective, - s.version, - ) - return s, s.postSetup() -} - -func (s *session) preSetup() { - s.frameParser = wire.NewFrameParser(s.version) - s.rttStats = &congestion.RTTStats{} - s.receivedPacketHandler = ackhandler.NewReceivedPacketHandler(s.rttStats, s.logger, s.version) - s.connFlowController = flowcontrol.NewConnectionFlowController( - protocol.InitialMaxData, - protocol.ByteCount(s.config.MaxReceiveConnectionFlowControlWindow), - s.onHasConnectionWindowUpdate, - s.rttStats, - s.logger, - ) -} - -func (s *session) postSetup() error { - s.receivedPackets = make(chan *receivedPacket, protocol.MaxSessionUnprocessedPackets) - s.closeChan = make(chan closeError, 1) - s.sendingScheduled = make(chan struct{}, 1) - s.undecryptablePackets = make([]*receivedPacket, 0, protocol.MaxUndecryptablePackets) - s.ctx, s.ctxCancel = context.WithCancel(context.Background()) - - s.timer = utils.NewTimer() - now := time.Now() - s.lastPacketReceivedTime = 
now - s.sessionCreationTime = now - - s.windowUpdateQueue = newWindowUpdateQueue(s.streamsMap, s.connFlowController, s.framer.QueueControlFrame) - return nil -} - -// run the session main loop -func (s *session) run() error { - defer s.ctxCancel() - - go func() { - if err := s.cryptoStreamHandler.RunHandshake(); err != nil { - s.closeLocal(err) - return - } - close(s.handshakeCompleteChan) - }() - if s.perspective == protocol.PerspectiveClient { - select { - case <-s.clientHelloWritten: - s.scheduleSending() - case closeErr := <-s.closeChan: - // put the close error back into the channel, so that the run loop can receive it - s.closeChan <- closeErr - } - } - - var closeErr closeError - -runLoop: - for { - // Close immediately if requested - select { - case closeErr = <-s.closeChan: - break runLoop - case <-s.handshakeCompleteChan: - s.handleHandshakeComplete() - default: - } - - s.maybeResetTimer() - - select { - case closeErr = <-s.closeChan: - break runLoop - case <-s.timer.Chan(): - s.timer.SetRead() - // We do all the interesting stuff after the switch statement, so - // nothing to see here. - case <-s.sendingScheduled: - // We do all the interesting stuff after the switch statement, so - // nothing to see here. - case p := <-s.receivedPackets: - // Only reset the timers if this packet was actually processed. - // This avoids modifying any state when handling undecryptable packets, - // which could be injected by an attacker. - if wasProcessed := s.handlePacketImpl(p); !wasProcessed { - continue - } - case <-s.handshakeCompleteChan: - s.handleHandshakeComplete() - } - - now := time.Now() - if timeout := s.sentPacketHandler.GetAlarmTimeout(); !timeout.IsZero() && timeout.Before(now) { - // This could cause packets to be retransmitted. - // Check it before trying to send packets. 
- if err := s.sentPacketHandler.OnAlarm(); err != nil { - s.closeLocal(err) - } - } - - var pacingDeadline time.Time - if s.pacingDeadline.IsZero() { // the timer didn't have a pacing deadline set - pacingDeadline = s.sentPacketHandler.TimeUntilSend() - } - if s.config.KeepAlive && !s.keepAlivePingSent && s.handshakeComplete && s.firstRetransmittablePacketAfterIdleSentTime.IsZero() && time.Since(s.lastPacketReceivedTime) >= s.peerParams.IdleTimeout/2 { - // send a PING frame since there is no activity in the session - s.logger.Debugf("Sending a keep-alive ping to keep the connection alive.") - s.framer.QueueControlFrame(&wire.PingFrame{}) - s.keepAlivePingSent = true - } else if !pacingDeadline.IsZero() && now.Before(pacingDeadline) { - // If we get to this point before the pacing deadline, we should wait until that deadline. - // This can happen when scheduleSending is called, or a packet is received. - // Set the timer and restart the run loop. - s.pacingDeadline = pacingDeadline - continue - } - - if !s.handshakeComplete && now.Sub(s.sessionCreationTime) >= s.config.HandshakeTimeout { - s.destroy(qerr.TimeoutError("Handshake did not complete in time")) - continue - } - if s.handshakeComplete && now.Sub(s.idleTimeoutStartTime()) >= s.config.IdleTimeout { - s.destroy(qerr.TimeoutError("No recent network activity")) - continue - } - - if err := s.sendPackets(); err != nil { - s.closeLocal(err) - } - } - - s.handleCloseError(closeErr) - s.closed.Set(true) - s.logger.Infof("Connection %s closed.", s.srcConnID) - s.cryptoStreamHandler.Close() - return closeErr.err -} - -func (s *session) Context() context.Context { - return s.ctx -} - -func (s *session) ConnectionState() tls.ConnectionState { - return s.cryptoStreamHandler.ConnectionState() -} - -func (s *session) maybeResetTimer() { - var deadline time.Time - if s.config.KeepAlive && s.handshakeComplete && !s.keepAlivePingSent { - deadline = s.idleTimeoutStartTime().Add(s.peerParams.IdleTimeout / 2) - } else { - 
deadline = s.idleTimeoutStartTime().Add(s.config.IdleTimeout) - } - - if ackAlarm := s.receivedPacketHandler.GetAlarmTimeout(); !ackAlarm.IsZero() { - deadline = utils.MinTime(deadline, ackAlarm) - } - if lossTime := s.sentPacketHandler.GetAlarmTimeout(); !lossTime.IsZero() { - deadline = utils.MinTime(deadline, lossTime) - } - if !s.handshakeComplete { - handshakeDeadline := s.sessionCreationTime.Add(s.config.HandshakeTimeout) - deadline = utils.MinTime(deadline, handshakeDeadline) - } - if !s.pacingDeadline.IsZero() { - deadline = utils.MinTime(deadline, s.pacingDeadline) - } - - s.timer.Reset(deadline) -} - -func (s *session) idleTimeoutStartTime() time.Time { - return utils.MaxTime(s.lastPacketReceivedTime, s.firstRetransmittablePacketAfterIdleSentTime) -} - -func (s *session) handleHandshakeComplete() { - s.handshakeComplete = true - s.handshakeCompleteChan = nil // prevent this case from ever being selected again - s.sessionRunner.OnHandshakeComplete(s) - - // The client completes the handshake first (after sending the CFIN). - // We need to make sure they learn about the peer completing the handshake, - // in order to stop retransmitting handshake packets. - // They will stop retransmitting handshake packets when receiving the first forward-secure packet. - // We need to make sure that a retransmittable forward-secure packet is sent, - // independent from the application protocol. 
- if s.perspective == protocol.PerspectiveServer { - s.queueControlFrame(&wire.PingFrame{}) - s.sentPacketHandler.SetHandshakeComplete() - } -} - -func (s *session) handlePacketImpl(rp *receivedPacket) bool { - var counter uint8 - var lastConnID protocol.ConnectionID - var processed bool - data := rp.data - p := rp - for len(data) > 0 { - if counter > 0 { - p = p.Clone() - p.data = data - } - - hdr, packetData, rest, err := wire.ParsePacket(p.data, s.srcConnID.Len()) - if err != nil { - s.logger.Debugf("error parsing packet: %s", err) - break - } - - if counter > 0 && !hdr.DestConnectionID.Equal(lastConnID) { - s.logger.Debugf("coalesced packet has different destination connection ID: %s, expected %s", hdr.DestConnectionID, lastConnID) - break - } - lastConnID = hdr.DestConnectionID - - if counter > 0 { - p.buffer.Split() - } - counter++ - - // only log if this actually a coalesced packet - if s.logger.Debug() && (counter > 1 || len(rest) > 0) { - s.logger.Debugf("Parsed a coalesced packet. Part %d: %d bytes. Remaining: %d bytes.", counter, len(packetData), len(rest)) - } - p.data = packetData - if wasProcessed := s.handleSinglePacket(p, hdr); wasProcessed { - processed = true - } - data = rest - } - p.buffer.MaybeRelease() - return processed -} - -func (s *session) handleSinglePacket(p *receivedPacket, hdr *wire.Header) bool /* was the packet successfully processed */ { - var wasQueued bool - - defer func() { - // Put back the packet buffer if the packet wasn't queued for later decryption. - if !wasQueued { - p.buffer.Decrement() - } - }() - - if hdr.Type == protocol.PacketTypeRetry { - return s.handleRetryPacket(p, hdr) - } - - // The server can change the source connection ID with the first Handshake packet. - // After this, all packets with a different source connection have to be ignored. 
- if s.receivedFirstPacket && hdr.IsLongHeader && !hdr.SrcConnectionID.Equal(s.destConnID) { - s.logger.Debugf("Dropping packet with unexpected source connection ID: %s (expected %s)", hdr.SrcConnectionID, s.destConnID) - return false - } - // drop 0-RTT packets - if hdr.Type == protocol.PacketType0RTT { - return false - } - - packet, err := s.unpacker.Unpack(hdr, p.data) - if err != nil { - if err == handshake.ErrOpenerNotYetAvailable { - // Sealer for this encryption level not yet available. - // Try again later. - wasQueued = true - s.tryQueueingUndecryptablePacket(p) - return false - } - // This might be a packet injected by an attacker. - // Drop it. - s.logger.Debugf("Dropping packet that could not be unpacked. Unpack error: %s", err) - return false - } - - if s.logger.Debug() { - s.logger.Debugf("<- Reading packet %#x (%d bytes) for connection %s, %s", packet.packetNumber, len(p.data), hdr.DestConnectionID, packet.encryptionLevel) - packet.hdr.Log(s.logger) - } - - if err := s.handleUnpackedPacket(packet, p.rcvTime); err != nil { - s.closeLocal(err) - return false - } - return true -} - -func (s *session) handleRetryPacket(p *receivedPacket, hdr *wire.Header) bool /* was this a valid Retry */ { - if s.perspective == protocol.PerspectiveServer { - s.logger.Debugf("Ignoring Retry.") - return false - } - if s.receivedFirstPacket { - s.logger.Debugf("Ignoring Retry, since we already received a packet.") - return false - } - (&wire.ExtendedHeader{Header: *hdr}).Log(s.logger) - if !hdr.OrigDestConnectionID.Equal(s.destConnID) { - s.logger.Debugf("Ignoring spoofed Retry. Original Destination Connection ID: %s, expected: %s", hdr.OrigDestConnectionID, s.destConnID) - return false - } - if hdr.SrcConnectionID.Equal(s.destConnID) { - s.logger.Debugf("Ignoring Retry, since the server didn't change the Source Connection ID.") - return false - } - // If a token is already set, this means that we already received a Retry from the server. - // Ignore this Retry packet. 
- if s.receivedRetry { - s.logger.Debugf("Ignoring Retry, since a Retry was already received.") - return false - } - s.logger.Debugf("<- Received Retry") - s.logger.Debugf("Switching destination connection ID to: %s", hdr.SrcConnectionID) - s.origDestConnID = s.destConnID - s.destConnID = hdr.SrcConnectionID - s.receivedRetry = true - if err := s.sentPacketHandler.ResetForRetry(); err != nil { - s.closeLocal(err) - return false - } - s.cryptoStreamHandler.ChangeConnectionID(s.destConnID) - s.packer.SetToken(hdr.Token) - s.packer.ChangeDestConnectionID(s.destConnID) - s.scheduleSending() - return true -} - -func (s *session) handleUnpackedPacket(packet *unpackedPacket, rcvTime time.Time) error { - if len(packet.data) == 0 { - return qerr.Error(qerr.ProtocolViolation, "empty packet") - } - - // The server can change the source connection ID with the first Handshake packet. - if s.perspective == protocol.PerspectiveClient && !s.receivedFirstPacket && packet.hdr.IsLongHeader && !packet.hdr.SrcConnectionID.Equal(s.destConnID) { - s.logger.Debugf("Received first packet. Switching destination connection ID to: %s", packet.hdr.SrcConnectionID) - s.destConnID = packet.hdr.SrcConnectionID - s.packer.ChangeDestConnectionID(s.destConnID) - } - - s.receivedFirstPacket = true - s.lastPacketReceivedTime = rcvTime - s.firstRetransmittablePacketAfterIdleSentTime = time.Time{} - s.keepAlivePingSent = false - - // The client completes the handshake first (after sending the CFIN). - // We know that the server completed the handshake as soon as we receive a forward-secure packet. 
- if s.perspective == protocol.PerspectiveClient { - if !s.receivedFirstForwardSecurePacket && packet.encryptionLevel == protocol.Encryption1RTT { - s.receivedFirstForwardSecurePacket = true - s.sentPacketHandler.SetHandshakeComplete() - } - } - - r := bytes.NewReader(packet.data) - var isRetransmittable bool - for { - frame, err := s.frameParser.ParseNext(r, packet.encryptionLevel) - if err != nil { - return err - } - if frame == nil { - break - } - if ackhandler.IsFrameRetransmittable(frame) { - isRetransmittable = true - } - if err := s.handleFrame(frame, packet.packetNumber, packet.encryptionLevel); err != nil { - return err - } - } - - if err := s.receivedPacketHandler.ReceivedPacket(packet.packetNumber, packet.encryptionLevel, rcvTime, isRetransmittable); err != nil { - return err - } - return nil -} - -func (s *session) handleFrame(f wire.Frame, pn protocol.PacketNumber, encLevel protocol.EncryptionLevel) error { - var err error - wire.LogFrame(s.logger, f, false) - switch frame := f.(type) { - case *wire.CryptoFrame: - err = s.handleCryptoFrame(frame, encLevel) - case *wire.StreamFrame: - err = s.handleStreamFrame(frame, encLevel) - case *wire.AckFrame: - err = s.handleAckFrame(frame, pn, encLevel) - case *wire.ConnectionCloseFrame: - s.closeRemote(qerr.Error(frame.ErrorCode, frame.ReasonPhrase)) - case *wire.ResetStreamFrame: - err = s.handleResetStreamFrame(frame) - case *wire.MaxDataFrame: - s.handleMaxDataFrame(frame) - case *wire.MaxStreamDataFrame: - err = s.handleMaxStreamDataFrame(frame) - case *wire.MaxStreamsFrame: - err = s.handleMaxStreamsFrame(frame) - case *wire.DataBlockedFrame: - case *wire.StreamDataBlockedFrame: - case *wire.StreamsBlockedFrame: - case *wire.StopSendingFrame: - err = s.handleStopSendingFrame(frame) - case *wire.PingFrame: - case *wire.PathChallengeFrame: - s.handlePathChallengeFrame(frame) - case *wire.PathResponseFrame: - // since we don't send PATH_CHALLENGEs, we don't expect PATH_RESPONSEs - err = errors.New("unexpected 
PATH_RESPONSE frame") - case *wire.NewTokenFrame: - case *wire.NewConnectionIDFrame: - case *wire.RetireConnectionIDFrame: - // since we don't send new connection IDs, we don't expect retirements - err = errors.New("unexpected RETIRE_CONNECTION_ID frame") - default: - err = fmt.Errorf("unexpected frame type: %s", reflect.ValueOf(&frame).Elem().Type().Name()) - } - return err -} - -// handlePacket is called by the server with a new packet -func (s *session) handlePacket(p *receivedPacket) { - if s.closed.Get() { - s.handlePacketAfterClosed(p) - } - // Discard packets once the amount of queued packets is larger than - // the channel size, protocol.MaxSessionUnprocessedPackets - select { - case s.receivedPackets <- p: - default: - } -} - -func (s *session) handlePacketAfterClosed(p *receivedPacket) { - s.packetsReceivedAfterClose++ - if s.connectionClosePacket == nil { - return - } - // exponential backoff - // only send a CONNECTION_CLOSE for the 1st, 2nd, 4th, 8th, 16th, ... packet arriving - for n := s.packetsReceivedAfterClose; n > 1; n = n / 2 { - if n%2 != 0 { - return - } - } - s.logger.Debugf("Received %d packets after sending CONNECTION_CLOSE. Retransmitting.", s.packetsReceivedAfterClose) - if err := s.conn.Write(s.connectionClosePacket.raw); err != nil { - s.logger.Debugf("Error retransmitting CONNECTION_CLOSE: %s", err) - } -} - -func (s *session) handleCryptoFrame(frame *wire.CryptoFrame, encLevel protocol.EncryptionLevel) error { - encLevelChanged, err := s.cryptoStreamManager.HandleCryptoFrame(frame, encLevel) - if err != nil { - return err - } - s.logger.Debugf("Handled crypto frame at level %s. 
encLevelChanged: %t", encLevel, encLevelChanged) - if encLevelChanged { - s.tryDecryptingQueuedPackets() - } - return nil -} - -func (s *session) handleStreamFrame(frame *wire.StreamFrame, encLevel protocol.EncryptionLevel) error { - // TODO(#1261): implement strict rules for frames types in unencrypted packets - if encLevel < protocol.Encryption1RTT { - return qerr.Error(qerr.ProtocolViolation, fmt.Sprintf("received unencrypted stream data on stream %d", frame.StreamID)) - } - str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID) - if err != nil { - return err - } - if str == nil { - // Stream is closed and already garbage collected - // ignore this StreamFrame - return nil - } - return str.handleStreamFrame(frame) -} - -func (s *session) handleMaxDataFrame(frame *wire.MaxDataFrame) { - s.connFlowController.UpdateSendWindow(frame.ByteOffset) -} - -func (s *session) handleMaxStreamDataFrame(frame *wire.MaxStreamDataFrame) error { - str, err := s.streamsMap.GetOrOpenSendStream(frame.StreamID) - if err != nil { - return err - } - if str == nil { - // stream is closed and already garbage collected - return nil - } - str.handleMaxStreamDataFrame(frame) - return nil -} - -func (s *session) handleMaxStreamsFrame(frame *wire.MaxStreamsFrame) error { - return s.streamsMap.HandleMaxStreamsFrame(frame) -} - -func (s *session) handleResetStreamFrame(frame *wire.ResetStreamFrame) error { - str, err := s.streamsMap.GetOrOpenReceiveStream(frame.StreamID) - if err != nil { - return err - } - if str == nil { - // stream is closed and already garbage collected - return nil - } - return str.handleResetStreamFrame(frame) -} - -func (s *session) handleStopSendingFrame(frame *wire.StopSendingFrame) error { - str, err := s.streamsMap.GetOrOpenSendStream(frame.StreamID) - if err != nil { - return err - } - if str == nil { - // stream is closed and already garbage collected - return nil - } - str.handleStopSendingFrame(frame) - return nil -} - -func (s *session) 
handlePathChallengeFrame(frame *wire.PathChallengeFrame) { - s.queueControlFrame(&wire.PathResponseFrame{Data: frame.Data}) -} - -func (s *session) handleAckFrame(frame *wire.AckFrame, pn protocol.PacketNumber, encLevel protocol.EncryptionLevel) error { - if err := s.sentPacketHandler.ReceivedAck(frame, pn, encLevel, s.lastPacketReceivedTime); err != nil { - return err - } - if encLevel == protocol.Encryption1RTT { - s.receivedPacketHandler.IgnoreBelow(s.sentPacketHandler.GetLowestPacketNotConfirmedAcked()) - } - return nil -} - -// closeLocal closes the session and send a CONNECTION_CLOSE containing the error -func (s *session) closeLocal(e error) { - s.closeOnce.Do(func() { - if e == nil { - s.logger.Infof("Closing session.") - } else { - s.logger.Errorf("Closing session with error: %s", e) - } - s.sessionRunner.Retire(s.srcConnID) - s.closeChan <- closeError{err: e, sendClose: true, remote: false} - }) -} - -// destroy closes the session without sending the error on the wire -func (s *session) destroy(e error) { - s.closeOnce.Do(func() { - if nerr, ok := e.(net.Error); ok && nerr.Timeout() { - s.logger.Errorf("Destroying session %s: %s", s.destConnID, e) - } else { - s.logger.Errorf("Destroying session %s with error: %s", s.destConnID, e) - } - s.sessionRunner.Remove(s.srcConnID) - s.closeChan <- closeError{err: e, sendClose: false, remote: false} - }) -} - -// closeForRecreating closes the session in order to recreate it immediately afterwards -// It returns the first packet number that should be used in the new session. 
-func (s *session) closeForRecreating() protocol.PacketNumber { - s.destroy(errCloseForRecreating) - nextPN, _ := s.sentPacketHandler.PeekPacketNumber(protocol.EncryptionInitial) - return nextPN -} - -func (s *session) closeRemote(e error) { - s.closeOnce.Do(func() { - s.logger.Errorf("Peer closed session with error: %s", e) - s.sessionRunner.Remove(s.srcConnID) - s.closeChan <- closeError{err: e, remote: true} - }) -} - -// Close the connection. It sends a qerr.NoError. -// It waits until the run loop has stopped before returning -func (s *session) Close() error { - s.closeLocal(nil) - <-s.ctx.Done() - return nil -} - -func (s *session) CloseWithError(code protocol.ApplicationErrorCode, e error) error { - s.closeLocal(qerr.Error(qerr.ErrorCode(code), e.Error())) - <-s.ctx.Done() - return nil -} - -func (s *session) handleCloseError(closeErr closeError) { - if closeErr.err == nil { - closeErr.err = qerr.NoError - } - - var quicErr *qerr.QuicError - var ok bool - if quicErr, ok = closeErr.err.(*qerr.QuicError); !ok { - quicErr = qerr.ToQuicError(closeErr.err) - } - - s.streamsMap.CloseWithError(quicErr) - - if !closeErr.sendClose { - return - } - // If this is a remote close we're done here - if closeErr.remote { - return - } - if err := s.sendConnectionClose(quicErr); err != nil { - s.logger.Debugf("Error sending CONNECTION_CLOSE: %s", err) - } -} - -func (s *session) processTransportParameters(data []byte) { - var params *handshake.TransportParameters - var err error - switch s.perspective { - case protocol.PerspectiveClient: - params, err = s.processTransportParametersForClient(data) - case protocol.PerspectiveServer: - params, err = s.processTransportParametersForServer(data) - } - if err != nil { - s.closeLocal(err) - return - } - s.logger.Debugf("Received Transport Parameters: %s", params) - s.peerParams = params - if err := s.streamsMap.UpdateLimits(params); err != nil { - s.closeLocal(err) - return - } - s.packer.HandleTransportParameters(params) - 
s.frameParser.SetAckDelayExponent(params.AckDelayExponent) - s.connFlowController.UpdateSendWindow(params.InitialMaxData) - if params.StatelessResetToken != nil { - s.sessionRunner.AddResetToken(*params.StatelessResetToken, s) - } -} - -func (s *session) processTransportParametersForClient(data []byte) (*handshake.TransportParameters, error) { - params := &handshake.TransportParameters{} - if err := params.Unmarshal(data, s.perspective.Opposite()); err != nil { - return nil, err - } - - // check the Retry token - if !params.OriginalConnectionID.Equal(s.origDestConnID) { - return nil, fmt.Errorf("expected original_connection_id to equal %s, is %s", s.origDestConnID, params.OriginalConnectionID) - } - - return params, nil -} - -func (s *session) processTransportParametersForServer(data []byte) (*handshake.TransportParameters, error) { - params := &handshake.TransportParameters{} - if err := params.Unmarshal(data, s.perspective.Opposite()); err != nil { - return nil, err - } - return params, nil -} - -func (s *session) sendPackets() error { - s.pacingDeadline = time.Time{} - - sendMode := s.sentPacketHandler.SendMode() - if sendMode == ackhandler.SendNone { // shortcut: return immediately if there's nothing to send - return nil - } - - numPackets := s.sentPacketHandler.ShouldSendNumPackets() - var numPacketsSent int -sendLoop: - for { - switch sendMode { - case ackhandler.SendNone: - break sendLoop - case ackhandler.SendAck: - // If we already sent packets, and the send mode switches to SendAck, - // we've just become congestion limited. - // There's no need to try to send an ACK at this moment. - if numPacketsSent > 0 { - return nil - } - // We can at most send a single ACK only packet. - // There will only be a new ACK after receiving new packets. - // SendAck is only returned when we're congestion limited, so we don't need to set the pacingt timer. 
- return s.maybeSendAckOnlyPacket() - case ackhandler.SendPTO: - if err := s.sendProbePacket(); err != nil { - return err - } - numPacketsSent++ - case ackhandler.SendRetransmission: - sentPacket, err := s.maybeSendRetransmission() - if err != nil { - return err - } - if sentPacket { - numPacketsSent++ - // This can happen if a retransmission queued, but it wasn't necessary to send it. - // e.g. when an Initial is queued, but we already received a packet from the server. - } - case ackhandler.SendAny: - sentPacket, err := s.sendPacket() - if err != nil { - return err - } - if !sentPacket { - break sendLoop - } - numPacketsSent++ - default: - return fmt.Errorf("BUG: invalid send mode %d", sendMode) - } - if numPacketsSent >= numPackets { - break - } - sendMode = s.sentPacketHandler.SendMode() - } - // Only start the pacing timer if we sent as many packets as we were allowed. - // There will probably be more to send when calling sendPacket again. - if numPacketsSent == numPackets { - s.pacingDeadline = s.sentPacketHandler.TimeUntilSend() - } - return nil -} - -func (s *session) maybeSendAckOnlyPacket() error { - packet, err := s.packer.MaybePackAckPacket() - if err != nil { - return err - } - if packet == nil { - return nil - } - s.sentPacketHandler.SentPacket(packet.ToAckHandlerPacket()) - return s.sendPackedPacket(packet) -} - -// maybeSendRetransmission sends retransmissions for at most one packet. -// It takes care that Initials aren't retransmitted, if a packet from the server was already received. 
-func (s *session) maybeSendRetransmission() (bool, error) { - retransmitPacket := s.sentPacketHandler.DequeuePacketForRetransmission() - if retransmitPacket == nil { - return false, nil - } - - s.logger.Debugf("Dequeueing retransmission for packet 0x%x (%s)", retransmitPacket.PacketNumber, retransmitPacket.EncryptionLevel) - packets, err := s.packer.PackRetransmission(retransmitPacket) - if err != nil { - return false, err - } - ackhandlerPackets := make([]*ackhandler.Packet, len(packets)) - for i, packet := range packets { - ackhandlerPackets[i] = packet.ToAckHandlerPacket() - } - s.sentPacketHandler.SentPacketsAsRetransmission(ackhandlerPackets, retransmitPacket.PacketNumber) - for _, packet := range packets { - if err := s.sendPackedPacket(packet); err != nil { - return false, err - } - } - return true, nil -} - -func (s *session) sendProbePacket() error { - p, err := s.sentPacketHandler.DequeueProbePacket() - if err != nil { - return err - } - s.logger.Debugf("Sending a retransmission for %#x as a probe packet.", p.PacketNumber) - - packets, err := s.packer.PackRetransmission(p) - if err != nil { - return err - } - ackhandlerPackets := make([]*ackhandler.Packet, len(packets)) - for i, packet := range packets { - ackhandlerPackets[i] = packet.ToAckHandlerPacket() - } - s.sentPacketHandler.SentPacketsAsRetransmission(ackhandlerPackets, p.PacketNumber) - for _, packet := range packets { - if err := s.sendPackedPacket(packet); err != nil { - return err - } - } - return nil -} - -func (s *session) sendPacket() (bool, error) { - if isBlocked, offset := s.connFlowController.IsNewlyBlocked(); isBlocked { - s.framer.QueueControlFrame(&wire.DataBlockedFrame{DataLimit: offset}) - } - s.windowUpdateQueue.QueueAll() - - packet, err := s.packer.PackPacket() - if err != nil || packet == nil { - return false, err - } - s.sentPacketHandler.SentPacket(packet.ToAckHandlerPacket()) - if err := s.sendPackedPacket(packet); err != nil { - return false, err - } - return true, nil -} 
- -func (s *session) sendPackedPacket(packet *packedPacket) error { - defer packet.buffer.Release() - if s.firstRetransmittablePacketAfterIdleSentTime.IsZero() && packet.IsRetransmittable() { - s.firstRetransmittablePacketAfterIdleSentTime = time.Now() - } - s.logPacket(packet) - return s.conn.Write(packet.raw) -} - -func (s *session) sendConnectionClose(quicErr *qerr.QuicError) error { - var reason string - // don't send details of crypto errors - if !quicErr.IsCryptoError() { - reason = quicErr.ErrorMessage - } - packet, err := s.packer.PackConnectionClose(&wire.ConnectionCloseFrame{ - ErrorCode: quicErr.ErrorCode, - ReasonPhrase: reason, - }) - if err != nil { - return err - } - s.connectionClosePacket = packet - s.logPacket(packet) - return s.conn.Write(packet.raw) -} - -func (s *session) logPacket(packet *packedPacket) { - if !s.logger.Debug() { - // We don't need to allocate the slices for calling the format functions - return - } - s.logger.Debugf("-> Sending packet 0x%x (%d bytes) for connection %s, %s", packet.header.PacketNumber, len(packet.raw), s.srcConnID, packet.EncryptionLevel()) - packet.header.Log(s.logger) - for _, frame := range packet.frames { - wire.LogFrame(s.logger, frame, true) - } -} - -// GetOrOpenStream either returns an existing stream, a newly opened stream, or nil if a stream with the provided ID is already closed. -// It is *only* needed for gQUIC's H2. -// It will be removed as soon as gQUIC moves towards the IETF H2/QUIC stream mapping. 
-func (s *session) GetOrOpenStream(id protocol.StreamID) (Stream, error) { - str, err := s.streamsMap.GetOrOpenSendStream(id) - if str != nil { - if bstr, ok := str.(Stream); ok { - return bstr, err - } - return nil, fmt.Errorf("Stream %d is not a bidirectional stream", id) - } - // make sure to return an actual nil value here, not an Stream with value nil - return nil, err -} - -// AcceptStream returns the next stream openend by the peer -func (s *session) AcceptStream() (Stream, error) { - return s.streamsMap.AcceptStream() -} - -func (s *session) AcceptUniStream() (ReceiveStream, error) { - return s.streamsMap.AcceptUniStream() -} - -// OpenStream opens a stream -func (s *session) OpenStream() (Stream, error) { - return s.streamsMap.OpenStream() -} - -func (s *session) OpenStreamSync() (Stream, error) { - return s.streamsMap.OpenStreamSync() -} - -func (s *session) OpenUniStream() (SendStream, error) { - return s.streamsMap.OpenUniStream() -} - -func (s *session) OpenUniStreamSync() (SendStream, error) { - return s.streamsMap.OpenUniStreamSync() -} - -func (s *session) newFlowController(id protocol.StreamID) flowcontrol.StreamFlowController { - var initialSendWindow protocol.ByteCount - if s.peerParams != nil { - if id.Type() == protocol.StreamTypeUni { - initialSendWindow = s.peerParams.InitialMaxStreamDataUni - } else { - if id.InitiatedBy() == s.perspective { - initialSendWindow = s.peerParams.InitialMaxStreamDataBidiRemote - } else { - initialSendWindow = s.peerParams.InitialMaxStreamDataBidiLocal - } - } - } - return flowcontrol.NewStreamFlowController( - id, - s.connFlowController, - protocol.InitialMaxStreamData, - protocol.ByteCount(s.config.MaxReceiveStreamFlowControlWindow), - initialSendWindow, - s.onHasStreamWindowUpdate, - s.rttStats, - s.logger, - ) -} - -// scheduleSending signals that we have data for sending -func (s *session) scheduleSending() { - select { - case s.sendingScheduled <- struct{}{}: - default: - } -} - -func (s *session) 
tryQueueingUndecryptablePacket(p *receivedPacket) { - if s.handshakeComplete { - s.logger.Debugf("Received undecryptable packet from %s after the handshake (%d bytes)", p.remoteAddr.String(), len(p.data)) - return - } - if len(s.undecryptablePackets)+1 > protocol.MaxUndecryptablePackets { - s.logger.Infof("Dropping undecrytable packet (%d bytes). Undecryptable packet queue full.", len(p.data)) - return - } - s.logger.Infof("Queueing packet (%d bytes) for later decryption", len(p.data)) - s.undecryptablePackets = append(s.undecryptablePackets, p) -} - -func (s *session) tryDecryptingQueuedPackets() { - for _, p := range s.undecryptablePackets { - s.handlePacket(p) - } - s.undecryptablePackets = s.undecryptablePackets[:0] -} - -func (s *session) queueControlFrame(f wire.Frame) { - s.framer.QueueControlFrame(f) - s.scheduleSending() -} - -func (s *session) onHasStreamWindowUpdate(id protocol.StreamID) { - s.windowUpdateQueue.AddStream(id) - s.scheduleSending() -} - -func (s *session) onHasConnectionWindowUpdate() { - s.windowUpdateQueue.AddConnection() - s.scheduleSending() -} - -func (s *session) onHasStreamData(id protocol.StreamID) { - s.framer.AddActiveStream(id) - s.scheduleSending() -} - -func (s *session) onStreamCompleted(id protocol.StreamID) { - if err := s.streamsMap.DeleteStream(id); err != nil { - s.closeLocal(err) - } -} - -func (s *session) LocalAddr() net.Addr { - return s.conn.LocalAddr() -} - -func (s *session) RemoteAddr() net.Addr { - return s.conn.RemoteAddr() -} - -func (s *session) getPerspective() protocol.Perspective { - return s.perspective -} - -func (s *session) GetVersion() protocol.VersionNumber { - return s.version -} diff --git a/vendor/github.com/lucas-clemente/quic-go/stream.go b/vendor/github.com/lucas-clemente/quic-go/stream.go deleted file mode 100644 index dfd0cc6a1..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/stream.go +++ /dev/null @@ -1,163 +0,0 @@ -package quic - -import ( - "net" - "sync" - "time" - - 
"github.com/lucas-clemente/quic-go/internal/flowcontrol" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -const errorCodeStopping protocol.ApplicationErrorCode = 0 - -// The streamSender is notified by the stream about various events. -type streamSender interface { - queueControlFrame(wire.Frame) - onHasStreamData(protocol.StreamID) - // must be called without holding the mutex that is acquired by closeForShutdown - onStreamCompleted(protocol.StreamID) -} - -// Each of the both stream halves gets its own uniStreamSender. -// This is necessary in order to keep track when both halves have been completed. -type uniStreamSender struct { - streamSender - onStreamCompletedImpl func() -} - -func (s *uniStreamSender) queueControlFrame(f wire.Frame) { - s.streamSender.queueControlFrame(f) -} - -func (s *uniStreamSender) onHasStreamData(id protocol.StreamID) { - s.streamSender.onHasStreamData(id) -} - -func (s *uniStreamSender) onStreamCompleted(protocol.StreamID) { - s.onStreamCompletedImpl() -} - -var _ streamSender = &uniStreamSender{} - -type streamI interface { - Stream - closeForShutdown(error) - // for receiving - handleStreamFrame(*wire.StreamFrame) error - handleResetStreamFrame(*wire.ResetStreamFrame) error - getWindowUpdate() protocol.ByteCount - // for sending - hasData() bool - handleStopSendingFrame(*wire.StopSendingFrame) - popStreamFrame(maxBytes protocol.ByteCount) (*wire.StreamFrame, bool) - handleMaxStreamDataFrame(*wire.MaxStreamDataFrame) -} - -var _ receiveStreamI = (streamI)(nil) -var _ sendStreamI = (streamI)(nil) - -// A Stream assembles the data from StreamFrames and provides a super-convenient Read-Interface -// -// Read() and Write() may be called concurrently, but multiple calls to Read() or Write() individually must be synchronized manually. 
-type stream struct { - receiveStream - sendStream - - completedMutex sync.Mutex - sender streamSender - receiveStreamCompleted bool - sendStreamCompleted bool - - version protocol.VersionNumber -} - -var _ Stream = &stream{} - -type deadlineError struct{} - -func (deadlineError) Error() string { return "deadline exceeded" } -func (deadlineError) Temporary() bool { return true } -func (deadlineError) Timeout() bool { return true } - -var errDeadline net.Error = &deadlineError{} - -type streamCanceledError struct { - error - errorCode protocol.ApplicationErrorCode -} - -func (streamCanceledError) Canceled() bool { return true } -func (e streamCanceledError) ErrorCode() protocol.ApplicationErrorCode { return e.errorCode } - -var _ StreamError = &streamCanceledError{} - -// newStream creates a new Stream -func newStream(streamID protocol.StreamID, - sender streamSender, - flowController flowcontrol.StreamFlowController, - version protocol.VersionNumber, -) *stream { - s := &stream{sender: sender, version: version} - senderForSendStream := &uniStreamSender{ - streamSender: sender, - onStreamCompletedImpl: func() { - s.completedMutex.Lock() - s.sendStreamCompleted = true - s.checkIfCompleted() - s.completedMutex.Unlock() - }, - } - s.sendStream = *newSendStream(streamID, senderForSendStream, flowController, version) - senderForReceiveStream := &uniStreamSender{ - streamSender: sender, - onStreamCompletedImpl: func() { - s.completedMutex.Lock() - s.receiveStreamCompleted = true - s.checkIfCompleted() - s.completedMutex.Unlock() - }, - } - s.receiveStream = *newReceiveStream(streamID, senderForReceiveStream, flowController, version) - return s -} - -// need to define StreamID() here, since both receiveStream and readStream have a StreamID() -func (s *stream) StreamID() protocol.StreamID { - // the result is same for receiveStream and sendStream - return s.sendStream.StreamID() -} - -func (s *stream) Close() error { - if err := s.sendStream.Close(); err != nil { - return 
err - } - return nil -} - -func (s *stream) SetDeadline(t time.Time) error { - _ = s.SetReadDeadline(t) // SetReadDeadline never errors - _ = s.SetWriteDeadline(t) // SetWriteDeadline never errors - return nil -} - -// CloseForShutdown closes a stream abruptly. -// It makes Read and Write unblock (and return the error) immediately. -// The peer will NOT be informed about this: the stream is closed without sending a FIN or RST. -func (s *stream) closeForShutdown(err error) { - s.sendStream.closeForShutdown(err) - s.receiveStream.closeForShutdown(err) -} - -func (s *stream) handleResetStreamFrame(frame *wire.ResetStreamFrame) error { - return s.receiveStream.handleResetStreamFrame(frame) -} - -// checkIfCompleted is called from the uniStreamSender, when one of the stream halves is completed. -// It makes sure that the onStreamCompleted callback is only called if both receive and send side have completed. -func (s *stream) checkIfCompleted() { - if s.sendStreamCompleted && s.receiveStreamCompleted { - s.sender.onStreamCompleted(s.StreamID()) - } -} diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map.go b/vendor/github.com/lucas-clemente/quic-go/streams_map.go deleted file mode 100644 index 846ece004..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/streams_map.go +++ /dev/null @@ -1,192 +0,0 @@ -package quic - -import ( - "errors" - "fmt" - "net" - - "github.com/lucas-clemente/quic-go/internal/flowcontrol" - "github.com/lucas-clemente/quic-go/internal/handshake" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type streamOpenErr struct{ error } - -var _ net.Error = &streamOpenErr{} - -func (e streamOpenErr) Temporary() bool { return e.error == errTooManyOpenStreams } -func (streamOpenErr) Timeout() bool { return false } - -// errTooManyOpenStreams is used internally by the outgoing streams maps. 
-var errTooManyOpenStreams = errors.New("too many open streams") - -type streamsMap struct { - perspective protocol.Perspective - - sender streamSender - newFlowController func(protocol.StreamID) flowcontrol.StreamFlowController - - outgoingBidiStreams *outgoingBidiStreamsMap - outgoingUniStreams *outgoingUniStreamsMap - incomingBidiStreams *incomingBidiStreamsMap - incomingUniStreams *incomingUniStreamsMap -} - -var _ streamManager = &streamsMap{} - -func newStreamsMap( - sender streamSender, - newFlowController func(protocol.StreamID) flowcontrol.StreamFlowController, - maxIncomingStreams uint64, - maxIncomingUniStreams uint64, - perspective protocol.Perspective, - version protocol.VersionNumber, -) streamManager { - m := &streamsMap{ - perspective: perspective, - newFlowController: newFlowController, - sender: sender, - } - newBidiStream := func(id protocol.StreamID) streamI { - return newStream(id, m.sender, m.newFlowController(id), version) - } - newUniSendStream := func(id protocol.StreamID) sendStreamI { - return newSendStream(id, m.sender, m.newFlowController(id), version) - } - newUniReceiveStream := func(id protocol.StreamID) receiveStreamI { - return newReceiveStream(id, m.sender, m.newFlowController(id), version) - } - m.outgoingBidiStreams = newOutgoingBidiStreamsMap( - protocol.FirstStream(protocol.StreamTypeBidi, perspective), - newBidiStream, - sender.queueControlFrame, - ) - m.incomingBidiStreams = newIncomingBidiStreamsMap( - protocol.FirstStream(protocol.StreamTypeBidi, perspective.Opposite()), - protocol.MaxStreamID(protocol.StreamTypeBidi, maxIncomingStreams, perspective.Opposite()), - maxIncomingStreams, - sender.queueControlFrame, - newBidiStream, - ) - m.outgoingUniStreams = newOutgoingUniStreamsMap( - protocol.FirstStream(protocol.StreamTypeUni, perspective), - newUniSendStream, - sender.queueControlFrame, - ) - m.incomingUniStreams = newIncomingUniStreamsMap( - protocol.FirstStream(protocol.StreamTypeUni, perspective.Opposite()), - 
protocol.MaxStreamID(protocol.StreamTypeUni, maxIncomingUniStreams, perspective.Opposite()), - maxIncomingUniStreams, - sender.queueControlFrame, - newUniReceiveStream, - ) - return m -} - -func (m *streamsMap) OpenStream() (Stream, error) { - return m.outgoingBidiStreams.OpenStream() -} - -func (m *streamsMap) OpenStreamSync() (Stream, error) { - return m.outgoingBidiStreams.OpenStreamSync() -} - -func (m *streamsMap) OpenUniStream() (SendStream, error) { - return m.outgoingUniStreams.OpenStream() -} - -func (m *streamsMap) OpenUniStreamSync() (SendStream, error) { - return m.outgoingUniStreams.OpenStreamSync() -} - -func (m *streamsMap) AcceptStream() (Stream, error) { - return m.incomingBidiStreams.AcceptStream() -} - -func (m *streamsMap) AcceptUniStream() (ReceiveStream, error) { - return m.incomingUniStreams.AcceptStream() -} - -func (m *streamsMap) DeleteStream(id protocol.StreamID) error { - switch id.Type() { - case protocol.StreamTypeUni: - if id.InitiatedBy() == m.perspective { - return m.outgoingUniStreams.DeleteStream(id) - } - return m.incomingUniStreams.DeleteStream(id) - case protocol.StreamTypeBidi: - if id.InitiatedBy() == m.perspective { - return m.outgoingBidiStreams.DeleteStream(id) - } - return m.incomingBidiStreams.DeleteStream(id) - } - panic("") -} - -func (m *streamsMap) GetOrOpenReceiveStream(id protocol.StreamID) (receiveStreamI, error) { - switch id.Type() { - case protocol.StreamTypeUni: - if id.InitiatedBy() == m.perspective { - // an outgoing unidirectional stream is a send stream, not a receive stream - return nil, fmt.Errorf("peer attempted to open receive stream %d", id) - } - return m.incomingUniStreams.GetOrOpenStream(id) - case protocol.StreamTypeBidi: - if id.InitiatedBy() == m.perspective { - return m.outgoingBidiStreams.GetStream(id) - } - return m.incomingBidiStreams.GetOrOpenStream(id) - } - panic("") -} - -func (m *streamsMap) GetOrOpenSendStream(id protocol.StreamID) (sendStreamI, error) { - switch id.Type() { - case 
protocol.StreamTypeUni: - if id.InitiatedBy() == m.perspective { - return m.outgoingUniStreams.GetStream(id) - } - // an incoming unidirectional stream is a receive stream, not a send stream - return nil, fmt.Errorf("peer attempted to open send stream %d", id) - case protocol.StreamTypeBidi: - if id.InitiatedBy() == m.perspective { - return m.outgoingBidiStreams.GetStream(id) - } - return m.incomingBidiStreams.GetOrOpenStream(id) - } - panic("") -} - -func (m *streamsMap) HandleMaxStreamsFrame(f *wire.MaxStreamsFrame) error { - if f.MaxStreams > protocol.MaxStreamCount { - return qerr.StreamLimitError - } - id := protocol.MaxStreamID(f.Type, f.MaxStreams, m.perspective) - switch id.Type() { - case protocol.StreamTypeUni: - m.outgoingUniStreams.SetMaxStream(id) - case protocol.StreamTypeBidi: - fmt.Printf("") - m.outgoingBidiStreams.SetMaxStream(id) - } - return nil -} - -func (m *streamsMap) UpdateLimits(p *handshake.TransportParameters) error { - if p.MaxBidiStreams > protocol.MaxStreamCount || p.MaxUniStreams > protocol.MaxStreamCount { - return qerr.StreamLimitError - } - // Max{Uni,Bidi}StreamID returns the highest stream ID that the peer is allowed to open. 
- m.outgoingBidiStreams.SetMaxStream(protocol.MaxStreamID(protocol.StreamTypeBidi, p.MaxBidiStreams, m.perspective)) - m.outgoingUniStreams.SetMaxStream(protocol.MaxStreamID(protocol.StreamTypeUni, p.MaxUniStreams, m.perspective)) - return nil -} - -func (m *streamsMap) CloseWithError(err error) { - m.outgoingBidiStreams.CloseWithError(err) - m.outgoingUniStreams.CloseWithError(err) - m.incomingBidiStreams.CloseWithError(err) - m.incomingUniStreams.CloseWithError(err) -} diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_generic_helper.go b/vendor/github.com/lucas-clemente/quic-go/streams_map_generic_helper.go deleted file mode 100644 index 692f093e4..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/streams_map_generic_helper.go +++ /dev/null @@ -1,17 +0,0 @@ -package quic - -import ( - "github.com/cheekybits/genny/generic" - - "github.com/lucas-clemente/quic-go/internal/protocol" -) - -// In the auto-generated streams maps, we need to be able to close the streams. -// Therefore, extend the generic.Type with the stream close method. -// This definition must be in a file that Genny doesn't process. -type item interface { - generic.Type - closeForShutdown(error) -} - -const streamTypeGeneric protocol.StreamType = protocol.StreamTypeUni diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_bidi.go b/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_bidi.go deleted file mode 100644 index 787c3d903..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_bidi.go +++ /dev/null @@ -1,162 +0,0 @@ -// This file was automatically generated by genny. -// Any changes will be lost if this file is regenerated. 
-// see https://github.com/cheekybits/genny - -package quic - -import ( - "fmt" - "sync" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type incomingBidiStreamsMap struct { - mutex sync.RWMutex - cond sync.Cond - - streams map[protocol.StreamID]streamI - // When a stream is deleted before it was accepted, we can't delete it immediately. - // We need to wait until the application accepts it, and delete it immediately then. - streamsToDelete map[protocol.StreamID]struct{} // used as a set - - nextStreamToAccept protocol.StreamID // the next stream that will be returned by AcceptStream() - nextStreamToOpen protocol.StreamID // the highest stream that the peer openend - maxStream protocol.StreamID // the highest stream that the peer is allowed to open - maxNumStreams uint64 // maximum number of streams - - newStream func(protocol.StreamID) streamI - queueMaxStreamID func(*wire.MaxStreamsFrame) - - closeErr error -} - -func newIncomingBidiStreamsMap( - nextStreamToAccept protocol.StreamID, - initialMaxStreamID protocol.StreamID, - maxNumStreams uint64, - queueControlFrame func(wire.Frame), - newStream func(protocol.StreamID) streamI, -) *incomingBidiStreamsMap { - m := &incomingBidiStreamsMap{ - streams: make(map[protocol.StreamID]streamI), - streamsToDelete: make(map[protocol.StreamID]struct{}), - nextStreamToAccept: nextStreamToAccept, - nextStreamToOpen: nextStreamToAccept, - maxStream: initialMaxStreamID, - maxNumStreams: maxNumStreams, - newStream: newStream, - queueMaxStreamID: func(f *wire.MaxStreamsFrame) { queueControlFrame(f) }, - } - m.cond.L = &m.mutex - return m -} - -func (m *incomingBidiStreamsMap) AcceptStream() (streamI, error) { - m.mutex.Lock() - defer m.mutex.Unlock() - - var id protocol.StreamID - var str streamI - for { - id = m.nextStreamToAccept - var ok bool - if m.closeErr != nil { - return nil, m.closeErr - } - str, ok = m.streams[id] - if ok { - break - } - m.cond.Wait() - } 
- m.nextStreamToAccept += 4 - // If this stream was completed before being accepted, we can delete it now. - if _, ok := m.streamsToDelete[id]; ok { - delete(m.streamsToDelete, id) - if err := m.deleteStream(id); err != nil { - return nil, err - } - } - return str, nil -} - -func (m *incomingBidiStreamsMap) GetOrOpenStream(id protocol.StreamID) (streamI, error) { - m.mutex.RLock() - if id > m.maxStream { - m.mutex.RUnlock() - return nil, fmt.Errorf("peer tried to open stream %d (current limit: %d)", id, m.maxStream) - } - // if the id is smaller than the highest we accepted - // * this stream exists in the map, and we can return it, or - // * this stream was already closed, then we can return the nil - if id < m.nextStreamToOpen { - var s streamI - // If the stream was already queued for deletion, and is just waiting to be accepted, don't return it. - if _, ok := m.streamsToDelete[id]; !ok { - s = m.streams[id] - } - m.mutex.RUnlock() - return s, nil - } - m.mutex.RUnlock() - - m.mutex.Lock() - // no need to check the two error conditions from above again - // * maxStream can only increase, so if the id was valid before, it definitely is valid now - // * highestStream is only modified by this function - for newID := m.nextStreamToOpen; newID <= id; newID += 4 { - m.streams[newID] = m.newStream(newID) - m.cond.Signal() - } - m.nextStreamToOpen = id + 4 - s := m.streams[id] - m.mutex.Unlock() - return s, nil -} - -func (m *incomingBidiStreamsMap) DeleteStream(id protocol.StreamID) error { - m.mutex.Lock() - defer m.mutex.Unlock() - - return m.deleteStream(id) -} - -func (m *incomingBidiStreamsMap) deleteStream(id protocol.StreamID) error { - if _, ok := m.streams[id]; !ok { - return fmt.Errorf("Tried to delete unknown stream %d", id) - } - - // Don't delete this stream yet, if it was not yet accepted. - // Just save it to streamsToDelete map, to make sure it is deleted as soon as it gets accepted. 
- if id >= m.nextStreamToAccept { - if _, ok := m.streamsToDelete[id]; ok { - return fmt.Errorf("Tried to delete stream %d multiple times", id) - } - m.streamsToDelete[id] = struct{}{} - return nil - } - - delete(m.streams, id) - // queue a MAX_STREAM_ID frame, giving the peer the option to open a new stream - if m.maxNumStreams > uint64(len(m.streams)) { - numNewStreams := m.maxNumStreams - uint64(len(m.streams)) - m.maxStream = m.nextStreamToOpen + protocol.StreamID((numNewStreams-1)*4) - m.queueMaxStreamID(&wire.MaxStreamsFrame{ - Type: protocol.StreamTypeBidi, - MaxStreams: m.maxStream.StreamNum(), - }) - } - return nil -} - -func (m *incomingBidiStreamsMap) CloseWithError(err error) { - m.mutex.Lock() - m.closeErr = err - for _, str := range m.streams { - str.closeForShutdown(err) - } - m.mutex.Unlock() - m.cond.Broadcast() -} diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_generic.go b/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_generic.go deleted file mode 100644 index 503f02908..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_generic.go +++ /dev/null @@ -1,160 +0,0 @@ -package quic - -import ( - "fmt" - "sync" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -//go:generate genny -in $GOFILE -out streams_map_incoming_bidi.go gen "item=streamI Item=BidiStream streamTypeGeneric=protocol.StreamTypeBidi" -//go:generate genny -in $GOFILE -out streams_map_incoming_uni.go gen "item=receiveStreamI Item=UniStream streamTypeGeneric=protocol.StreamTypeUni" -type incomingItemsMap struct { - mutex sync.RWMutex - cond sync.Cond - - streams map[protocol.StreamID]item - // When a stream is deleted before it was accepted, we can't delete it immediately. - // We need to wait until the application accepts it, and delete it immediately then. 
- streamsToDelete map[protocol.StreamID]struct{} // used as a set - - nextStreamToAccept protocol.StreamID // the next stream that will be returned by AcceptStream() - nextStreamToOpen protocol.StreamID // the highest stream that the peer openend - maxStream protocol.StreamID // the highest stream that the peer is allowed to open - maxNumStreams uint64 // maximum number of streams - - newStream func(protocol.StreamID) item - queueMaxStreamID func(*wire.MaxStreamsFrame) - - closeErr error -} - -func newIncomingItemsMap( - nextStreamToAccept protocol.StreamID, - initialMaxStreamID protocol.StreamID, - maxNumStreams uint64, - queueControlFrame func(wire.Frame), - newStream func(protocol.StreamID) item, -) *incomingItemsMap { - m := &incomingItemsMap{ - streams: make(map[protocol.StreamID]item), - streamsToDelete: make(map[protocol.StreamID]struct{}), - nextStreamToAccept: nextStreamToAccept, - nextStreamToOpen: nextStreamToAccept, - maxStream: initialMaxStreamID, - maxNumStreams: maxNumStreams, - newStream: newStream, - queueMaxStreamID: func(f *wire.MaxStreamsFrame) { queueControlFrame(f) }, - } - m.cond.L = &m.mutex - return m -} - -func (m *incomingItemsMap) AcceptStream() (item, error) { - m.mutex.Lock() - defer m.mutex.Unlock() - - var id protocol.StreamID - var str item - for { - id = m.nextStreamToAccept - var ok bool - if m.closeErr != nil { - return nil, m.closeErr - } - str, ok = m.streams[id] - if ok { - break - } - m.cond.Wait() - } - m.nextStreamToAccept += 4 - // If this stream was completed before being accepted, we can delete it now. 
- if _, ok := m.streamsToDelete[id]; ok { - delete(m.streamsToDelete, id) - if err := m.deleteStream(id); err != nil { - return nil, err - } - } - return str, nil -} - -func (m *incomingItemsMap) GetOrOpenStream(id protocol.StreamID) (item, error) { - m.mutex.RLock() - if id > m.maxStream { - m.mutex.RUnlock() - return nil, fmt.Errorf("peer tried to open stream %d (current limit: %d)", id, m.maxStream) - } - // if the id is smaller than the highest we accepted - // * this stream exists in the map, and we can return it, or - // * this stream was already closed, then we can return the nil - if id < m.nextStreamToOpen { - var s item - // If the stream was already queued for deletion, and is just waiting to be accepted, don't return it. - if _, ok := m.streamsToDelete[id]; !ok { - s = m.streams[id] - } - m.mutex.RUnlock() - return s, nil - } - m.mutex.RUnlock() - - m.mutex.Lock() - // no need to check the two error conditions from above again - // * maxStream can only increase, so if the id was valid before, it definitely is valid now - // * highestStream is only modified by this function - for newID := m.nextStreamToOpen; newID <= id; newID += 4 { - m.streams[newID] = m.newStream(newID) - m.cond.Signal() - } - m.nextStreamToOpen = id + 4 - s := m.streams[id] - m.mutex.Unlock() - return s, nil -} - -func (m *incomingItemsMap) DeleteStream(id protocol.StreamID) error { - m.mutex.Lock() - defer m.mutex.Unlock() - - return m.deleteStream(id) -} - -func (m *incomingItemsMap) deleteStream(id protocol.StreamID) error { - if _, ok := m.streams[id]; !ok { - return fmt.Errorf("Tried to delete unknown stream %d", id) - } - - // Don't delete this stream yet, if it was not yet accepted. - // Just save it to streamsToDelete map, to make sure it is deleted as soon as it gets accepted. 
- if id >= m.nextStreamToAccept { - if _, ok := m.streamsToDelete[id]; ok { - return fmt.Errorf("Tried to delete stream %d multiple times", id) - } - m.streamsToDelete[id] = struct{}{} - return nil - } - - delete(m.streams, id) - // queue a MAX_STREAM_ID frame, giving the peer the option to open a new stream - if m.maxNumStreams > uint64(len(m.streams)) { - numNewStreams := m.maxNumStreams - uint64(len(m.streams)) - m.maxStream = m.nextStreamToOpen + protocol.StreamID((numNewStreams-1)*4) - m.queueMaxStreamID(&wire.MaxStreamsFrame{ - Type: streamTypeGeneric, - MaxStreams: m.maxStream.StreamNum(), - }) - } - return nil -} - -func (m *incomingItemsMap) CloseWithError(err error) { - m.mutex.Lock() - m.closeErr = err - for _, str := range m.streams { - str.closeForShutdown(err) - } - m.mutex.Unlock() - m.cond.Broadcast() -} diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_uni.go b/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_uni.go deleted file mode 100644 index f36fcee54..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/streams_map_incoming_uni.go +++ /dev/null @@ -1,162 +0,0 @@ -// This file was automatically generated by genny. -// Any changes will be lost if this file is regenerated. -// see https://github.com/cheekybits/genny - -package quic - -import ( - "fmt" - "sync" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type incomingUniStreamsMap struct { - mutex sync.RWMutex - cond sync.Cond - - streams map[protocol.StreamID]receiveStreamI - // When a stream is deleted before it was accepted, we can't delete it immediately. - // We need to wait until the application accepts it, and delete it immediately then. 
- streamsToDelete map[protocol.StreamID]struct{} // used as a set - - nextStreamToAccept protocol.StreamID // the next stream that will be returned by AcceptStream() - nextStreamToOpen protocol.StreamID // the highest stream that the peer openend - maxStream protocol.StreamID // the highest stream that the peer is allowed to open - maxNumStreams uint64 // maximum number of streams - - newStream func(protocol.StreamID) receiveStreamI - queueMaxStreamID func(*wire.MaxStreamsFrame) - - closeErr error -} - -func newIncomingUniStreamsMap( - nextStreamToAccept protocol.StreamID, - initialMaxStreamID protocol.StreamID, - maxNumStreams uint64, - queueControlFrame func(wire.Frame), - newStream func(protocol.StreamID) receiveStreamI, -) *incomingUniStreamsMap { - m := &incomingUniStreamsMap{ - streams: make(map[protocol.StreamID]receiveStreamI), - streamsToDelete: make(map[protocol.StreamID]struct{}), - nextStreamToAccept: nextStreamToAccept, - nextStreamToOpen: nextStreamToAccept, - maxStream: initialMaxStreamID, - maxNumStreams: maxNumStreams, - newStream: newStream, - queueMaxStreamID: func(f *wire.MaxStreamsFrame) { queueControlFrame(f) }, - } - m.cond.L = &m.mutex - return m -} - -func (m *incomingUniStreamsMap) AcceptStream() (receiveStreamI, error) { - m.mutex.Lock() - defer m.mutex.Unlock() - - var id protocol.StreamID - var str receiveStreamI - for { - id = m.nextStreamToAccept - var ok bool - if m.closeErr != nil { - return nil, m.closeErr - } - str, ok = m.streams[id] - if ok { - break - } - m.cond.Wait() - } - m.nextStreamToAccept += 4 - // If this stream was completed before being accepted, we can delete it now. 
- if _, ok := m.streamsToDelete[id]; ok { - delete(m.streamsToDelete, id) - if err := m.deleteStream(id); err != nil { - return nil, err - } - } - return str, nil -} - -func (m *incomingUniStreamsMap) GetOrOpenStream(id protocol.StreamID) (receiveStreamI, error) { - m.mutex.RLock() - if id > m.maxStream { - m.mutex.RUnlock() - return nil, fmt.Errorf("peer tried to open stream %d (current limit: %d)", id, m.maxStream) - } - // if the id is smaller than the highest we accepted - // * this stream exists in the map, and we can return it, or - // * this stream was already closed, then we can return the nil - if id < m.nextStreamToOpen { - var s receiveStreamI - // If the stream was already queued for deletion, and is just waiting to be accepted, don't return it. - if _, ok := m.streamsToDelete[id]; !ok { - s = m.streams[id] - } - m.mutex.RUnlock() - return s, nil - } - m.mutex.RUnlock() - - m.mutex.Lock() - // no need to check the two error conditions from above again - // * maxStream can only increase, so if the id was valid before, it definitely is valid now - // * highestStream is only modified by this function - for newID := m.nextStreamToOpen; newID <= id; newID += 4 { - m.streams[newID] = m.newStream(newID) - m.cond.Signal() - } - m.nextStreamToOpen = id + 4 - s := m.streams[id] - m.mutex.Unlock() - return s, nil -} - -func (m *incomingUniStreamsMap) DeleteStream(id protocol.StreamID) error { - m.mutex.Lock() - defer m.mutex.Unlock() - - return m.deleteStream(id) -} - -func (m *incomingUniStreamsMap) deleteStream(id protocol.StreamID) error { - if _, ok := m.streams[id]; !ok { - return fmt.Errorf("Tried to delete unknown stream %d", id) - } - - // Don't delete this stream yet, if it was not yet accepted. - // Just save it to streamsToDelete map, to make sure it is deleted as soon as it gets accepted. 
- if id >= m.nextStreamToAccept { - if _, ok := m.streamsToDelete[id]; ok { - return fmt.Errorf("Tried to delete stream %d multiple times", id) - } - m.streamsToDelete[id] = struct{}{} - return nil - } - - delete(m.streams, id) - // queue a MAX_STREAM_ID frame, giving the peer the option to open a new stream - if m.maxNumStreams > uint64(len(m.streams)) { - numNewStreams := m.maxNumStreams - uint64(len(m.streams)) - m.maxStream = m.nextStreamToOpen + protocol.StreamID((numNewStreams-1)*4) - m.queueMaxStreamID(&wire.MaxStreamsFrame{ - Type: protocol.StreamTypeUni, - MaxStreams: m.maxStream.StreamNum(), - }) - } - return nil -} - -func (m *incomingUniStreamsMap) CloseWithError(err error) { - m.mutex.Lock() - m.closeErr = err - for _, str := range m.streams { - str.closeForShutdown(err) - } - m.mutex.Unlock() - m.cond.Broadcast() -} diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_bidi.go b/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_bidi.go deleted file mode 100644 index a4457775c..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_bidi.go +++ /dev/null @@ -1,147 +0,0 @@ -// This file was automatically generated by genny. -// Any changes will be lost if this file is regenerated. -// see https://github.com/cheekybits/genny - -package quic - -import ( - "fmt" - "sync" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type outgoingBidiStreamsMap struct { - mutex sync.RWMutex - cond sync.Cond - - streams map[protocol.StreamID]streamI - - nextStream protocol.StreamID // stream ID of the stream returned by OpenStream(Sync) - maxStream protocol.StreamID // the maximum stream ID we're allowed to open - maxStreamSet bool // was maxStream set. 
If not, it's not possible to any stream (also works for stream 0) - blockedSent bool // was a STREAMS_BLOCKED sent for the current maxStream - - newStream func(protocol.StreamID) streamI - queueStreamIDBlocked func(*wire.StreamsBlockedFrame) - - closeErr error -} - -func newOutgoingBidiStreamsMap( - nextStream protocol.StreamID, - newStream func(protocol.StreamID) streamI, - queueControlFrame func(wire.Frame), -) *outgoingBidiStreamsMap { - m := &outgoingBidiStreamsMap{ - streams: make(map[protocol.StreamID]streamI), - nextStream: nextStream, - newStream: newStream, - queueStreamIDBlocked: func(f *wire.StreamsBlockedFrame) { queueControlFrame(f) }, - } - m.cond.L = &m.mutex - return m -} - -func (m *outgoingBidiStreamsMap) OpenStream() (streamI, error) { - m.mutex.Lock() - defer m.mutex.Unlock() - - if m.closeErr != nil { - return nil, m.closeErr - } - - str, err := m.openStreamImpl() - if err != nil { - return nil, streamOpenErr{err} - } - return str, nil -} - -func (m *outgoingBidiStreamsMap) OpenStreamSync() (streamI, error) { - m.mutex.Lock() - defer m.mutex.Unlock() - - for { - if m.closeErr != nil { - return nil, m.closeErr - } - str, err := m.openStreamImpl() - if err == nil { - return str, nil - } - if err != nil && err != errTooManyOpenStreams { - return nil, streamOpenErr{err} - } - m.cond.Wait() - } -} - -func (m *outgoingBidiStreamsMap) openStreamImpl() (streamI, error) { - if !m.maxStreamSet || m.nextStream > m.maxStream { - if !m.blockedSent { - if m.maxStreamSet { - m.queueStreamIDBlocked(&wire.StreamsBlockedFrame{ - Type: protocol.StreamTypeBidi, - StreamLimit: m.maxStream.StreamNum(), - }) - } else { - m.queueStreamIDBlocked(&wire.StreamsBlockedFrame{ - Type: protocol.StreamTypeBidi, - StreamLimit: 0, - }) - } - m.blockedSent = true - } - return nil, errTooManyOpenStreams - } - s := m.newStream(m.nextStream) - m.streams[m.nextStream] = s - m.nextStream += 4 - return s, nil -} - -func (m *outgoingBidiStreamsMap) GetStream(id protocol.StreamID) 
(streamI, error) { - m.mutex.RLock() - if id >= m.nextStream { - m.mutex.RUnlock() - return nil, qerr.Error(qerr.StreamStateError, fmt.Sprintf("peer attempted to open stream %d", id)) - } - s := m.streams[id] - m.mutex.RUnlock() - return s, nil -} - -func (m *outgoingBidiStreamsMap) DeleteStream(id protocol.StreamID) error { - m.mutex.Lock() - defer m.mutex.Unlock() - - if _, ok := m.streams[id]; !ok { - return fmt.Errorf("Tried to delete unknown stream %d", id) - } - delete(m.streams, id) - return nil -} - -func (m *outgoingBidiStreamsMap) SetMaxStream(id protocol.StreamID) { - m.mutex.Lock() - if !m.maxStreamSet || id > m.maxStream { - m.maxStream = id - m.maxStreamSet = true - m.blockedSent = false - m.cond.Broadcast() - } - m.mutex.Unlock() -} - -func (m *outgoingBidiStreamsMap) CloseWithError(err error) { - m.mutex.Lock() - m.closeErr = err - for _, str := range m.streams { - str.closeForShutdown(err) - } - m.cond.Broadcast() - m.mutex.Unlock() -} diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_generic.go b/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_generic.go deleted file mode 100644 index c0657b902..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_generic.go +++ /dev/null @@ -1,145 +0,0 @@ -package quic - -import ( - "fmt" - "sync" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -//go:generate genny -in $GOFILE -out streams_map_outgoing_bidi.go gen "item=streamI Item=BidiStream streamTypeGeneric=protocol.StreamTypeBidi" -//go:generate genny -in $GOFILE -out streams_map_outgoing_uni.go gen "item=sendStreamI Item=UniStream streamTypeGeneric=protocol.StreamTypeUni" -type outgoingItemsMap struct { - mutex sync.RWMutex - cond sync.Cond - - streams map[protocol.StreamID]item - - nextStream protocol.StreamID // stream ID of the stream returned by OpenStream(Sync) - maxStream 
protocol.StreamID // the maximum stream ID we're allowed to open - maxStreamSet bool // was maxStream set. If not, it's not possible to any stream (also works for stream 0) - blockedSent bool // was a STREAMS_BLOCKED sent for the current maxStream - - newStream func(protocol.StreamID) item - queueStreamIDBlocked func(*wire.StreamsBlockedFrame) - - closeErr error -} - -func newOutgoingItemsMap( - nextStream protocol.StreamID, - newStream func(protocol.StreamID) item, - queueControlFrame func(wire.Frame), -) *outgoingItemsMap { - m := &outgoingItemsMap{ - streams: make(map[protocol.StreamID]item), - nextStream: nextStream, - newStream: newStream, - queueStreamIDBlocked: func(f *wire.StreamsBlockedFrame) { queueControlFrame(f) }, - } - m.cond.L = &m.mutex - return m -} - -func (m *outgoingItemsMap) OpenStream() (item, error) { - m.mutex.Lock() - defer m.mutex.Unlock() - - if m.closeErr != nil { - return nil, m.closeErr - } - - str, err := m.openStreamImpl() - if err != nil { - return nil, streamOpenErr{err} - } - return str, nil -} - -func (m *outgoingItemsMap) OpenStreamSync() (item, error) { - m.mutex.Lock() - defer m.mutex.Unlock() - - for { - if m.closeErr != nil { - return nil, m.closeErr - } - str, err := m.openStreamImpl() - if err == nil { - return str, nil - } - if err != nil && err != errTooManyOpenStreams { - return nil, streamOpenErr{err} - } - m.cond.Wait() - } -} - -func (m *outgoingItemsMap) openStreamImpl() (item, error) { - if !m.maxStreamSet || m.nextStream > m.maxStream { - if !m.blockedSent { - if m.maxStreamSet { - m.queueStreamIDBlocked(&wire.StreamsBlockedFrame{ - Type: streamTypeGeneric, - StreamLimit: m.maxStream.StreamNum(), - }) - } else { - m.queueStreamIDBlocked(&wire.StreamsBlockedFrame{ - Type: streamTypeGeneric, - StreamLimit: 0, - }) - } - m.blockedSent = true - } - return nil, errTooManyOpenStreams - } - s := m.newStream(m.nextStream) - m.streams[m.nextStream] = s - m.nextStream += 4 - return s, nil -} - -func (m *outgoingItemsMap) 
GetStream(id protocol.StreamID) (item, error) { - m.mutex.RLock() - if id >= m.nextStream { - m.mutex.RUnlock() - return nil, qerr.Error(qerr.StreamStateError, fmt.Sprintf("peer attempted to open stream %d", id)) - } - s := m.streams[id] - m.mutex.RUnlock() - return s, nil -} - -func (m *outgoingItemsMap) DeleteStream(id protocol.StreamID) error { - m.mutex.Lock() - defer m.mutex.Unlock() - - if _, ok := m.streams[id]; !ok { - return fmt.Errorf("Tried to delete unknown stream %d", id) - } - delete(m.streams, id) - return nil -} - -func (m *outgoingItemsMap) SetMaxStream(id protocol.StreamID) { - m.mutex.Lock() - if !m.maxStreamSet || id > m.maxStream { - m.maxStream = id - m.maxStreamSet = true - m.blockedSent = false - m.cond.Broadcast() - } - m.mutex.Unlock() -} - -func (m *outgoingItemsMap) CloseWithError(err error) { - m.mutex.Lock() - m.closeErr = err - for _, str := range m.streams { - str.closeForShutdown(err) - } - m.cond.Broadcast() - m.mutex.Unlock() -} diff --git a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_uni.go b/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_uni.go deleted file mode 100644 index a38240a63..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/streams_map_outgoing_uni.go +++ /dev/null @@ -1,147 +0,0 @@ -// This file was automatically generated by genny. -// Any changes will be lost if this file is regenerated. -// see https://github.com/cheekybits/genny - -package quic - -import ( - "fmt" - "sync" - - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/qerr" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type outgoingUniStreamsMap struct { - mutex sync.RWMutex - cond sync.Cond - - streams map[protocol.StreamID]sendStreamI - - nextStream protocol.StreamID // stream ID of the stream returned by OpenStream(Sync) - maxStream protocol.StreamID // the maximum stream ID we're allowed to open - maxStreamSet bool // was maxStream set. 
If not, it's not possible to any stream (also works for stream 0) - blockedSent bool // was a STREAMS_BLOCKED sent for the current maxStream - - newStream func(protocol.StreamID) sendStreamI - queueStreamIDBlocked func(*wire.StreamsBlockedFrame) - - closeErr error -} - -func newOutgoingUniStreamsMap( - nextStream protocol.StreamID, - newStream func(protocol.StreamID) sendStreamI, - queueControlFrame func(wire.Frame), -) *outgoingUniStreamsMap { - m := &outgoingUniStreamsMap{ - streams: make(map[protocol.StreamID]sendStreamI), - nextStream: nextStream, - newStream: newStream, - queueStreamIDBlocked: func(f *wire.StreamsBlockedFrame) { queueControlFrame(f) }, - } - m.cond.L = &m.mutex - return m -} - -func (m *outgoingUniStreamsMap) OpenStream() (sendStreamI, error) { - m.mutex.Lock() - defer m.mutex.Unlock() - - if m.closeErr != nil { - return nil, m.closeErr - } - - str, err := m.openStreamImpl() - if err != nil { - return nil, streamOpenErr{err} - } - return str, nil -} - -func (m *outgoingUniStreamsMap) OpenStreamSync() (sendStreamI, error) { - m.mutex.Lock() - defer m.mutex.Unlock() - - for { - if m.closeErr != nil { - return nil, m.closeErr - } - str, err := m.openStreamImpl() - if err == nil { - return str, nil - } - if err != nil && err != errTooManyOpenStreams { - return nil, streamOpenErr{err} - } - m.cond.Wait() - } -} - -func (m *outgoingUniStreamsMap) openStreamImpl() (sendStreamI, error) { - if !m.maxStreamSet || m.nextStream > m.maxStream { - if !m.blockedSent { - if m.maxStreamSet { - m.queueStreamIDBlocked(&wire.StreamsBlockedFrame{ - Type: protocol.StreamTypeUni, - StreamLimit: m.maxStream.StreamNum(), - }) - } else { - m.queueStreamIDBlocked(&wire.StreamsBlockedFrame{ - Type: protocol.StreamTypeUni, - StreamLimit: 0, - }) - } - m.blockedSent = true - } - return nil, errTooManyOpenStreams - } - s := m.newStream(m.nextStream) - m.streams[m.nextStream] = s - m.nextStream += 4 - return s, nil -} - -func (m *outgoingUniStreamsMap) GetStream(id 
protocol.StreamID) (sendStreamI, error) { - m.mutex.RLock() - if id >= m.nextStream { - m.mutex.RUnlock() - return nil, qerr.Error(qerr.StreamStateError, fmt.Sprintf("peer attempted to open stream %d", id)) - } - s := m.streams[id] - m.mutex.RUnlock() - return s, nil -} - -func (m *outgoingUniStreamsMap) DeleteStream(id protocol.StreamID) error { - m.mutex.Lock() - defer m.mutex.Unlock() - - if _, ok := m.streams[id]; !ok { - return fmt.Errorf("Tried to delete unknown stream %d", id) - } - delete(m.streams, id) - return nil -} - -func (m *outgoingUniStreamsMap) SetMaxStream(id protocol.StreamID) { - m.mutex.Lock() - if !m.maxStreamSet || id > m.maxStream { - m.maxStream = id - m.maxStreamSet = true - m.blockedSent = false - m.cond.Broadcast() - } - m.mutex.Unlock() -} - -func (m *outgoingUniStreamsMap) CloseWithError(err error) { - m.mutex.Lock() - m.closeErr = err - for _, str := range m.streams { - str.closeForShutdown(err) - } - m.cond.Broadcast() - m.mutex.Unlock() -} diff --git a/vendor/github.com/lucas-clemente/quic-go/window_update_queue.go b/vendor/github.com/lucas-clemente/quic-go/window_update_queue.go deleted file mode 100644 index 64b912a3f..000000000 --- a/vendor/github.com/lucas-clemente/quic-go/window_update_queue.go +++ /dev/null @@ -1,71 +0,0 @@ -package quic - -import ( - "sync" - - "github.com/lucas-clemente/quic-go/internal/flowcontrol" - "github.com/lucas-clemente/quic-go/internal/protocol" - "github.com/lucas-clemente/quic-go/internal/wire" -) - -type windowUpdateQueue struct { - mutex sync.Mutex - - queue map[protocol.StreamID]bool // used as a set - queuedConn bool // connection-level window update - - streamGetter streamGetter - connFlowController flowcontrol.ConnectionFlowController - callback func(wire.Frame) -} - -func newWindowUpdateQueue( - streamGetter streamGetter, - connFC flowcontrol.ConnectionFlowController, - cb func(wire.Frame), -) *windowUpdateQueue { - return &windowUpdateQueue{ - queue: make(map[protocol.StreamID]bool), - 
streamGetter: streamGetter, - connFlowController: connFC, - callback: cb, - } -} - -func (q *windowUpdateQueue) AddStream(id protocol.StreamID) { - q.mutex.Lock() - q.queue[id] = true - q.mutex.Unlock() -} - -func (q *windowUpdateQueue) AddConnection() { - q.mutex.Lock() - q.queuedConn = true - q.mutex.Unlock() -} - -func (q *windowUpdateQueue) QueueAll() { - q.mutex.Lock() - // queue a connection-level window update - if q.queuedConn { - q.callback(&wire.MaxDataFrame{ByteOffset: q.connFlowController.GetWindowUpdate()}) - q.queuedConn = false - } - // queue all stream-level window updates - for id := range q.queue { - str, err := q.streamGetter.GetOrOpenReceiveStream(id) - if err != nil || str == nil { // the stream can be nil if it was completed before dequeing the window update - continue - } - offset := str.getWindowUpdate() - if offset == 0 { // can happen if we received a final offset, right after queueing the window update - continue - } - q.callback(&wire.MaxStreamDataFrame{ - StreamID: id, - ByteOffset: offset, - }) - delete(q.queue, id) - } - q.mutex.Unlock() -} diff --git a/vendor/github.com/marten-seemann/qtls/alert.go b/vendor/github.com/marten-seemann/qtls/alert.go deleted file mode 100644 index ba1c9ce39..000000000 --- a/vendor/github.com/marten-seemann/qtls/alert.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package qtls - -import "strconv" - -type alert uint8 - -// Alert is a TLS alert -type Alert = alert - -const ( - // alert level - alertLevelWarning = 1 - alertLevelError = 2 -) - -const ( - alertCloseNotify alert = 0 - alertUnexpectedMessage alert = 10 - alertBadRecordMAC alert = 20 - alertDecryptionFailed alert = 21 - alertRecordOverflow alert = 22 - alertDecompressionFailure alert = 30 - alertHandshakeFailure alert = 40 - alertBadCertificate alert = 42 - alertUnsupportedCertificate alert = 43 - alertCertificateRevoked alert = 44 - alertCertificateExpired alert = 45 - alertCertificateUnknown alert = 46 - alertIllegalParameter alert = 47 - alertUnknownCA alert = 48 - alertAccessDenied alert = 49 - alertDecodeError alert = 50 - alertDecryptError alert = 51 - alertProtocolVersion alert = 70 - alertInsufficientSecurity alert = 71 - alertInternalError alert = 80 - alertInappropriateFallback alert = 86 - alertUserCanceled alert = 90 - alertNoRenegotiation alert = 100 - alertMissingExtension alert = 109 - alertUnsupportedExtension alert = 110 - alertNoApplicationProtocol alert = 120 -) - -var alertText = map[alert]string{ - alertCloseNotify: "close notify", - alertUnexpectedMessage: "unexpected message", - alertBadRecordMAC: "bad record MAC", - alertDecryptionFailed: "decryption failed", - alertRecordOverflow: "record overflow", - alertDecompressionFailure: "decompression failure", - alertHandshakeFailure: "handshake failure", - alertBadCertificate: "bad certificate", - alertUnsupportedCertificate: "unsupported certificate", - alertCertificateRevoked: "revoked certificate", - alertCertificateExpired: "expired certificate", - alertCertificateUnknown: "unknown certificate", - alertIllegalParameter: "illegal parameter", - alertUnknownCA: "unknown certificate authority", - alertAccessDenied: "access denied", - alertDecodeError: "error decoding message", - alertDecryptError: "error decrypting message", - alertProtocolVersion: "protocol version not supported", - 
alertInsufficientSecurity: "insufficient security level", - alertInternalError: "internal error", - alertInappropriateFallback: "inappropriate fallback", - alertUserCanceled: "user canceled", - alertNoRenegotiation: "no renegotiation", - alertMissingExtension: "missing extension", - alertUnsupportedExtension: "unsupported extension", - alertNoApplicationProtocol: "no application protocol", -} - -func (e alert) String() string { - s, ok := alertText[e] - if ok { - return "tls: " + s - } - return "tls: alert(" + strconv.Itoa(int(e)) + ")" -} - -func (e alert) Error() string { - return e.String() -} diff --git a/vendor/github.com/marten-seemann/qtls/auth.go b/vendor/github.com/marten-seemann/qtls/auth.go deleted file mode 100644 index b8fd7f9ba..000000000 --- a/vendor/github.com/marten-seemann/qtls/auth.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package qtls - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "encoding/asn1" - "errors" - "fmt" - "hash" - "io" -) - -// pickSignatureAlgorithm selects a signature algorithm that is compatible with -// the given public key and the list of algorithms from the peer and this side. -// The lists of signature algorithms (peerSigAlgs and ourSigAlgs) are ignored -// for tlsVersion < VersionTLS12. -// -// The returned SignatureScheme codepoint is only meaningful for TLS 1.2, -// previous TLS versions have a fixed hash function. -func pickSignatureAlgorithm(pubkey crypto.PublicKey, peerSigAlgs, ourSigAlgs []SignatureScheme, tlsVersion uint16) (sigAlg SignatureScheme, sigType uint8, hashFunc crypto.Hash, err error) { - if tlsVersion < VersionTLS12 || len(peerSigAlgs) == 0 { - // For TLS 1.1 and before, the signature algorithm could not be - // negotiated and the hash is fixed based on the signature type. 
For TLS - // 1.2, if the client didn't send signature_algorithms extension then we - // can assume that it supports SHA1. See RFC 5246, Section 7.4.1.4.1. - switch pubkey.(type) { - case *rsa.PublicKey: - if tlsVersion < VersionTLS12 { - return 0, signaturePKCS1v15, crypto.MD5SHA1, nil - } else { - return PKCS1WithSHA1, signaturePKCS1v15, crypto.SHA1, nil - } - case *ecdsa.PublicKey: - return ECDSAWithSHA1, signatureECDSA, crypto.SHA1, nil - default: - return 0, 0, 0, fmt.Errorf("tls: unsupported public key: %T", pubkey) - } - } - for _, sigAlg := range peerSigAlgs { - if !isSupportedSignatureAlgorithm(sigAlg, ourSigAlgs) { - continue - } - hashAlg, err := hashFromSignatureScheme(sigAlg) - if err != nil { - panic("tls: supported signature algorithm has an unknown hash function") - } - sigType := signatureFromSignatureScheme(sigAlg) - switch pubkey.(type) { - case *rsa.PublicKey: - if sigType == signaturePKCS1v15 || sigType == signatureRSAPSS { - return sigAlg, sigType, hashAlg, nil - } - case *ecdsa.PublicKey: - if sigType == signatureECDSA { - return sigAlg, sigType, hashAlg, nil - } - default: - return 0, 0, 0, fmt.Errorf("tls: unsupported public key: %T", pubkey) - } - } - return 0, 0, 0, errors.New("tls: peer doesn't support any common signature algorithms") -} - -// verifyHandshakeSignature verifies a signature against pre-hashed handshake -// contents. 
-func verifyHandshakeSignature(sigType uint8, pubkey crypto.PublicKey, hashFunc crypto.Hash, digest, sig []byte) error { - switch sigType { - case signatureECDSA: - pubKey, ok := pubkey.(*ecdsa.PublicKey) - if !ok { - return errors.New("tls: ECDSA signing requires a ECDSA public key") - } - ecdsaSig := new(ecdsaSignature) - if _, err := asn1.Unmarshal(sig, ecdsaSig); err != nil { - return err - } - if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { - return errors.New("tls: ECDSA signature contained zero or negative values") - } - if !ecdsa.Verify(pubKey, digest, ecdsaSig.R, ecdsaSig.S) { - return errors.New("tls: ECDSA verification failure") - } - case signaturePKCS1v15: - pubKey, ok := pubkey.(*rsa.PublicKey) - if !ok { - return errors.New("tls: RSA signing requires a RSA public key") - } - if err := rsa.VerifyPKCS1v15(pubKey, hashFunc, digest, sig); err != nil { - return err - } - case signatureRSAPSS: - pubKey, ok := pubkey.(*rsa.PublicKey) - if !ok { - return errors.New("tls: RSA signing requires a RSA public key") - } - signOpts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash} - if err := rsa.VerifyPSS(pubKey, hashFunc, digest, sig, signOpts); err != nil { - return err - } - default: - return errors.New("tls: unknown signature algorithm") - } - return nil -} - -const ( - serverSignatureContext = "TLS 1.3, server CertificateVerify\x00" - clientSignatureContext = "TLS 1.3, client CertificateVerify\x00" -) - -var signaturePadding = []byte{ - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, -} - -// writeSignedMessage writes the content to be signed by certificate keys in TLS -// 1.3 to sigHash. 
See RFC 8446, Section 4.4.3. -func writeSignedMessage(sigHash io.Writer, context string, transcript hash.Hash) { - sigHash.Write(signaturePadding) - io.WriteString(sigHash, context) - sigHash.Write(transcript.Sum(nil)) -} - -// signatureSchemesForCertificate returns the list of supported SignatureSchemes -// for a given certificate, based on the public key and the protocol version. It -// does not support the crypto.Decrypter interface, so shouldn't be used on the -// server side in TLS 1.2 and earlier. -func signatureSchemesForCertificate(version uint16, cert *Certificate) []SignatureScheme { - priv, ok := cert.PrivateKey.(crypto.Signer) - if !ok { - return nil - } - - switch pub := priv.Public().(type) { - case *ecdsa.PublicKey: - if version != VersionTLS13 { - // In TLS 1.2 and earlier, ECDSA algorithms are not - // constrained to a single curve. - return []SignatureScheme{ - ECDSAWithP256AndSHA256, - ECDSAWithP384AndSHA384, - ECDSAWithP521AndSHA512, - ECDSAWithSHA1, - } - } - switch pub.Curve { - case elliptic.P256(): - return []SignatureScheme{ECDSAWithP256AndSHA256} - case elliptic.P384(): - return []SignatureScheme{ECDSAWithP384AndSHA384} - case elliptic.P521(): - return []SignatureScheme{ECDSAWithP521AndSHA512} - default: - return nil - } - case *rsa.PublicKey: - if version != VersionTLS13 { - return []SignatureScheme{ - PSSWithSHA256, - PSSWithSHA384, - PSSWithSHA512, - PKCS1WithSHA256, - PKCS1WithSHA384, - PKCS1WithSHA512, - PKCS1WithSHA1, - } - } - // RSA keys with RSA-PSS OID are not supported by crypto/x509. - return []SignatureScheme{ - PSSWithSHA256, - PSSWithSHA384, - PSSWithSHA512, - } - default: - return nil - } -} - -// unsupportedCertificateError returns a helpful error for certificates with -// an unsupported private key. 
-func unsupportedCertificateError(cert *Certificate) error { - switch cert.PrivateKey.(type) { - case rsa.PrivateKey, ecdsa.PrivateKey: - return fmt.Errorf("tls: unsupported certificate: private key is %T, expected *%T", - cert.PrivateKey, cert.PrivateKey) - } - - signer, ok := cert.PrivateKey.(crypto.Signer) - if !ok { - return fmt.Errorf("tls: certificate private key (%T) does not implement crypto.Signer", - cert.PrivateKey) - } - - switch pub := signer.Public().(type) { - case *ecdsa.PublicKey: - switch pub.Curve { - case elliptic.P256(): - case elliptic.P384(): - case elliptic.P521(): - default: - return fmt.Errorf("tls: unsupported certificate curve (%s)", pub.Curve.Params().Name) - } - case *rsa.PublicKey: - default: - return fmt.Errorf("tls: unsupported certificate key (%T)", pub) - } - - return fmt.Errorf("tls: internal error: unsupported key (%T)", cert.PrivateKey) -} diff --git a/vendor/github.com/marten-seemann/qtls/cipher_suites.go b/vendor/github.com/marten-seemann/qtls/cipher_suites.go deleted file mode 100644 index e0b7e2f19..000000000 --- a/vendor/github.com/marten-seemann/qtls/cipher_suites.go +++ /dev/null @@ -1,487 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package qtls - -import ( - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/hmac" - "crypto/rc4" - "crypto/sha1" - "crypto/sha256" - "crypto/x509" - "hash" - - "golang.org/x/crypto/chacha20poly1305" -) - -// a keyAgreement implements the client and server side of a TLS key agreement -// protocol by generating and processing key exchange messages. -type keyAgreement interface { - // On the server side, the first two methods are called in order. - - // In the case that the key agreement protocol doesn't use a - // ServerKeyExchange message, generateServerKeyExchange can return nil, - // nil. 
- generateServerKeyExchange(*Config, *Certificate, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, error) - processClientKeyExchange(*Config, *Certificate, *clientKeyExchangeMsg, uint16) ([]byte, error) - - // On the client side, the next two methods are called in order. - - // This method may not be called if the server doesn't send a - // ServerKeyExchange message. - processServerKeyExchange(*Config, *clientHelloMsg, *serverHelloMsg, *x509.Certificate, *serverKeyExchangeMsg) error - generateClientKeyExchange(*Config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) -} - -const ( - // suiteECDH indicates that the cipher suite involves elliptic curve - // Diffie-Hellman. This means that it should only be selected when the - // client indicates that it supports ECC with a curve and point format - // that we're happy with. - suiteECDHE = 1 << iota - // suiteECDSA indicates that the cipher suite involves an ECDSA - // signature and therefore may only be selected when the server's - // certificate is ECDSA. If this is not set then the cipher suite is - // RSA based. - suiteECDSA - // suiteTLS12 indicates that the cipher suite should only be advertised - // and accepted when using TLS 1.2. - suiteTLS12 - // suiteSHA384 indicates that the cipher suite uses SHA384 as the - // handshake hash. - suiteSHA384 - // suiteDefaultOff indicates that this cipher suite is not included by - // default. - suiteDefaultOff -) - -type CipherSuite struct { - *cipherSuiteTLS13 -} - -func (c *CipherSuite) Hash() crypto.Hash { return c.hash } -func (c *CipherSuite) KeyLen() int { return c.keyLen } -func (c *CipherSuite) IVLen() int { return aeadNonceLength } -func (c *CipherSuite) AEAD(key, fixedNonce []byte) cipher.AEAD { return c.aead(key, fixedNonce) } - -// A cipherSuite is a specific combination of key agreement, cipher and MAC function. -type cipherSuite struct { - id uint16 - // the lengths, in bytes, of the key material needed for each component. 
- keyLen int - macLen int - ivLen int - ka func(version uint16) keyAgreement - // flags is a bitmask of the suite* values, above. - flags int - cipher func(key, iv []byte, isRead bool) interface{} - mac func(version uint16, macKey []byte) macFunction - aead func(key, fixedNonce []byte) aead -} - -var cipherSuites = []*cipherSuite{ - // Ciphersuite order is chosen so that ECDHE comes before plain RSA and - // AEADs are the top preference. - {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadChaCha20Poly1305}, - {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteTLS12, nil, nil, aeadChaCha20Poly1305}, - {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadAESGCM}, - {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteTLS12, nil, nil, aeadAESGCM}, - {TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM}, - {TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM}, - {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheRSAKA, suiteECDHE | suiteTLS12 | suiteDefaultOff, cipherAES, macSHA256, nil}, - {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil}, - {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteTLS12 | suiteDefaultOff, cipherAES, macSHA256, nil}, - {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherAES, macSHA1, nil}, - {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil}, - {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherAES, macSHA1, nil}, - {TLS_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, rsaKA, 
suiteTLS12, nil, nil, aeadAESGCM}, - {TLS_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, rsaKA, suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM}, - {TLS_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, rsaKA, suiteTLS12 | suiteDefaultOff, cipherAES, macSHA256, nil}, - {TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil}, - {TLS_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil}, - {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, suiteECDHE, cipher3DES, macSHA1, nil}, - {TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, 0, cipher3DES, macSHA1, nil}, - - // RC4-based cipher suites are disabled by default. - {TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, suiteDefaultOff, cipherRC4, macSHA1, nil}, - {TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, suiteECDHE | suiteDefaultOff, cipherRC4, macSHA1, nil}, - {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteDefaultOff, cipherRC4, macSHA1, nil}, -} - -// A cipherSuiteTLS13 defines only the pair of the AEAD algorithm and hash -// algorithm to be used with HKDF. See RFC 8446, Appendix B.4. 
-type cipherSuiteTLS13 struct { - id uint16 - keyLen int - aead func(key, fixedNonce []byte) aead - hash crypto.Hash -} - -var cipherSuitesTLS13 = []*cipherSuiteTLS13{ - {TLS_AES_128_GCM_SHA256, 16, aeadAESGCMTLS13, crypto.SHA256}, - {TLS_CHACHA20_POLY1305_SHA256, 32, aeadChaCha20Poly1305, crypto.SHA256}, - {TLS_AES_256_GCM_SHA384, 32, aeadAESGCMTLS13, crypto.SHA384}, -} - -func cipherRC4(key, iv []byte, isRead bool) interface{} { - cipher, _ := rc4.NewCipher(key) - return cipher -} - -func cipher3DES(key, iv []byte, isRead bool) interface{} { - block, _ := des.NewTripleDESCipher(key) - if isRead { - return cipher.NewCBCDecrypter(block, iv) - } - return cipher.NewCBCEncrypter(block, iv) -} - -func cipherAES(key, iv []byte, isRead bool) interface{} { - block, _ := aes.NewCipher(key) - if isRead { - return cipher.NewCBCDecrypter(block, iv) - } - return cipher.NewCBCEncrypter(block, iv) -} - -// macSHA1 returns a macFunction for the given protocol version. -func macSHA1(version uint16, key []byte) macFunction { - if version == VersionSSL30 { - mac := ssl30MAC{ - h: sha1.New(), - key: make([]byte, len(key)), - } - copy(mac.key, key) - return mac - } - return tls10MAC{h: hmac.New(newConstantTimeHash(sha1.New), key)} -} - -// macSHA256 returns a SHA-256 based MAC. These are only supported in TLS 1.2 -// so the given version is ignored. -func macSHA256(version uint16, key []byte) macFunction { - return tls10MAC{h: hmac.New(sha256.New, key)} -} - -type macFunction interface { - // Size returns the length of the MAC. - Size() int - // MAC appends the MAC of (seq, header, data) to out. The extra data is fed - // into the MAC after obtaining the result to normalize timing. The result - // is only valid until the next invocation of MAC as the buffer is reused. - MAC(seq, header, data, extra []byte) []byte -} - -type aead interface { - cipher.AEAD - - // explicitNonceLen returns the number of bytes of explicit nonce - // included in each record. 
This is eight for older AEADs and - // zero for modern ones. - explicitNonceLen() int -} - -const ( - aeadNonceLength = 12 - noncePrefixLength = 4 -) - -// prefixNonceAEAD wraps an AEAD and prefixes a fixed portion of the nonce to -// each call. -type prefixNonceAEAD struct { - // nonce contains the fixed part of the nonce in the first four bytes. - nonce [aeadNonceLength]byte - aead cipher.AEAD -} - -func (f *prefixNonceAEAD) NonceSize() int { return aeadNonceLength - noncePrefixLength } -func (f *prefixNonceAEAD) Overhead() int { return f.aead.Overhead() } -func (f *prefixNonceAEAD) explicitNonceLen() int { return f.NonceSize() } - -func (f *prefixNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte { - copy(f.nonce[4:], nonce) - return f.aead.Seal(out, f.nonce[:], plaintext, additionalData) -} - -func (f *prefixNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) { - copy(f.nonce[4:], nonce) - return f.aead.Open(out, f.nonce[:], ciphertext, additionalData) -} - -// xoredNonceAEAD wraps an AEAD by XORing in a fixed pattern to the nonce -// before each call. 
-type xorNonceAEAD struct { - nonceMask [aeadNonceLength]byte - aead cipher.AEAD -} - -func (f *xorNonceAEAD) NonceSize() int { return 8 } // 64-bit sequence number -func (f *xorNonceAEAD) Overhead() int { return f.aead.Overhead() } -func (f *xorNonceAEAD) explicitNonceLen() int { return 0 } - -func (f *xorNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte { - for i, b := range nonce { - f.nonceMask[4+i] ^= b - } - result := f.aead.Seal(out, f.nonceMask[:], plaintext, additionalData) - for i, b := range nonce { - f.nonceMask[4+i] ^= b - } - - return result -} - -func (f *xorNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) { - for i, b := range nonce { - f.nonceMask[4+i] ^= b - } - result, err := f.aead.Open(out, f.nonceMask[:], ciphertext, additionalData) - for i, b := range nonce { - f.nonceMask[4+i] ^= b - } - - return result, err -} - -func aeadAESGCM(key, noncePrefix []byte) aead { - if len(noncePrefix) != noncePrefixLength { - panic("tls: internal error: wrong nonce length") - } - aes, err := aes.NewCipher(key) - if err != nil { - panic(err) - } - aead, err := cipher.NewGCM(aes) - if err != nil { - panic(err) - } - - ret := &prefixNonceAEAD{aead: aead} - copy(ret.nonce[:], noncePrefix) - return ret -} - -// AEADAESGCMTLS13 creates a new AES-GCM AEAD for TLS 1.3 -func AEADAESGCMTLS13(key, fixedNonce []byte) cipher.AEAD { - return aeadAESGCMTLS13(key, fixedNonce) -} - -func aeadAESGCMTLS13(key, nonceMask []byte) aead { - if len(nonceMask) != aeadNonceLength { - panic("tls: internal error: wrong nonce length") - } - aes, err := aes.NewCipher(key) - if err != nil { - panic(err) - } - aead, err := cipher.NewGCM(aes) - if err != nil { - panic(err) - } - - ret := &xorNonceAEAD{aead: aead} - copy(ret.nonceMask[:], nonceMask) - return ret -} - -func aeadChaCha20Poly1305(key, nonceMask []byte) aead { - if len(nonceMask) != aeadNonceLength { - panic("tls: internal error: wrong nonce length") - } - aead, err := 
chacha20poly1305.New(key) - if err != nil { - panic(err) - } - - ret := &xorNonceAEAD{aead: aead} - copy(ret.nonceMask[:], nonceMask) - return ret -} - -// ssl30MAC implements the SSLv3 MAC function, as defined in -// www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt section 5.2.3.1 -type ssl30MAC struct { - h hash.Hash - key []byte - buf []byte -} - -func (s ssl30MAC) Size() int { - return s.h.Size() -} - -var ssl30Pad1 = [48]byte{0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36} - -var ssl30Pad2 = [48]byte{0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c} - -// MAC does not offer constant timing guarantees for SSL v3.0, since it's deemed -// useless considering the similar, protocol-level POODLE vulnerability. -func (s ssl30MAC) MAC(seq, header, data, extra []byte) []byte { - padLength := 48 - if s.h.Size() == 20 { - padLength = 40 - } - - s.h.Reset() - s.h.Write(s.key) - s.h.Write(ssl30Pad1[:padLength]) - s.h.Write(seq) - s.h.Write(header[:1]) - s.h.Write(header[3:5]) - s.h.Write(data) - s.buf = s.h.Sum(s.buf[:0]) - - s.h.Reset() - s.h.Write(s.key) - s.h.Write(ssl30Pad2[:padLength]) - s.h.Write(s.buf) - return s.h.Sum(s.buf[:0]) -} - -type constantTimeHash interface { - hash.Hash - ConstantTimeSum(b []byte) []byte -} - -// cthWrapper wraps any hash.Hash that implements ConstantTimeSum, and replaces -// with that all calls to Sum. It's used to obtain a ConstantTimeSum-based HMAC. 
-type cthWrapper struct { - h constantTimeHash -} - -func (c *cthWrapper) Size() int { return c.h.Size() } -func (c *cthWrapper) BlockSize() int { return c.h.BlockSize() } -func (c *cthWrapper) Reset() { c.h.Reset() } -func (c *cthWrapper) Write(p []byte) (int, error) { return c.h.Write(p) } -func (c *cthWrapper) Sum(b []byte) []byte { return c.h.ConstantTimeSum(b) } - -func newConstantTimeHash(h func() hash.Hash) func() hash.Hash { - return func() hash.Hash { - return &cthWrapper{h().(constantTimeHash)} - } -} - -// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, Section 6.2.3. -type tls10MAC struct { - h hash.Hash - buf []byte -} - -func (s tls10MAC) Size() int { - return s.h.Size() -} - -// MAC is guaranteed to take constant time, as long as -// len(seq)+len(header)+len(data)+len(extra) is constant. extra is not fed into -// the MAC, but is only provided to make the timing profile constant. -func (s tls10MAC) MAC(seq, header, data, extra []byte) []byte { - s.h.Reset() - s.h.Write(seq) - s.h.Write(header) - s.h.Write(data) - res := s.h.Sum(s.buf[:0]) - if extra != nil { - s.h.Write(extra) - } - return res -} - -func rsaKA(version uint16) keyAgreement { - return rsaKeyAgreement{} -} - -func ecdheECDSAKA(version uint16) keyAgreement { - return &ecdheKeyAgreement{ - isRSA: false, - version: version, - } -} - -func ecdheRSAKA(version uint16) keyAgreement { - return &ecdheKeyAgreement{ - isRSA: true, - version: version, - } -} - -// mutualCipherSuite returns a cipherSuite given a list of supported -// ciphersuites and the id requested by the peer. 
-func mutualCipherSuite(have []uint16, want uint16) *cipherSuite { - for _, id := range have { - if id == want { - return cipherSuiteByID(id) - } - } - return nil -} - -func cipherSuiteByID(id uint16) *cipherSuite { - for _, cipherSuite := range cipherSuites { - if cipherSuite.id == id { - return cipherSuite - } - } - return nil -} - -func mutualCipherSuiteTLS13(have []uint16, want uint16) *cipherSuiteTLS13 { - for _, id := range have { - if id == want { - return cipherSuiteTLS13ByID(id) - } - } - return nil -} - -func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13 { - for _, cipherSuite := range cipherSuitesTLS13 { - if cipherSuite.id == id { - return cipherSuite - } - } - return nil -} - -// A list of cipher suite IDs that are, or have been, implemented by this -// package. -// -// Taken from https://www.iana.org/assignments/tls-parameters/tls-parameters.xml -const ( - // TLS 1.0 - 1.2 cipher suites. - TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 - TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a - TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002f - TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 - TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003c - TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009c - TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009d - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xc007 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xc009 - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xc00a - TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xc011 - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xc012 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013 - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014 - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xc023 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xc027 - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02f - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02b - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xc030 - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xc02c - 
TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 uint16 = 0xcca8 - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 uint16 = 0xcca9 - - // TLS 1.3 cipher suites. - TLS_AES_128_GCM_SHA256 uint16 = 0x1301 - TLS_AES_256_GCM_SHA384 uint16 = 0x1302 - TLS_CHACHA20_POLY1305_SHA256 uint16 = 0x1303 - - // TLS_FALLBACK_SCSV isn't a standard cipher suite but an indicator - // that the client is doing version fallback. See RFC 7507. - TLS_FALLBACK_SCSV uint16 = 0x5600 -) diff --git a/vendor/github.com/marten-seemann/qtls/common.go b/vendor/github.com/marten-seemann/qtls/common.go deleted file mode 100644 index df4c98e86..000000000 --- a/vendor/github.com/marten-seemann/qtls/common.go +++ /dev/null @@ -1,1148 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package qtls - -import ( - "container/list" - "crypto/rand" - "crypto/sha512" - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io" - "math/big" - "os" - "strings" - "sync" - "time" - - "golang.org/x/sys/cpu" -) - -const ( - VersionSSL30 = 0x0300 - VersionTLS10 = 0x0301 - VersionTLS11 = 0x0302 - VersionTLS12 = 0x0303 - VersionTLS13 = 0x0304 -) - -const ( - maxPlaintext = 16384 // maximum plaintext payload length - maxCiphertext = 16384 + 2048 // maximum ciphertext payload length - maxCiphertextTLS13 = 16384 + 256 // maximum ciphertext length in TLS 1.3 - recordHeaderLen = 5 // record header length - maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB) - maxUselessRecords = 16 // maximum number of consecutive non-advancing records -) - -// TLS record types. -type recordType uint8 - -const ( - recordTypeChangeCipherSpec recordType = 20 - recordTypeAlert recordType = 21 - recordTypeHandshake recordType = 22 - recordTypeApplicationData recordType = 23 -) - -// TLS handshake message types. 
-const ( - typeHelloRequest uint8 = 0 - typeClientHello uint8 = 1 - typeServerHello uint8 = 2 - typeNewSessionTicket uint8 = 4 - typeEndOfEarlyData uint8 = 5 - typeEncryptedExtensions uint8 = 8 - typeCertificate uint8 = 11 - typeServerKeyExchange uint8 = 12 - typeCertificateRequest uint8 = 13 - typeServerHelloDone uint8 = 14 - typeCertificateVerify uint8 = 15 - typeClientKeyExchange uint8 = 16 - typeFinished uint8 = 20 - typeCertificateStatus uint8 = 22 - typeKeyUpdate uint8 = 24 - typeNextProtocol uint8 = 67 // Not IANA assigned - typeMessageHash uint8 = 254 // synthetic message -) - -// TLS compression types. -const ( - compressionNone uint8 = 0 -) - -type Extension struct { - Type uint16 - Data []byte -} - -// TLS extension numbers -const ( - extensionServerName uint16 = 0 - extensionStatusRequest uint16 = 5 - extensionSupportedCurves uint16 = 10 // supported_groups in TLS 1.3, see RFC 8446, Section 4.2.7 - extensionSupportedPoints uint16 = 11 - extensionSignatureAlgorithms uint16 = 13 - extensionALPN uint16 = 16 - extensionSCT uint16 = 18 - extensionSessionTicket uint16 = 35 - extensionPreSharedKey uint16 = 41 - extensionEarlyData uint16 = 42 - extensionSupportedVersions uint16 = 43 - extensionCookie uint16 = 44 - extensionPSKModes uint16 = 45 - extensionCertificateAuthorities uint16 = 47 - extensionSignatureAlgorithmsCert uint16 = 50 - extensionKeyShare uint16 = 51 - extensionNextProtoNeg uint16 = 13172 // not IANA assigned - extensionRenegotiationInfo uint16 = 0xff01 -) - -// TLS signaling cipher suite values -const ( - scsvRenegotiation uint16 = 0x00ff -) - -// CurveID is a tls.CurveID -type CurveID = tls.CurveID - -const ( - CurveP256 CurveID = 23 - CurveP384 CurveID = 24 - CurveP521 CurveID = 25 - X25519 CurveID = 29 -) - -// TLS 1.3 Key Share. See RFC 8446, Section 4.2.8. -type keyShare struct { - group CurveID - data []byte -} - -// TLS 1.3 PSK Key Exchange Modes. See RFC 8446, Section 4.2.9. 
-const ( - pskModePlain uint8 = 0 - pskModeDHE uint8 = 1 -) - -// TLS 1.3 PSK Identity. Can be a Session Ticket, or a reference to a saved -// session. See RFC 8446, Section 4.2.11. -type pskIdentity struct { - label []byte - obfuscatedTicketAge uint32 -} - -// TLS Elliptic Curve Point Formats -// https://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9 -const ( - pointFormatUncompressed uint8 = 0 -) - -// TLS CertificateStatusType (RFC 3546) -const ( - statusTypeOCSP uint8 = 1 -) - -// Certificate types (for certificateRequestMsg) -const ( - certTypeRSASign = 1 - certTypeECDSASign = 64 // RFC 4492, Section 5.5 -) - -// Signature algorithms (for internal signaling use). Starting at 16 to avoid overlap with -// TLS 1.2 codepoints (RFC 5246, Appendix A.4.1), with which these have nothing to do. -const ( - signaturePKCS1v15 uint8 = iota + 16 - signatureECDSA - signatureRSAPSS -) - -// supportedSignatureAlgorithms contains the signature and hash algorithms that -// the code advertises as supported in a TLS 1.2+ ClientHello and in a TLS 1.2+ -// CertificateRequest. The two fields are merged to match with TLS 1.3. -// Note that in TLS 1.2, the ECDSA algorithms are not constrained to P-256, etc. -var supportedSignatureAlgorithms = []SignatureScheme{ - PSSWithSHA256, - PSSWithSHA384, - PSSWithSHA512, - PKCS1WithSHA256, - ECDSAWithP256AndSHA256, - PKCS1WithSHA384, - ECDSAWithP384AndSHA384, - PKCS1WithSHA512, - ECDSAWithP521AndSHA512, - PKCS1WithSHA1, - ECDSAWithSHA1, -} - -// RSA-PSS is disabled in TLS 1.2 for Go 1.12. See Issue 30055. -var supportedSignatureAlgorithmsTLS12 = supportedSignatureAlgorithms[3:] - -// helloRetryRequestRandom is set as the Random value of a ServerHello -// to signal that the message is actually a HelloRetryRequest. -var helloRetryRequestRandom = []byte{ // See RFC 8446, Section 4.1.3. 
- 0xCF, 0x21, 0xAD, 0x74, 0xE5, 0x9A, 0x61, 0x11, - 0xBE, 0x1D, 0x8C, 0x02, 0x1E, 0x65, 0xB8, 0x91, - 0xC2, 0xA2, 0x11, 0x16, 0x7A, 0xBB, 0x8C, 0x5E, - 0x07, 0x9E, 0x09, 0xE2, 0xC8, 0xA8, 0x33, 0x9C, -} - -const ( - // downgradeCanaryTLS12 or downgradeCanaryTLS11 is embedded in the server - // random as a downgrade protection if the server would be capable of - // negotiating a higher version. See RFC 8446, Section 4.1.3. - downgradeCanaryTLS12 = "DOWNGRD\x01" - downgradeCanaryTLS11 = "DOWNGRD\x00" -) - -// ConnectionState records basic TLS details about the connection. -type ConnectionState struct { - Version uint16 // TLS version used by the connection (e.g. VersionTLS12) - HandshakeComplete bool // TLS handshake is complete - DidResume bool // connection resumes a previous TLS connection - CipherSuite uint16 // cipher suite in use (TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, ...) - NegotiatedProtocol string // negotiated next protocol (not guaranteed to be from Config.NextProtos) - NegotiatedProtocolIsMutual bool // negotiated protocol was advertised by server (client side only) - ServerName string // server name requested by client, if any (server side only) - PeerCertificates []*x509.Certificate // certificate chain presented by remote peer - VerifiedChains [][]*x509.Certificate // verified chains built from PeerCertificates - SignedCertificateTimestamps [][]byte // SCTs from the peer, if any - OCSPResponse []byte // stapled OCSP response from peer, if any - - // ekm is a closure exposed via ExportKeyingMaterial. - ekm func(label string, context []byte, length int) ([]byte, error) - - // TLSUnique contains the "tls-unique" channel binding value (see RFC - // 5929, section 3). For resumed sessions this value will be nil - // because resumption does not include enough context (see - // https://mitls.org/pages/attacks/3SHAKE#channelbindings). This will - // change in future versions of Go once the TLS master-secret fix has - // been standardized and implemented. 
It is not defined in TLS 1.3. - TLSUnique []byte -} - -// ExportKeyingMaterial returns length bytes of exported key material in a new -// slice as defined in RFC 5705. If context is nil, it is not used as part of -// the seed. If the connection was set to allow renegotiation via -// Config.Renegotiation, this function will return an error. -func (cs *ConnectionState) ExportKeyingMaterial(label string, context []byte, length int) ([]byte, error) { - return cs.ekm(label, context, length) -} - -// ClientAuthType is tls.ClientAuthType -type ClientAuthType = tls.ClientAuthType - -const ( - NoClientCert ClientAuthType = iota - RequestClientCert - RequireAnyClientCert - VerifyClientCertIfGiven - RequireAndVerifyClientCert -) - -// requiresClientCert reports whether the ClientAuthType requires a client -// certificate to be provided. -func requiresClientCert(c ClientAuthType) bool { - switch c { - case RequireAnyClientCert, RequireAndVerifyClientCert: - return true - default: - return false - } -} - -// ClientSessionState contains the state needed by clients to resume TLS -// sessions. -type ClientSessionState struct { - sessionTicket []uint8 // Encrypted ticket used for session resumption with server - vers uint16 // SSL/TLS version negotiated for the session - cipherSuite uint16 // Ciphersuite negotiated for the session - masterSecret []byte // Full handshake MasterSecret, or TLS 1.3 resumption_master_secret - serverCertificates []*x509.Certificate // Certificate chain presented by the server - verifiedChains [][]*x509.Certificate // Certificate chains we built for verification - receivedAt time.Time // When the session ticket was received from the server - - // TLS 1.3 fields. 
- nonce []byte // Ticket nonce sent by the server, to derive PSK - useBy time.Time // Expiration of the ticket lifetime as set by the server - ageAdd uint32 // Random obfuscation factor for sending the ticket age -} - -// ClientSessionCache is a cache of ClientSessionState objects that can be used -// by a client to resume a TLS session with a given server. ClientSessionCache -// implementations should expect to be called concurrently from different -// goroutines. Up to TLS 1.2, only ticket-based resumption is supported, not -// SessionID-based resumption. In TLS 1.3 they were merged into PSK modes, which -// are supported via this interface. -type ClientSessionCache interface { - // Get searches for a ClientSessionState associated with the given key. - // On return, ok is true if one was found. - Get(sessionKey string) (session *ClientSessionState, ok bool) - - // Put adds the ClientSessionState to the cache with the given key. It might - // get called multiple times in a connection if a TLS 1.3 server provides - // more than one session ticket. If called with a nil *ClientSessionState, - // it should remove the cache entry. - Put(sessionKey string, cs *ClientSessionState) -} - -// SignatureScheme is a tls.SignatureScheme -type SignatureScheme = tls.SignatureScheme - -const ( - // RSASSA-PKCS1-v1_5 algorithms. - PKCS1WithSHA256 SignatureScheme = 0x0401 - PKCS1WithSHA384 SignatureScheme = 0x0501 - PKCS1WithSHA512 SignatureScheme = 0x0601 - - // RSASSA-PSS algorithms with public key OID rsaEncryption. - PSSWithSHA256 SignatureScheme = 0x0804 - PSSWithSHA384 SignatureScheme = 0x0805 - PSSWithSHA512 SignatureScheme = 0x0806 - - // ECDSA algorithms. Only constrained to a specific curve in TLS 1.3. - ECDSAWithP256AndSHA256 SignatureScheme = 0x0403 - ECDSAWithP384AndSHA384 SignatureScheme = 0x0503 - ECDSAWithP521AndSHA512 SignatureScheme = 0x0603 - - // Legacy signature and hash algorithms for TLS 1.2. 
- PKCS1WithSHA1 SignatureScheme = 0x0201 - ECDSAWithSHA1 SignatureScheme = 0x0203 -) - -// A ClientHelloInfo is a tls.ClientHelloInfo -type ClientHelloInfo = tls.ClientHelloInfo - -// The CertificateRequestInfo is a tls.CertificateRequestInfo -type CertificateRequestInfo = tls.CertificateRequestInfo - -// RenegotiationSupport enumerates the different levels of support for TLS -// renegotiation. TLS renegotiation is the act of performing subsequent -// handshakes on a connection after the first. This significantly complicates -// the state machine and has been the source of numerous, subtle security -// issues. Initiating a renegotiation is not supported, but support for -// accepting renegotiation requests may be enabled. -// -// Even when enabled, the server may not change its identity between handshakes -// (i.e. the leaf certificate must be the same). Additionally, concurrent -// handshake and application data flow is not permitted so renegotiation can -// only be used with protocols that synchronise with the renegotiation, such as -// HTTPS. -// -// Renegotiation is not defined in TLS 1.3. -type RenegotiationSupport int - -const ( - // RenegotiateNever disables renegotiation. - RenegotiateNever RenegotiationSupport = iota - - // RenegotiateOnceAsClient allows a remote server to request - // renegotiation once per connection. - RenegotiateOnceAsClient - - // RenegotiateFreelyAsClient allows a remote server to repeatedly - // request renegotiation. - RenegotiateFreelyAsClient -) - -// A Config structure is used to configure a TLS client or server. -// After one has been passed to a TLS function it must not be -// modified. A Config may be reused; the tls package will also not -// modify it. -type Config struct { - // Rand provides the source of entropy for nonces and RSA blinding. - // If Rand is nil, TLS uses the cryptographic random reader in package - // crypto/rand. - // The Reader must be safe for use by multiple goroutines. 
- Rand io.Reader - - // Time returns the current time as the number of seconds since the epoch. - // If Time is nil, TLS uses time.Now. - Time func() time.Time - - // Certificates contains one or more certificate chains to present to - // the other side of the connection. Server configurations must include - // at least one certificate or else set GetCertificate. Clients doing - // client-authentication may set either Certificates or - // GetClientCertificate. - Certificates []Certificate - - // NameToCertificate maps from a certificate name to an element of - // Certificates. Note that a certificate name can be of the form - // '*.example.com' and so doesn't have to be a domain name as such. - // See Config.BuildNameToCertificate - // The nil value causes the first element of Certificates to be used - // for all connections. - NameToCertificate map[string]*Certificate - - // GetCertificate returns a Certificate based on the given - // ClientHelloInfo. It will only be called if the client supplies SNI - // information or if Certificates is empty. - // - // If GetCertificate is nil or returns nil, then the certificate is - // retrieved from NameToCertificate. If NameToCertificate is nil, the - // first element of Certificates will be used. - GetCertificate func(*ClientHelloInfo) (*Certificate, error) - - // GetClientCertificate, if not nil, is called when a server requests a - // certificate from a client. If set, the contents of Certificates will - // be ignored. - // - // If GetClientCertificate returns an error, the handshake will be - // aborted and that error will be returned. Otherwise - // GetClientCertificate must return a non-nil Certificate. If - // Certificate.Certificate is empty then no certificate will be sent to - // the server. If this is unacceptable to the server then it may abort - // the handshake. - // - // GetClientCertificate may be called multiple times for the same - // connection if renegotiation occurs or if TLS 1.3 is in use. 
- GetClientCertificate func(*CertificateRequestInfo) (*Certificate, error) - - // GetConfigForClient, if not nil, is called after a ClientHello is - // received from a client. It may return a non-nil Config in order to - // change the Config that will be used to handle this connection. If - // the returned Config is nil, the original Config will be used. The - // Config returned by this callback may not be subsequently modified. - // - // If GetConfigForClient is nil, the Config passed to Server() will be - // used for all connections. - // - // Uniquely for the fields in the returned Config, session ticket keys - // will be duplicated from the original Config if not set. - // Specifically, if SetSessionTicketKeys was called on the original - // config but not on the returned config then the ticket keys from the - // original config will be copied into the new config before use. - // Otherwise, if SessionTicketKey was set in the original config but - // not in the returned config then it will be copied into the returned - // config before use. If neither of those cases applies then the key - // material from the returned config will be used for session tickets. - GetConfigForClient func(*ClientHelloInfo) (*Config, error) - - // VerifyPeerCertificate, if not nil, is called after normal - // certificate verification by either a TLS client or server. It - // receives the raw ASN.1 certificates provided by the peer and also - // any verified chains that normal processing found. If it returns a - // non-nil error, the handshake is aborted and that error results. - // - // If normal verification fails then the handshake will abort before - // considering this callback. If normal verification is disabled by - // setting InsecureSkipVerify, or (for a server) when ClientAuth is - // RequestClientCert or RequireAnyClientCert, then this callback will - // be considered but the verifiedChains argument will always be nil. 
- VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error - - // RootCAs defines the set of root certificate authorities - // that clients use when verifying server certificates. - // If RootCAs is nil, TLS uses the host's root CA set. - RootCAs *x509.CertPool - - // NextProtos is a list of supported application level protocols, in - // order of preference. - NextProtos []string - - // ServerName is used to verify the hostname on the returned - // certificates unless InsecureSkipVerify is given. It is also included - // in the client's handshake to support virtual hosting unless it is - // an IP address. - ServerName string - - // ClientAuth determines the server's policy for - // TLS Client Authentication. The default is NoClientCert. - ClientAuth ClientAuthType - - // ClientCAs defines the set of root certificate authorities - // that servers use if required to verify a client certificate - // by the policy in ClientAuth. - ClientCAs *x509.CertPool - - // InsecureSkipVerify controls whether a client verifies the - // server's certificate chain and host name. - // If InsecureSkipVerify is true, TLS accepts any certificate - // presented by the server and any host name in that certificate. - // In this mode, TLS is susceptible to man-in-the-middle attacks. - // This should be used only for testing. - InsecureSkipVerify bool - - // CipherSuites is a list of supported cipher suites for TLS versions up to - // TLS 1.2. If CipherSuites is nil, a default list of secure cipher suites - // is used, with a preference order based on hardware performance. The - // default cipher suites might change over Go versions. Note that TLS 1.3 - // ciphersuites are not configurable. - CipherSuites []uint16 - - // PreferServerCipherSuites controls whether the server selects the - // client's most preferred ciphersuite, or the server's most preferred - // ciphersuite. 
If true then the server's preference, as expressed in - // the order of elements in CipherSuites, is used. - PreferServerCipherSuites bool - - // SessionTicketsDisabled may be set to true to disable session ticket and - // PSK (resumption) support. Note that on clients, session ticket support is - // also disabled if ClientSessionCache is nil. - SessionTicketsDisabled bool - - // SessionTicketKey is used by TLS servers to provide session resumption. - // See RFC 5077 and the PSK mode of RFC 8446. If zero, it will be filled - // with random data before the first server handshake. - // - // If multiple servers are terminating connections for the same host - // they should all have the same SessionTicketKey. If the - // SessionTicketKey leaks, previously recorded and future TLS - // connections using that key might be compromised. - SessionTicketKey [32]byte - - // ClientSessionCache is a cache of ClientSessionState entries for TLS - // session resumption. It is only used by clients. - ClientSessionCache ClientSessionCache - - // MinVersion contains the minimum SSL/TLS version that is acceptable. - // If zero, then TLS 1.0 is taken as the minimum. - MinVersion uint16 - - // MaxVersion contains the maximum SSL/TLS version that is acceptable. - // If zero, then the maximum version supported by this package is used, - // which is currently TLS 1.3. - MaxVersion uint16 - - // CurvePreferences contains the elliptic curves that will be used in - // an ECDHE handshake, in preference order. If empty, the default will - // be used. The client will use the first preference as the type for - // its key share in TLS 1.3. This may change in the future. - CurvePreferences []CurveID - - // DynamicRecordSizingDisabled disables adaptive sizing of TLS records. - // When true, the largest possible TLS record size is always used. When - // false, the size of TLS records may be adjusted in an attempt to - // improve latency. 
- DynamicRecordSizingDisabled bool - - // Renegotiation controls what types of renegotiation are supported. - // The default, none, is correct for the vast majority of applications. - Renegotiation RenegotiationSupport - - // KeyLogWriter optionally specifies a destination for TLS master secrets - // in NSS key log format that can be used to allow external programs - // such as Wireshark to decrypt TLS connections. - // See https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format. - // Use of KeyLogWriter compromises security and should only be - // used for debugging. - KeyLogWriter io.Writer - - // GetExtensions, if not nil, is called before a message that allows - // sending of extensions is sent. - // Currently only implemented for the ClientHello message (for the client) - // and for the EncryptedExtensions message (for the server). - // Only valid for TLS 1.3. - GetExtensions func(handshakeMessageType uint8) []Extension - - // ReceivedExtensions, if not nil, is called when a message that allows the - // inclusion of extensions is received. - // It is called with an empty slice of extensions, if the message didn't - // contain any extensions. - // Currently only implemented for the ClientHello message (sent by the - // client) and for the EncryptedExtensions message (sent by the server). - // Only valid for TLS 1.3. - ReceivedExtensions func(handshakeMessageType uint8, exts []Extension) - - serverInitOnce sync.Once // guards calling (*Config).serverInit - - // mutex protects sessionTicketKeys. - mutex sync.RWMutex - // sessionTicketKeys contains zero or more ticket keys. If the length - // is zero, SessionTicketsDisabled must be true. The first key is used - // for new tickets and any subsequent keys can be used to decrypt old - // tickets. 
- sessionTicketKeys []ticketKey - - // AlternativeRecordLayer is used by QUIC - AlternativeRecordLayer RecordLayer -} - -type RecordLayer interface { - SetReadKey(suite *CipherSuite, trafficSecret []byte) - SetWriteKey(suite *CipherSuite, trafficSecret []byte) - ReadHandshakeMessage() ([]byte, error) - WriteRecord([]byte) (int, error) - SendAlert(uint8) -} - -// ticketKeyNameLen is the number of bytes of identifier that is prepended to -// an encrypted session ticket in order to identify the key used to encrypt it. -const ticketKeyNameLen = 16 - -// ticketKey is the internal representation of a session ticket key. -type ticketKey struct { - // keyName is an opaque byte string that serves to identify the session - // ticket key. It's exposed as plaintext in every session ticket. - keyName [ticketKeyNameLen]byte - aesKey [16]byte - hmacKey [16]byte -} - -// ticketKeyFromBytes converts from the external representation of a session -// ticket key to a ticketKey. Externally, session ticket keys are 32 random -// bytes and this function expands that into sufficient name and key material. -func ticketKeyFromBytes(b [32]byte) (key ticketKey) { - hashed := sha512.Sum512(b[:]) - copy(key.keyName[:], hashed[:ticketKeyNameLen]) - copy(key.aesKey[:], hashed[ticketKeyNameLen:ticketKeyNameLen+16]) - copy(key.hmacKey[:], hashed[ticketKeyNameLen+16:ticketKeyNameLen+32]) - return key -} - -// maxSessionTicketLifetime is the maximum allowed lifetime of a TLS 1.3 session -// ticket, and the lifetime we set for tickets we send. -const maxSessionTicketLifetime = 7 * 24 * time.Hour - -// Clone returns a shallow clone of c. It is safe to clone a Config that is -// being used concurrently by a TLS client or server. -func (c *Config) Clone() *Config { - // Running serverInit ensures that it's safe to read - // SessionTicketsDisabled. 
- c.serverInitOnce.Do(func() { c.serverInit(nil) }) - - var sessionTicketKeys []ticketKey - c.mutex.RLock() - sessionTicketKeys = c.sessionTicketKeys - c.mutex.RUnlock() - - return &Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - GetClientCertificate: c.GetClientCertificate, - GetConfigForClient: c.GetConfigForClient, - VerifyPeerCertificate: c.VerifyPeerCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - KeyLogWriter: c.KeyLogWriter, - GetExtensions: c.GetExtensions, - ReceivedExtensions: c.ReceivedExtensions, - sessionTicketKeys: sessionTicketKeys, - } -} - -// serverInit is run under c.serverInitOnce to do initialization of c. If c was -// returned by a GetConfigForClient callback then the argument should be the -// Config that was passed to Server, otherwise it should be nil. 
-func (c *Config) serverInit(originalConfig *Config) { - if c.SessionTicketsDisabled || len(c.ticketKeys()) != 0 { - return - } - - alreadySet := false - for _, b := range c.SessionTicketKey { - if b != 0 { - alreadySet = true - break - } - } - - if !alreadySet { - if originalConfig != nil { - copy(c.SessionTicketKey[:], originalConfig.SessionTicketKey[:]) - } else if _, err := io.ReadFull(c.rand(), c.SessionTicketKey[:]); err != nil { - c.SessionTicketsDisabled = true - return - } - } - - if originalConfig != nil { - originalConfig.mutex.RLock() - c.sessionTicketKeys = originalConfig.sessionTicketKeys - originalConfig.mutex.RUnlock() - } else { - c.sessionTicketKeys = []ticketKey{ticketKeyFromBytes(c.SessionTicketKey)} - } -} - -func (c *Config) ticketKeys() []ticketKey { - c.mutex.RLock() - // c.sessionTicketKeys is constant once created. SetSessionTicketKeys - // will only update it by replacing it with a new value. - ret := c.sessionTicketKeys - c.mutex.RUnlock() - return ret -} - -// SetSessionTicketKeys updates the session ticket keys for a server. The first -// key will be used when creating new tickets, while all keys can be used for -// decrypting tickets. It is safe to call this function while the server is -// running in order to rotate the session ticket keys. The function will panic -// if keys is empty. 
-func (c *Config) SetSessionTicketKeys(keys [][32]byte) { - if len(keys) == 0 { - panic("tls: keys must have at least one key") - } - - newKeys := make([]ticketKey, len(keys)) - for i, bytes := range keys { - newKeys[i] = ticketKeyFromBytes(bytes) - } - - c.mutex.Lock() - c.sessionTicketKeys = newKeys - c.mutex.Unlock() -} - -func (c *Config) rand() io.Reader { - r := c.Rand - if r == nil { - return rand.Reader - } - return r -} - -func (c *Config) time() time.Time { - t := c.Time - if t == nil { - t = time.Now - } - return t() -} - -func (c *Config) cipherSuites() []uint16 { - s := c.CipherSuites - if s == nil { - s = defaultCipherSuites() - } - return s -} - -var supportedVersions = []uint16{ - VersionTLS13, - VersionTLS12, - VersionTLS11, - VersionTLS10, - VersionSSL30, -} - -func (c *Config) supportedVersions(isClient bool) []uint16 { - versions := make([]uint16, 0, len(supportedVersions)) - for _, v := range supportedVersions { - if c != nil && c.MinVersion != 0 && v < c.MinVersion { - continue - } - if c != nil && c.MaxVersion != 0 && v > c.MaxVersion { - continue - } - // TLS 1.0 is the minimum version supported as a client. - if isClient && v < VersionTLS10 { - continue - } - // TLS 1.3 is opt-in in Go 1.12. - if v == VersionTLS13 && !isTLS13Supported() { - continue - } - versions = append(versions, v) - } - return versions -} - -// tls13Support caches the result for isTLS13Supported. -var tls13Support struct { - sync.Once - cached bool -} - -// isTLS13Supported returns whether the program opted into TLS 1.3 via -// GODEBUG=tls13=1. It's cached after the first execution. -func isTLS13Supported() bool { - return true - tls13Support.Do(func() { - tls13Support.cached = goDebugString("tls13") == "1" - }) - return tls13Support.cached -} - -// goDebugString returns the value of the named GODEBUG key. -// GODEBUG is of the form "key=val,key2=val2". 
-func goDebugString(key string) string { - s := os.Getenv("GODEBUG") - for i := 0; i < len(s)-len(key)-1; i++ { - if i > 0 && s[i-1] != ',' { - continue - } - afterKey := s[i+len(key):] - if afterKey[0] != '=' || s[i:i+len(key)] != key { - continue - } - val := afterKey[1:] - for i, b := range val { - if b == ',' { - return val[:i] - } - } - return val - } - return "" -} - -func (c *Config) maxSupportedVersion(isClient bool) uint16 { - supportedVersions := c.supportedVersions(isClient) - if len(supportedVersions) == 0 { - return 0 - } - return supportedVersions[0] -} - -// supportedVersionsFromMax returns a list of supported versions derived from a -// legacy maximum version value. Note that only versions supported by this -// library are returned. Any newer peer will use supportedVersions anyway. -func supportedVersionsFromMax(maxVersion uint16) []uint16 { - versions := make([]uint16, 0, len(supportedVersions)) - for _, v := range supportedVersions { - if v > maxVersion { - continue - } - versions = append(versions, v) - } - return versions -} - -var defaultCurvePreferences = []CurveID{X25519, CurveP256, CurveP384, CurveP521} - -func (c *Config) curvePreferences() []CurveID { - if c == nil || len(c.CurvePreferences) == 0 { - return defaultCurvePreferences - } - return c.CurvePreferences -} - -// mutualVersion returns the protocol version to use given the advertised -// versions of the peer. Priority is given to the peer preference order. -func (c *Config) mutualVersion(isClient bool, peerVersions []uint16) (uint16, bool) { - supportedVersions := c.supportedVersions(isClient) - for _, peerVersion := range peerVersions { - for _, v := range supportedVersions { - if v == peerVersion { - return v, true - } - } - } - return 0, false -} - -// getCertificate returns the best certificate for the given ClientHelloInfo, -// defaulting to the first element of c.Certificates. 
-func (c *Config) getCertificate(clientHello *ClientHelloInfo) (*Certificate, error) { - if c.GetCertificate != nil && - (len(c.Certificates) == 0 || len(clientHello.ServerName) > 0) { - cert, err := c.GetCertificate(clientHello) - if cert != nil || err != nil { - return cert, err - } - } - - if len(c.Certificates) == 0 { - return nil, errors.New("tls: no certificates configured") - } - - if len(c.Certificates) == 1 || c.NameToCertificate == nil { - // There's only one choice, so no point doing any work. - return &c.Certificates[0], nil - } - - name := strings.ToLower(clientHello.ServerName) - for len(name) > 0 && name[len(name)-1] == '.' { - name = name[:len(name)-1] - } - - if cert, ok := c.NameToCertificate[name]; ok { - return cert, nil - } - - // try replacing labels in the name with wildcards until we get a - // match. - labels := strings.Split(name, ".") - for i := range labels { - labels[i] = "*" - candidate := strings.Join(labels, ".") - if cert, ok := c.NameToCertificate[candidate]; ok { - return cert, nil - } - } - - // If nothing matches, return the first certificate. - return &c.Certificates[0], nil -} - -// BuildNameToCertificate parses c.Certificates and builds c.NameToCertificate -// from the CommonName and SubjectAlternateName fields of each of the leaf -// certificates. 
-func (c *Config) BuildNameToCertificate() { - c.NameToCertificate = make(map[string]*Certificate) - for i := range c.Certificates { - cert := &c.Certificates[i] - x509Cert := cert.Leaf - if x509Cert == nil { - var err error - x509Cert, err = x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - continue - } - } - if len(x509Cert.Subject.CommonName) > 0 { - c.NameToCertificate[x509Cert.Subject.CommonName] = cert - } - for _, san := range x509Cert.DNSNames { - c.NameToCertificate[san] = cert - } - } -} - -const ( - keyLogLabelTLS12 = "CLIENT_RANDOM" - keyLogLabelClientHandshake = "CLIENT_HANDSHAKE_TRAFFIC_SECRET" - keyLogLabelServerHandshake = "SERVER_HANDSHAKE_TRAFFIC_SECRET" - keyLogLabelClientTraffic = "CLIENT_TRAFFIC_SECRET_0" - keyLogLabelServerTraffic = "SERVER_TRAFFIC_SECRET_0" -) - -func (c *Config) writeKeyLog(label string, clientRandom, secret []byte) error { - if c.KeyLogWriter == nil { - return nil - } - - logLine := []byte(fmt.Sprintf("%s %x %x\n", label, clientRandom, secret)) - - writerMutex.Lock() - _, err := c.KeyLogWriter.Write(logLine) - writerMutex.Unlock() - - return err -} - -// writerMutex protects all KeyLogWriters globally. It is rarely enabled, -// and is only for debugging, so a global mutex saves space. -var writerMutex sync.Mutex - -// A Certificate is a tls.Certificate -type Certificate = tls.Certificate - -type handshakeMessage interface { - marshal() []byte - unmarshal([]byte) bool -} - -// lruSessionCache is a ClientSessionCache implementation that uses an LRU -// caching strategy. -type lruSessionCache struct { - sync.Mutex - - m map[string]*list.Element - q *list.List - capacity int -} - -type lruSessionCacheEntry struct { - sessionKey string - state *ClientSessionState -} - -// NewLRUClientSessionCache returns a ClientSessionCache with the given -// capacity that uses an LRU strategy. If capacity is < 1, a default capacity -// is used instead. 
-func NewLRUClientSessionCache(capacity int) ClientSessionCache { - const defaultSessionCacheCapacity = 64 - - if capacity < 1 { - capacity = defaultSessionCacheCapacity - } - return &lruSessionCache{ - m: make(map[string]*list.Element), - q: list.New(), - capacity: capacity, - } -} - -// Put adds the provided (sessionKey, cs) pair to the cache. If cs is nil, the entry -// corresponding to sessionKey is removed from the cache instead. -func (c *lruSessionCache) Put(sessionKey string, cs *ClientSessionState) { - c.Lock() - defer c.Unlock() - - if elem, ok := c.m[sessionKey]; ok { - if cs == nil { - c.q.Remove(elem) - delete(c.m, sessionKey) - } else { - entry := elem.Value.(*lruSessionCacheEntry) - entry.state = cs - c.q.MoveToFront(elem) - } - return - } - - if c.q.Len() < c.capacity { - entry := &lruSessionCacheEntry{sessionKey, cs} - c.m[sessionKey] = c.q.PushFront(entry) - return - } - - elem := c.q.Back() - entry := elem.Value.(*lruSessionCacheEntry) - delete(c.m, entry.sessionKey) - entry.sessionKey = sessionKey - entry.state = cs - c.q.MoveToFront(elem) - c.m[sessionKey] = elem -} - -// Get returns the ClientSessionState value associated with a given key. It -// returns (nil, false) if no value is found. -func (c *lruSessionCache) Get(sessionKey string) (*ClientSessionState, bool) { - c.Lock() - defer c.Unlock() - - if elem, ok := c.m[sessionKey]; ok { - c.q.MoveToFront(elem) - return elem.Value.(*lruSessionCacheEntry).state, true - } - return nil, false -} - -// TODO(jsing): Make these available to both crypto/x509 and crypto/tls. 
-type dsaSignature struct { - R, S *big.Int -} - -type ecdsaSignature dsaSignature - -var emptyConfig Config - -func defaultConfig() *Config { - return &emptyConfig -} - -var ( - once sync.Once - varDefaultCipherSuites []uint16 - varDefaultCipherSuitesTLS13 []uint16 -) - -func defaultCipherSuites() []uint16 { - once.Do(initDefaultCipherSuites) - return varDefaultCipherSuites -} - -func defaultCipherSuitesTLS13() []uint16 { - once.Do(initDefaultCipherSuites) - return varDefaultCipherSuitesTLS13 -} - -func initDefaultCipherSuites() { - var topCipherSuites []uint16 - - // Check the cpu flags for each platform that has optimized GCM implementations. - // Worst case, these variables will just all be false. - var ( - hasGCMAsmAMD64 = cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ - hasGCMAsmARM64 = cpu.ARM64.HasAES && cpu.ARM64.HasPMULL - // Keep in sync with crypto/aes/cipher_s390x.go. - // TODO: check for s390 - // hasGCMAsmS390X = cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM) - hasGCMAsmS390X = false - - hasGCMAsm = hasGCMAsmAMD64 || hasGCMAsmARM64 || hasGCMAsmS390X - ) - - if hasGCMAsm { - // If AES-GCM hardware is provided then prioritise AES-GCM - // cipher suites. - topCipherSuites = []uint16{ - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - } - varDefaultCipherSuitesTLS13 = []uint16{ - TLS_AES_128_GCM_SHA256, - TLS_CHACHA20_POLY1305_SHA256, - TLS_AES_256_GCM_SHA384, - } - } else { - // Without AES-GCM hardware, we put the ChaCha20-Poly1305 - // cipher suites first. 
- topCipherSuites = []uint16{ - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - } - varDefaultCipherSuitesTLS13 = []uint16{ - TLS_CHACHA20_POLY1305_SHA256, - TLS_AES_128_GCM_SHA256, - TLS_AES_256_GCM_SHA384, - } - } - - varDefaultCipherSuites = make([]uint16, 0, len(cipherSuites)) - varDefaultCipherSuites = append(varDefaultCipherSuites, topCipherSuites...) - -NextCipherSuite: - for _, suite := range cipherSuites { - if suite.flags&suiteDefaultOff != 0 { - continue - } - for _, existing := range varDefaultCipherSuites { - if existing == suite.id { - continue NextCipherSuite - } - } - varDefaultCipherSuites = append(varDefaultCipherSuites, suite.id) - } -} - -func unexpectedMessageError(wanted, got interface{}) error { - return fmt.Errorf("tls: received unexpected handshake message of type %T when waiting for %T", got, wanted) -} - -func isSupportedSignatureAlgorithm(sigAlg SignatureScheme, supportedSignatureAlgorithms []SignatureScheme) bool { - for _, s := range supportedSignatureAlgorithms { - if s == sigAlg { - return true - } - } - return false -} - -// signatureFromSignatureScheme maps a signature algorithm to the underlying -// signature method (without hash function). 
-func signatureFromSignatureScheme(signatureAlgorithm SignatureScheme) uint8 { - switch signatureAlgorithm { - case PKCS1WithSHA1, PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512: - return signaturePKCS1v15 - case PSSWithSHA256, PSSWithSHA384, PSSWithSHA512: - return signatureRSAPSS - case ECDSAWithSHA1, ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512: - return signatureECDSA - default: - return 0 - } -} diff --git a/vendor/github.com/marten-seemann/qtls/conn.go b/vendor/github.com/marten-seemann/qtls/conn.go deleted file mode 100644 index bbb67e563..000000000 --- a/vendor/github.com/marten-seemann/qtls/conn.go +++ /dev/null @@ -1,1467 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TLS low level connection and record layer - -package qtls - -import ( - "bytes" - "crypto/cipher" - "crypto/subtle" - "crypto/x509" - "errors" - "fmt" - "io" - "net" - "sync" - "sync/atomic" - "time" -) - -// A Conn represents a secured connection. -// It implements the net.Conn interface. -type Conn struct { - // constant - conn net.Conn - isClient bool - - // handshakeStatus is 1 if the connection is currently transferring - // application data (i.e. is not currently processing a handshake). - // This field is only to be accessed with sync/atomic. - handshakeStatus uint32 - // constant after handshake; protected by handshakeMutex - handshakeMutex sync.Mutex - handshakeErr error // error resulting from handshake - vers uint16 // TLS version - haveVers bool // version has been negotiated - config *Config // configuration passed to constructor - // handshakes counts the number of handshakes performed on the - // connection so far. If renegotiation is disabled then this is either - // zero or one. 
- handshakes int - didResume bool // whether this connection was a session resumption - cipherSuite uint16 - ocspResponse []byte // stapled OCSP response - scts [][]byte // signed certificate timestamps from server - peerCertificates []*x509.Certificate - // verifiedChains contains the certificate chains that we built, as - // opposed to the ones presented by the server. - verifiedChains [][]*x509.Certificate - // serverName contains the server name indicated by the client, if any. - serverName string - // secureRenegotiation is true if the server echoed the secure - // renegotiation extension. (This is meaningless as a server because - // renegotiation is not supported in that case.) - secureRenegotiation bool - // ekm is a closure for exporting keying material. - ekm func(label string, context []byte, length int) ([]byte, error) - // resumptionSecret is the resumption_master_secret for handling - // NewSessionTicket messages. nil if config.SessionTicketsDisabled. - resumptionSecret []byte - - // clientFinishedIsFirst is true if the client sent the first Finished - // message during the most recent handshake. This is recorded because - // the first transmitted Finished message is the tls-unique - // channel-binding value. - clientFinishedIsFirst bool - - // closeNotifyErr is any error from sending the alertCloseNotify record. - closeNotifyErr error - // closeNotifySent is true if the Conn attempted to send an - // alertCloseNotify record. - closeNotifySent bool - - // clientFinished and serverFinished contain the Finished message sent - // by the client or server in the most recent handshake. This is - // retained to support the renegotiation extension and tls-unique - // channel-binding. 
- clientFinished [12]byte - serverFinished [12]byte - - clientProtocol string - clientProtocolFallback bool - - // input/output - in, out halfConn - rawInput bytes.Buffer // raw input, starting with a record header - input bytes.Reader // application data waiting to be read, from rawInput.Next - hand bytes.Buffer // handshake data waiting to be read - outBuf []byte // scratch buffer used by out.encrypt - buffering bool // whether records are buffered in sendBuf - sendBuf []byte // a buffer of records waiting to be sent - - // bytesSent counts the bytes of application data sent. - // packetsSent counts packets. - bytesSent int64 - packetsSent int64 - - // retryCount counts the number of consecutive non-advancing records - // received by Conn.readRecord. That is, records that neither advance the - // handshake, nor deliver application data. Protected by in.Mutex. - retryCount int - - // activeCall is an atomic int32; the low bit is whether Close has - // been called. the rest of the bits are the number of goroutines - // in Conn.Write. - activeCall int32 - - tmp [16]byte -} - -// Access to net.Conn methods. -// Cannot just embed net.Conn because that would -// export the struct field too. - -// LocalAddr returns the local network address. -func (c *Conn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -// RemoteAddr returns the remote network address. -func (c *Conn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -// SetDeadline sets the read and write deadlines associated with the connection. -// A zero value for t means Read and Write will not time out. -// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error. -func (c *Conn) SetDeadline(t time.Time) error { - return c.conn.SetDeadline(t) -} - -// SetReadDeadline sets the read deadline on the underlying connection. -// A zero value for t means Read will not time out. 
-func (c *Conn) SetReadDeadline(t time.Time) error { - return c.conn.SetReadDeadline(t) -} - -// SetWriteDeadline sets the write deadline on the underlying connection. -// A zero value for t means Write will not time out. -// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error. -func (c *Conn) SetWriteDeadline(t time.Time) error { - return c.conn.SetWriteDeadline(t) -} - -// A halfConn represents one direction of the record layer -// connection, either sending or receiving. -type halfConn struct { - sync.Mutex - - err error // first permanent error - version uint16 // protocol version - cipher interface{} // cipher algorithm - mac macFunction - seq [8]byte // 64-bit sequence number - additionalData [13]byte // to avoid allocs; interface method args escape - - nextCipher interface{} // next encryption state - nextMac macFunction // next MAC algorithm - - trafficSecret []byte // current TLS 1.3 traffic secret - - setKeyCallback func(suite *CipherSuite, trafficSecret []byte) -} - -func (hc *halfConn) setErrorLocked(err error) error { - hc.err = err - return err -} - -// prepareCipherSpec sets the encryption and MAC states -// that a subsequent changeCipherSpec will use. -func (hc *halfConn) prepareCipherSpec(version uint16, cipher interface{}, mac macFunction) { - hc.version = version - hc.nextCipher = cipher - hc.nextMac = mac -} - -// changeCipherSpec changes the encryption and MAC states -// to the ones previously passed to prepareCipherSpec. 
-func (hc *halfConn) changeCipherSpec() error { - if hc.nextCipher == nil || hc.version == VersionTLS13 { - return alertInternalError - } - hc.cipher = hc.nextCipher - hc.mac = hc.nextMac - hc.nextCipher = nil - hc.nextMac = nil - for i := range hc.seq { - hc.seq[i] = 0 - } - return nil -} - -func (hc *halfConn) exportKey(suite *cipherSuiteTLS13, trafficSecret []byte) { - if hc.setKeyCallback != nil { - hc.setKeyCallback(&CipherSuite{suite}, trafficSecret) - } -} - -func (hc *halfConn) setTrafficSecret(suite *cipherSuiteTLS13, secret []byte) { - hc.trafficSecret = secret - key, iv := suite.trafficKey(secret) - hc.cipher = suite.aead(key, iv) - for i := range hc.seq { - hc.seq[i] = 0 - } -} - -// incSeq increments the sequence number. -func (hc *halfConn) incSeq() { - for i := 7; i >= 0; i-- { - hc.seq[i]++ - if hc.seq[i] != 0 { - return - } - } - - // Not allowed to let sequence number wrap. - // Instead, must renegotiate before it does. - // Not likely enough to bother. - panic("TLS: sequence number wraparound") -} - -// explicitNonceLen returns the number of bytes of explicit nonce or IV included -// in each record. Explicit nonces are present only in CBC modes after TLS 1.0 -// and in certain AEAD modes in TLS 1.2. -func (hc *halfConn) explicitNonceLen() int { - if hc.cipher == nil { - return 0 - } - - switch c := hc.cipher.(type) { - case cipher.Stream: - return 0 - case aead: - return c.explicitNonceLen() - case cbcMode: - // TLS 1.1 introduced a per-record explicit IV to fix the BEAST attack. - if hc.version >= VersionTLS11 { - return c.BlockSize() - } - return 0 - default: - panic("unknown cipher type") - } -} - -// extractPadding returns, in constant time, the length of the padding to remove -// from the end of payload. It also returns a byte which is equal to 255 if the -// padding was valid and 0 otherwise. See RFC 2246, Section 6.2.3.2. 
-func extractPadding(payload []byte) (toRemove int, good byte) { - if len(payload) < 1 { - return 0, 0 - } - - paddingLen := payload[len(payload)-1] - t := uint(len(payload)-1) - uint(paddingLen) - // if len(payload) >= (paddingLen - 1) then the MSB of t is zero - good = byte(int32(^t) >> 31) - - // The maximum possible padding length plus the actual length field - toCheck := 256 - // The length of the padded data is public, so we can use an if here - if toCheck > len(payload) { - toCheck = len(payload) - } - - for i := 0; i < toCheck; i++ { - t := uint(paddingLen) - uint(i) - // if i <= paddingLen then the MSB of t is zero - mask := byte(int32(^t) >> 31) - b := payload[len(payload)-1-i] - good &^= mask&paddingLen ^ mask&b - } - - // We AND together the bits of good and replicate the result across - // all the bits. - good &= good << 4 - good &= good << 2 - good &= good << 1 - good = uint8(int8(good) >> 7) - - toRemove = int(paddingLen) + 1 - return -} - -// extractPaddingSSL30 is a replacement for extractPadding in the case that the -// protocol version is SSLv3. In this version, the contents of the padding -// are random and cannot be checked. -func extractPaddingSSL30(payload []byte) (toRemove int, good byte) { - if len(payload) < 1 { - return 0, 0 - } - - paddingLen := int(payload[len(payload)-1]) + 1 - if paddingLen > len(payload) { - return 0, 0 - } - - return paddingLen, 255 -} - -func roundUp(a, b int) int { - return a + (b-a%b)%b -} - -// cbcMode is an interface for block ciphers using cipher block chaining. -type cbcMode interface { - cipher.BlockMode - SetIV([]byte) -} - -// decrypt authenticates and decrypts the record if protection is active at -// this stage. The returned plaintext might overlap with the input. 
-func (hc *halfConn) decrypt(record []byte) ([]byte, recordType, error) { - var plaintext []byte - typ := recordType(record[0]) - payload := record[recordHeaderLen:] - - // In TLS 1.3, change_cipher_spec messages are to be ignored without being - // decrypted. See RFC 8446, Appendix D.4. - if hc.version == VersionTLS13 && typ == recordTypeChangeCipherSpec { - return payload, typ, nil - } - - paddingGood := byte(255) - paddingLen := 0 - - explicitNonceLen := hc.explicitNonceLen() - - if hc.cipher != nil { - switch c := hc.cipher.(type) { - case cipher.Stream: - c.XORKeyStream(payload, payload) - case aead: - if len(payload) < explicitNonceLen { - return nil, 0, alertBadRecordMAC - } - nonce := payload[:explicitNonceLen] - if len(nonce) == 0 { - nonce = hc.seq[:] - } - payload = payload[explicitNonceLen:] - - additionalData := hc.additionalData[:] - if hc.version == VersionTLS13 { - additionalData = record[:recordHeaderLen] - } else { - copy(additionalData, hc.seq[:]) - copy(additionalData[8:], record[:3]) - n := len(payload) - c.Overhead() - additionalData[11] = byte(n >> 8) - additionalData[12] = byte(n) - } - - var err error - plaintext, err = c.Open(payload[:0], nonce, payload, additionalData) - if err != nil { - return nil, 0, alertBadRecordMAC - } - case cbcMode: - blockSize := c.BlockSize() - minPayload := explicitNonceLen + roundUp(hc.mac.Size()+1, blockSize) - if len(payload)%blockSize != 0 || len(payload) < minPayload { - return nil, 0, alertBadRecordMAC - } - - if explicitNonceLen > 0 { - c.SetIV(payload[:explicitNonceLen]) - payload = payload[explicitNonceLen:] - } - c.CryptBlocks(payload, payload) - - // In a limited attempt to protect against CBC padding oracles like - // Lucky13, the data past paddingLen (which is secret) is passed to - // the MAC function as extra data, to be fed into the HMAC after - // computing the digest. 
This makes the MAC roughly constant time as - // long as the digest computation is constant time and does not - // affect the subsequent write, modulo cache effects. - if hc.version == VersionSSL30 { - paddingLen, paddingGood = extractPaddingSSL30(payload) - } else { - paddingLen, paddingGood = extractPadding(payload) - } - default: - panic("unknown cipher type") - } - - if hc.version == VersionTLS13 { - if typ != recordTypeApplicationData { - return nil, 0, alertUnexpectedMessage - } - if len(plaintext) > maxPlaintext+1 { - return nil, 0, alertRecordOverflow - } - // Remove padding and find the ContentType scanning from the end. - for i := len(plaintext) - 1; i >= 0; i-- { - if plaintext[i] != 0 { - typ = recordType(plaintext[i]) - plaintext = plaintext[:i] - break - } - if i == 0 { - return nil, 0, alertUnexpectedMessage - } - } - } - } else { - plaintext = payload - } - - if hc.mac != nil { - macSize := hc.mac.Size() - if len(payload) < macSize { - return nil, 0, alertBadRecordMAC - } - - n := len(payload) - macSize - paddingLen - n = subtle.ConstantTimeSelect(int(uint32(n)>>31), 0, n) // if n < 0 { n = 0 } - record[3] = byte(n >> 8) - record[4] = byte(n) - remoteMAC := payload[n : n+macSize] - localMAC := hc.mac.MAC(hc.seq[0:], record[:recordHeaderLen], payload[:n], payload[n+macSize:]) - - if subtle.ConstantTimeCompare(localMAC, remoteMAC) != 1 || paddingGood != 255 { - return nil, 0, alertBadRecordMAC - } - - plaintext = payload[:n] - } - - hc.incSeq() - return plaintext, typ, nil -} - -func (c *Conn) setAlternativeRecordLayer() { - if c.config.AlternativeRecordLayer != nil { - c.in.setKeyCallback = c.config.AlternativeRecordLayer.SetReadKey - c.out.setKeyCallback = c.config.AlternativeRecordLayer.SetWriteKey - } -} - -// sliceForAppend extends the input slice by n bytes. head is the full extended -// slice, while tail is the appended part. If the original slice has sufficient -// capacity no allocation is performed. 
-func sliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return -} - -// encrypt encrypts payload, adding the appropriate nonce and/or MAC, and -// appends it to record, which contains the record header. -func (hc *halfConn) encrypt(record, payload []byte, rand io.Reader) ([]byte, error) { - if hc.cipher == nil { - return append(record, payload...), nil - } - - var explicitNonce []byte - if explicitNonceLen := hc.explicitNonceLen(); explicitNonceLen > 0 { - record, explicitNonce = sliceForAppend(record, explicitNonceLen) - if _, isCBC := hc.cipher.(cbcMode); !isCBC && explicitNonceLen < 16 { - // The AES-GCM construction in TLS has an explicit nonce so that the - // nonce can be random. However, the nonce is only 8 bytes which is - // too small for a secure, random nonce. Therefore we use the - // sequence number as the nonce. The 3DES-CBC construction also has - // an 8 bytes nonce but its nonces must be unpredictable (see RFC - // 5246, Appendix F.3), forcing us to use randomness. That's not - // 3DES' biggest problem anyway because the birthday bound on block - // collision is reached first due to its simlarly small block size - // (see the Sweet32 attack). - copy(explicitNonce, hc.seq[:]) - } else { - if _, err := io.ReadFull(rand, explicitNonce); err != nil { - return nil, err - } - } - } - - var mac []byte - if hc.mac != nil { - mac = hc.mac.MAC(hc.seq[:], record[:recordHeaderLen], payload, nil) - } - - var dst []byte - switch c := hc.cipher.(type) { - case cipher.Stream: - record, dst = sliceForAppend(record, len(payload)+len(mac)) - c.XORKeyStream(dst[:len(payload)], payload) - c.XORKeyStream(dst[len(payload):], mac) - case aead: - nonce := explicitNonce - if len(nonce) == 0 { - nonce = hc.seq[:] - } - - if hc.version == VersionTLS13 { - record = append(record, payload...) 
- - // Encrypt the actual ContentType and replace the plaintext one. - record = append(record, record[0]) - record[0] = byte(recordTypeApplicationData) - - n := len(payload) + 1 + c.Overhead() - record[3] = byte(n >> 8) - record[4] = byte(n) - - record = c.Seal(record[:recordHeaderLen], - nonce, record[recordHeaderLen:], record[:recordHeaderLen]) - } else { - copy(hc.additionalData[:], hc.seq[:]) - copy(hc.additionalData[8:], record) - record = c.Seal(record, nonce, payload, hc.additionalData[:]) - } - case cbcMode: - blockSize := c.BlockSize() - plaintextLen := len(payload) + len(mac) - paddingLen := blockSize - plaintextLen%blockSize - record, dst = sliceForAppend(record, plaintextLen+paddingLen) - copy(dst, payload) - copy(dst[len(payload):], mac) - for i := plaintextLen; i < len(dst); i++ { - dst[i] = byte(paddingLen - 1) - } - if len(explicitNonce) > 0 { - c.SetIV(explicitNonce) - } - c.CryptBlocks(dst, dst) - default: - panic("unknown cipher type") - } - - // Update length to include nonce, MAC and any block padding needed. - n := len(record) - recordHeaderLen - record[3] = byte(n >> 8) - record[4] = byte(n) - hc.incSeq() - - return record, nil -} - -// RecordHeaderError is returned when a TLS record header is invalid. -type RecordHeaderError struct { - // Msg contains a human readable string that describes the error. - Msg string - // RecordHeader contains the five bytes of TLS record header that - // triggered the error. - RecordHeader [5]byte - // Conn provides the underlying net.Conn in the case that a client - // sent an initial handshake that didn't look like TLS. - // It is nil if there's already been a handshake or a TLS alert has - // been written to the connection. 
- Conn net.Conn -} - -func (e RecordHeaderError) Error() string { return "tls: " + e.Msg } - -func (c *Conn) newRecordHeaderError(conn net.Conn, msg string) (err RecordHeaderError) { - err.Msg = msg - err.Conn = conn - copy(err.RecordHeader[:], c.rawInput.Bytes()) - return err -} - -func (c *Conn) readRecord() error { - return c.readRecordOrCCS(false) -} - -func (c *Conn) readChangeCipherSpec() error { - return c.readRecordOrCCS(true) -} - -// readRecordOrCCS reads one or more TLS records from the connection and -// updates the record layer state. Some invariants: -// * c.in must be locked -// * c.input must be empty -// During the handshake one and only one of the following will happen: -// - c.hand grows -// - c.in.changeCipherSpec is called -// - an error is returned -// After the handshake one and only one of the following will happen: -// - c.hand grows -// - c.input is set -// - an error is returned -func (c *Conn) readRecordOrCCS(expectChangeCipherSpec bool) error { - if c.in.err != nil { - return c.in.err - } - handshakeComplete := c.handshakeComplete() - - // This function modifies c.rawInput, which owns the c.input memory. - if c.input.Len() != 0 { - return c.in.setErrorLocked(errors.New("tls: internal error: attempted to read record with pending application data")) - } - c.input.Reset(nil) - - // Read header, payload. - if err := c.readFromUntil(c.conn, recordHeaderLen); err != nil { - // RFC 8446, Section 6.1 suggests that EOF without an alertCloseNotify - // is an error, but popular web sites seem to do this, so we accept it - // if and only if at the record boundary. 
- if err == io.ErrUnexpectedEOF && c.rawInput.Len() == 0 { - err = io.EOF - } - if e, ok := err.(net.Error); !ok || !e.Temporary() { - c.in.setErrorLocked(err) - } - return err - } - hdr := c.rawInput.Bytes()[:recordHeaderLen] - typ := recordType(hdr[0]) - - // No valid TLS record has a type of 0x80, however SSLv2 handshakes - // start with a uint16 length where the MSB is set and the first record - // is always < 256 bytes long. Therefore typ == 0x80 strongly suggests - // an SSLv2 client. - if !handshakeComplete && typ == 0x80 { - c.sendAlert(alertProtocolVersion) - return c.in.setErrorLocked(c.newRecordHeaderError(nil, "unsupported SSLv2 handshake received")) - } - - vers := uint16(hdr[1])<<8 | uint16(hdr[2]) - n := int(hdr[3])<<8 | int(hdr[4]) - if c.haveVers && c.vers != VersionTLS13 && vers != c.vers { - c.sendAlert(alertProtocolVersion) - msg := fmt.Sprintf("received record with version %x when expecting version %x", vers, c.vers) - return c.in.setErrorLocked(c.newRecordHeaderError(nil, msg)) - } - if !c.haveVers { - // First message, be extra suspicious: this might not be a TLS - // client. Bail out before reading a full 'body', if possible. - // The current max version is 3.3 so if the version is >= 16.0, - // it's probably not real. - if (typ != recordTypeAlert && typ != recordTypeHandshake) || vers >= 0x1000 { - return c.in.setErrorLocked(c.newRecordHeaderError(c.conn, "first record does not look like a TLS handshake")) - } - } - if c.vers == VersionTLS13 && n > maxCiphertextTLS13 || n > maxCiphertext { - c.sendAlert(alertRecordOverflow) - msg := fmt.Sprintf("oversized record received with length %d", n) - return c.in.setErrorLocked(c.newRecordHeaderError(nil, msg)) - } - if err := c.readFromUntil(c.conn, recordHeaderLen+n); err != nil { - if e, ok := err.(net.Error); !ok || !e.Temporary() { - c.in.setErrorLocked(err) - } - return err - } - - // Process message. 
- record := c.rawInput.Next(recordHeaderLen + n) - data, typ, err := c.in.decrypt(record) - if err != nil { - return c.in.setErrorLocked(c.sendAlert(err.(alert))) - } - if len(data) > maxPlaintext { - return c.in.setErrorLocked(c.sendAlert(alertRecordOverflow)) - } - - // Application Data messages are always protected. - if c.in.cipher == nil && typ == recordTypeApplicationData { - return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) - } - - if typ != recordTypeAlert && typ != recordTypeChangeCipherSpec && len(data) > 0 { - // This is a state-advancing message: reset the retry count. - c.retryCount = 0 - } - - // Handshake messages MUST NOT be interleaved with other record types in TLS 1.3. - if c.vers == VersionTLS13 && typ != recordTypeHandshake && c.hand.Len() > 0 { - return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) - } - - switch typ { - default: - return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) - - case recordTypeAlert: - if len(data) != 2 { - return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) - } - if alert(data[1]) == alertCloseNotify { - return c.in.setErrorLocked(io.EOF) - } - if c.vers == VersionTLS13 { - return c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])}) - } - switch data[0] { - case alertLevelWarning: - // Drop the record on the floor and retry. - return c.retryReadRecord(expectChangeCipherSpec) - case alertLevelError: - return c.in.setErrorLocked(&net.OpError{Op: "remote error", Err: alert(data[1])}) - default: - return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) - } - - case recordTypeChangeCipherSpec: - if len(data) != 1 || data[0] != 1 { - return c.in.setErrorLocked(c.sendAlert(alertDecodeError)) - } - // Handshake messages are not allowed to fragment across the CCS. - if c.hand.Len() > 0 { - return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) - } - // In TLS 1.3, change_cipher_spec records are ignored until the - // Finished. 
See RFC 8446, Appendix D.4. Note that according to Section - // 5, a server can send a ChangeCipherSpec before its ServerHello, when - // c.vers is still unset. That's not useful though and suspicious if the - // server then selects a lower protocol version, so don't allow that. - if c.vers == VersionTLS13 { - return c.retryReadRecord(expectChangeCipherSpec) - } - if !expectChangeCipherSpec { - return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) - } - if err := c.in.changeCipherSpec(); err != nil { - return c.in.setErrorLocked(c.sendAlert(err.(alert))) - } - - case recordTypeApplicationData: - if !handshakeComplete || expectChangeCipherSpec { - return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) - } - // Some OpenSSL servers send empty records in order to randomize the - // CBC IV. Ignore a limited number of empty records. - if len(data) == 0 { - return c.retryReadRecord(expectChangeCipherSpec) - } - // Note that data is owned by c.rawInput, following the Next call above, - // to avoid copying the plaintext. This is safe because c.rawInput is - // not read from or written to until c.input is drained. - c.input.Reset(data) - - case recordTypeHandshake: - if len(data) == 0 || expectChangeCipherSpec { - return c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) - } - c.hand.Write(data) - } - - return nil -} - -// retryReadRecord recurses into readRecordOrCCS to drop a non-advancing record, like -// a warning alert, empty application_data, or a change_cipher_spec in TLS 1.3. -func (c *Conn) retryReadRecord(expectChangeCipherSpec bool) error { - c.retryCount++ - if c.retryCount > maxUselessRecords { - c.sendAlert(alertUnexpectedMessage) - return c.in.setErrorLocked(errors.New("tls: too many ignored records")) - } - return c.readRecordOrCCS(expectChangeCipherSpec) -} - -// atLeastReader reads from R, stopping with EOF once at least N bytes have been -// read. 
It is different from an io.LimitedReader in that it doesn't cut short -// the last Read call, and in that it considers an early EOF an error. -type atLeastReader struct { - R io.Reader - N int64 -} - -func (r *atLeastReader) Read(p []byte) (int, error) { - if r.N <= 0 { - return 0, io.EOF - } - n, err := r.R.Read(p) - r.N -= int64(n) // won't underflow unless len(p) >= n > 9223372036854775809 - if r.N > 0 && err == io.EOF { - return n, io.ErrUnexpectedEOF - } - if r.N <= 0 && err == nil { - return n, io.EOF - } - return n, err -} - -// readFromUntil reads from r into c.rawInput until c.rawInput contains -// at least n bytes or else returns an error. -func (c *Conn) readFromUntil(r io.Reader, n int) error { - if c.rawInput.Len() >= n { - return nil - } - needs := n - c.rawInput.Len() - // There might be extra input waiting on the wire. Make a best effort - // attempt to fetch it so that it can be used in (*Conn).Read to - // "predict" closeNotify alerts. - c.rawInput.Grow(needs + bytes.MinRead) - _, err := c.rawInput.ReadFrom(&atLeastReader{r, int64(needs)}) - return err -} - -// sendAlert sends a TLS alert message. -func (c *Conn) sendAlertLocked(err alert) error { - switch err { - case alertNoRenegotiation, alertCloseNotify: - c.tmp[0] = alertLevelWarning - default: - c.tmp[0] = alertLevelError - } - c.tmp[1] = byte(err) - - _, writeErr := c.writeRecordLocked(recordTypeAlert, c.tmp[0:2]) - if err == alertCloseNotify { - // closeNotify is a special case in that it isn't an error. - return writeErr - } - - return c.out.setErrorLocked(&net.OpError{Op: "local error", Err: err}) -} - -// sendAlert sends a TLS alert message. 
-func (c *Conn) sendAlert(err alert) error { - if c.config.AlternativeRecordLayer != nil { - c.config.AlternativeRecordLayer.SendAlert(uint8(err)) - return &net.OpError{Op: "local error", Err: err} - } - - c.out.Lock() - defer c.out.Unlock() - return c.sendAlertLocked(err) -} - -const ( - // tcpMSSEstimate is a conservative estimate of the TCP maximum segment - // size (MSS). A constant is used, rather than querying the kernel for - // the actual MSS, to avoid complexity. The value here is the IPv6 - // minimum MTU (1280 bytes) minus the overhead of an IPv6 header (40 - // bytes) and a TCP header with timestamps (32 bytes). - tcpMSSEstimate = 1208 - - // recordSizeBoostThreshold is the number of bytes of application data - // sent after which the TLS record size will be increased to the - // maximum. - recordSizeBoostThreshold = 128 * 1024 -) - -// maxPayloadSizeForWrite returns the maximum TLS payload size to use for the -// next application data record. There is the following trade-off: -// -// - For latency-sensitive applications, such as web browsing, each TLS -// record should fit in one TCP segment. -// - For throughput-sensitive applications, such as large file transfers, -// larger TLS records better amortize framing and encryption overheads. -// -// A simple heuristic that works well in practice is to use small records for -// the first 1MB of data, then use larger records for subsequent data, and -// reset back to smaller records after the connection becomes idle. See "High -// Performance Web Networking", Chapter 4, or: -// https://www.igvita.com/2013/10/24/optimizing-tls-record-size-and-buffering-latency/ -// -// In the interests of simplicity and determinism, this code does not attempt -// to reset the record size once the connection is idle, however. 
-func (c *Conn) maxPayloadSizeForWrite(typ recordType) int { - if c.config.DynamicRecordSizingDisabled || typ != recordTypeApplicationData { - return maxPlaintext - } - - if c.bytesSent >= recordSizeBoostThreshold { - return maxPlaintext - } - - // Subtract TLS overheads to get the maximum payload size. - payloadBytes := tcpMSSEstimate - recordHeaderLen - c.out.explicitNonceLen() - if c.out.cipher != nil { - switch ciph := c.out.cipher.(type) { - case cipher.Stream: - payloadBytes -= c.out.mac.Size() - case cipher.AEAD: - payloadBytes -= ciph.Overhead() - case cbcMode: - blockSize := ciph.BlockSize() - // The payload must fit in a multiple of blockSize, with - // room for at least one padding byte. - payloadBytes = (payloadBytes & ^(blockSize - 1)) - 1 - // The MAC is appended before padding so affects the - // payload size directly. - payloadBytes -= c.out.mac.Size() - default: - panic("unknown cipher type") - } - } - if c.vers == VersionTLS13 { - payloadBytes-- // encrypted ContentType - } - - // Allow packet growth in arithmetic progression up to max. - pkt := c.packetsSent - c.packetsSent++ - if pkt > 1000 { - return maxPlaintext // avoid overflow in multiply below - } - - n := payloadBytes * int(pkt+1) - if n > maxPlaintext { - n = maxPlaintext - } - return n -} - -func (c *Conn) write(data []byte) (int, error) { - if c.buffering { - c.sendBuf = append(c.sendBuf, data...) - return len(data), nil - } - - n, err := c.conn.Write(data) - c.bytesSent += int64(n) - return n, err -} - -func (c *Conn) flush() (int, error) { - if len(c.sendBuf) == 0 { - return 0, nil - } - - n, err := c.conn.Write(c.sendBuf) - c.bytesSent += int64(n) - c.sendBuf = nil - c.buffering = false - return n, err -} - -// writeRecordLocked writes a TLS record with the given type and payload to the -// connection and updates the record layer state. 
-func (c *Conn) writeRecordLocked(typ recordType, data []byte) (int, error) { - var n int - for len(data) > 0 { - m := len(data) - if maxPayload := c.maxPayloadSizeForWrite(typ); m > maxPayload { - m = maxPayload - } - - _, c.outBuf = sliceForAppend(c.outBuf[:0], recordHeaderLen) - c.outBuf[0] = byte(typ) - vers := c.vers - if vers == 0 { - // Some TLS servers fail if the record version is - // greater than TLS 1.0 for the initial ClientHello. - vers = VersionTLS10 - } else if vers == VersionTLS13 { - // TLS 1.3 froze the record layer version to 1.2. - // See RFC 8446, Section 5.1. - vers = VersionTLS12 - } - c.outBuf[1] = byte(vers >> 8) - c.outBuf[2] = byte(vers) - c.outBuf[3] = byte(m >> 8) - c.outBuf[4] = byte(m) - - var err error - c.outBuf, err = c.out.encrypt(c.outBuf, data[:m], c.config.rand()) - if err != nil { - return n, err - } - if _, err := c.write(c.outBuf); err != nil { - return n, err - } - n += m - data = data[m:] - } - - if typ == recordTypeChangeCipherSpec && c.vers != VersionTLS13 { - if err := c.out.changeCipherSpec(); err != nil { - return n, c.sendAlertLocked(err.(alert)) - } - } - - return n, nil -} - -// writeRecord writes a TLS record with the given type and payload to the -// connection and updates the record layer state. -func (c *Conn) writeRecord(typ recordType, data []byte) (int, error) { - if c.config.AlternativeRecordLayer != nil { - if typ == recordTypeChangeCipherSpec { - return len(data), nil - } - return c.config.AlternativeRecordLayer.WriteRecord(data) - } - - c.out.Lock() - defer c.out.Unlock() - - return c.writeRecordLocked(typ, data) -} - -// readHandshake reads the next handshake message from -// the record layer. 
-func (c *Conn) readHandshake() (interface{}, error) { - var data []byte - if c.config.AlternativeRecordLayer != nil { - var err error - data, err = c.config.AlternativeRecordLayer.ReadHandshakeMessage() - if err != nil { - return nil, err - } - } else { - for c.hand.Len() < 4 { - if err := c.readRecord(); err != nil { - return nil, err - } - } - - data = c.hand.Bytes() - n := int(data[1])<<16 | int(data[2])<<8 | int(data[3]) - if n > maxHandshake { - c.sendAlertLocked(alertInternalError) - return nil, c.in.setErrorLocked(fmt.Errorf("tls: handshake message of length %d bytes exceeds maximum of %d bytes", n, maxHandshake)) - } - for c.hand.Len() < 4+n { - if err := c.readRecord(); err != nil { - return nil, err - } - } - data = c.hand.Next(4 + n) - } - var m handshakeMessage - switch data[0] { - case typeHelloRequest: - m = new(helloRequestMsg) - case typeClientHello: - m = new(clientHelloMsg) - case typeServerHello: - m = new(serverHelloMsg) - case typeNewSessionTicket: - if c.vers == VersionTLS13 { - m = new(newSessionTicketMsgTLS13) - } else { - m = new(newSessionTicketMsg) - } - case typeCertificate: - if c.vers == VersionTLS13 { - m = new(certificateMsgTLS13) - } else { - m = new(certificateMsg) - } - case typeCertificateRequest: - if c.vers == VersionTLS13 { - m = new(certificateRequestMsgTLS13) - } else { - m = &certificateRequestMsg{ - hasSignatureAlgorithm: c.vers >= VersionTLS12, - } - } - case typeCertificateStatus: - m = new(certificateStatusMsg) - case typeServerKeyExchange: - m = new(serverKeyExchangeMsg) - case typeServerHelloDone: - m = new(serverHelloDoneMsg) - case typeClientKeyExchange: - m = new(clientKeyExchangeMsg) - case typeCertificateVerify: - m = &certificateVerifyMsg{ - hasSignatureAlgorithm: c.vers >= VersionTLS12, - } - case typeNextProtocol: - m = new(nextProtoMsg) - case typeFinished: - m = new(finishedMsg) - case typeEncryptedExtensions: - m = new(encryptedExtensionsMsg) - case typeEndOfEarlyData: - m = new(endOfEarlyDataMsg) - case 
typeKeyUpdate: - m = new(keyUpdateMsg) - default: - return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) - } - - // The handshake message unmarshalers - // expect to be able to keep references to data, - // so pass in a fresh copy that won't be overwritten. - data = append([]byte(nil), data...) - - if !m.unmarshal(data) { - return nil, c.in.setErrorLocked(c.sendAlert(alertUnexpectedMessage)) - } - return m, nil -} - -var ( - errClosed = errors.New("tls: use of closed connection") - errShutdown = errors.New("tls: protocol is shutdown") -) - -// Write writes data to the connection. -func (c *Conn) Write(b []byte) (int, error) { - // interlock with Close below - for { - x := atomic.LoadInt32(&c.activeCall) - if x&1 != 0 { - return 0, errClosed - } - if atomic.CompareAndSwapInt32(&c.activeCall, x, x+2) { - defer atomic.AddInt32(&c.activeCall, -2) - break - } - } - - if err := c.Handshake(); err != nil { - return 0, err - } - - c.out.Lock() - defer c.out.Unlock() - - if err := c.out.err; err != nil { - return 0, err - } - - if !c.handshakeComplete() { - return 0, alertInternalError - } - - if c.closeNotifySent { - return 0, errShutdown - } - - // SSL 3.0 and TLS 1.0 are susceptible to a chosen-plaintext - // attack when using block mode ciphers due to predictable IVs. - // This can be prevented by splitting each Application Data - // record into two records, effectively randomizing the IV. 
- // - // https://www.openssl.org/~bodo/tls-cbc.txt - // https://bugzilla.mozilla.org/show_bug.cgi?id=665814 - // https://www.imperialviolet.org/2012/01/15/beastfollowup.html - - var m int - if len(b) > 1 && c.vers <= VersionTLS10 { - if _, ok := c.out.cipher.(cipher.BlockMode); ok { - n, err := c.writeRecordLocked(recordTypeApplicationData, b[:1]) - if err != nil { - return n, c.out.setErrorLocked(err) - } - m, b = 1, b[1:] - } - } - - n, err := c.writeRecordLocked(recordTypeApplicationData, b) - return n + m, c.out.setErrorLocked(err) -} - -// handleRenegotiation processes a HelloRequest handshake message. -func (c *Conn) handleRenegotiation() error { - if c.vers == VersionTLS13 { - return errors.New("tls: internal error: unexpected renegotiation") - } - - msg, err := c.readHandshake() - if err != nil { - return err - } - - helloReq, ok := msg.(*helloRequestMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(helloReq, msg) - } - - if !c.isClient { - return c.sendAlert(alertNoRenegotiation) - } - - switch c.config.Renegotiation { - case RenegotiateNever: - return c.sendAlert(alertNoRenegotiation) - case RenegotiateOnceAsClient: - if c.handshakes > 1 { - return c.sendAlert(alertNoRenegotiation) - } - case RenegotiateFreelyAsClient: - // Ok. - default: - c.sendAlert(alertInternalError) - return errors.New("tls: unknown Renegotiation value") - } - - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - - atomic.StoreUint32(&c.handshakeStatus, 0) - if c.handshakeErr = c.clientHandshake(); c.handshakeErr == nil { - c.handshakes++ - } - return c.handshakeErr -} - -func (c *Conn) HandlePostHandshakeMessage() error { - return c.handlePostHandshakeMessage() -} - -// handlePostHandshakeMessage processes a handshake message arrived after the -// handshake is complete. Up to TLS 1.2, it indicates the start of a renegotiation. 
-func (c *Conn) handlePostHandshakeMessage() error { - if c.vers != VersionTLS13 { - return c.handleRenegotiation() - } - - msg, err := c.readHandshake() - if err != nil { - return err - } - - c.retryCount++ - if c.retryCount > maxUselessRecords { - c.sendAlert(alertUnexpectedMessage) - return c.in.setErrorLocked(errors.New("tls: too many non-advancing records")) - } - - switch msg := msg.(type) { - case *newSessionTicketMsgTLS13: - return c.handleNewSessionTicket(msg) - case *keyUpdateMsg: - return c.handleKeyUpdate(msg) - default: - c.sendAlert(alertUnexpectedMessage) - return fmt.Errorf("tls: received unexpected handshake message of type %T", msg) - } -} - -func (c *Conn) handleKeyUpdate(keyUpdate *keyUpdateMsg) error { - cipherSuite := cipherSuiteTLS13ByID(c.cipherSuite) - if cipherSuite == nil { - return c.in.setErrorLocked(c.sendAlert(alertInternalError)) - } - - newSecret := cipherSuite.nextTrafficSecret(c.in.trafficSecret) - c.in.setTrafficSecret(cipherSuite, newSecret) - - if keyUpdate.updateRequested { - c.out.Lock() - defer c.out.Unlock() - - msg := &keyUpdateMsg{} - _, err := c.writeRecordLocked(recordTypeHandshake, msg.marshal()) - if err != nil { - // Surface the error at the next write. - c.out.setErrorLocked(err) - return nil - } - - newSecret := cipherSuite.nextTrafficSecret(c.out.trafficSecret) - c.out.setTrafficSecret(cipherSuite, newSecret) - } - - return nil -} - -// Read can be made to time out and return a net.Error with Timeout() == true -// after a fixed time limit; see SetDeadline and SetReadDeadline. -func (c *Conn) Read(b []byte) (int, error) { - if err := c.Handshake(); err != nil { - return 0, err - } - if len(b) == 0 { - // Put this after Handshake, in case people were calling - // Read(nil) for the side effect of the Handshake. 
- return 0, nil - } - - c.in.Lock() - defer c.in.Unlock() - - for c.input.Len() == 0 { - if err := c.readRecord(); err != nil { - return 0, err - } - for c.hand.Len() > 0 { - if err := c.handlePostHandshakeMessage(); err != nil { - return 0, err - } - } - } - - n, _ := c.input.Read(b) - - // If a close-notify alert is waiting, read it so that we can return (n, - // EOF) instead of (n, nil), to signal to the HTTP response reading - // goroutine that the connection is now closed. This eliminates a race - // where the HTTP response reading goroutine would otherwise not observe - // the EOF until its next read, by which time a client goroutine might - // have already tried to reuse the HTTP connection for a new request. - // See https://golang.org/cl/76400046 and https://golang.org/issue/3514 - if n != 0 && c.input.Len() == 0 && c.rawInput.Len() > 0 && - recordType(c.rawInput.Bytes()[0]) == recordTypeAlert { - if err := c.readRecord(); err != nil { - return n, err // will be io.EOF on closeNotify - } - } - - return n, nil -} - -// Close closes the connection. -func (c *Conn) Close() error { - // Interlock with Conn.Write above. - var x int32 - for { - x = atomic.LoadInt32(&c.activeCall) - if x&1 != 0 { - return errClosed - } - if atomic.CompareAndSwapInt32(&c.activeCall, x, x|1) { - break - } - } - if x != 0 { - // io.Writer and io.Closer should not be used concurrently. - // If Close is called while a Write is currently in-flight, - // interpret that as a sign that this Close is really just - // being used to break the Write and/or clean up resources and - // avoid sending the alertCloseNotify, which may block - // waiting on handshakeMutex or the c.out mutex. 
- return c.conn.Close() - } - - var alertErr error - - if c.handshakeComplete() { - alertErr = c.closeNotify() - } - - if err := c.conn.Close(); err != nil { - return err - } - return alertErr -} - -var errEarlyCloseWrite = errors.New("tls: CloseWrite called before handshake complete") - -// CloseWrite shuts down the writing side of the connection. It should only be -// called once the handshake has completed and does not call CloseWrite on the -// underlying connection. Most callers should just use Close. -func (c *Conn) CloseWrite() error { - if !c.handshakeComplete() { - return errEarlyCloseWrite - } - - return c.closeNotify() -} - -func (c *Conn) closeNotify() error { - c.out.Lock() - defer c.out.Unlock() - - if !c.closeNotifySent { - c.closeNotifyErr = c.sendAlertLocked(alertCloseNotify) - c.closeNotifySent = true - } - return c.closeNotifyErr -} - -// Handshake runs the client or server handshake -// protocol if it has not yet been run. -// Most uses of this package need not call Handshake -// explicitly: the first Read or Write will call it automatically. -func (c *Conn) Handshake() error { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - - if err := c.handshakeErr; err != nil { - return err - } - if c.handshakeComplete() { - return nil - } - - c.in.Lock() - defer c.in.Unlock() - - if c.isClient { - c.handshakeErr = c.clientHandshake() - } else { - c.handshakeErr = c.serverHandshake() - } - if c.handshakeErr == nil { - c.handshakes++ - } else { - // If an error occurred during the hadshake try to flush the - // alert that might be left in the buffer. - c.flush() - } - - if c.handshakeErr == nil && !c.handshakeComplete() { - c.handshakeErr = errors.New("tls: internal error: handshake should have had a result") - } - - return c.handshakeErr -} - -// ConnectionState returns basic TLS details about the connection. 
-func (c *Conn) ConnectionState() ConnectionState { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - - var state ConnectionState - state.HandshakeComplete = c.handshakeComplete() - state.ServerName = c.serverName - - if state.HandshakeComplete { - state.Version = c.vers - state.NegotiatedProtocol = c.clientProtocol - state.DidResume = c.didResume - state.NegotiatedProtocolIsMutual = !c.clientProtocolFallback - state.CipherSuite = c.cipherSuite - state.PeerCertificates = c.peerCertificates - state.VerifiedChains = c.verifiedChains - state.SignedCertificateTimestamps = c.scts - state.OCSPResponse = c.ocspResponse - if !c.didResume && c.vers != VersionTLS13 { - if c.clientFinishedIsFirst { - state.TLSUnique = c.clientFinished[:] - } else { - state.TLSUnique = c.serverFinished[:] - } - } - if c.config.Renegotiation != RenegotiateNever { - state.ekm = noExportedKeyingMaterial - } else { - state.ekm = c.ekm - } - } - - return state -} - -// OCSPResponse returns the stapled OCSP response from the TLS server, if -// any. (Only valid for client connections.) -func (c *Conn) OCSPResponse() []byte { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - - return c.ocspResponse -} - -// VerifyHostname checks that the peer certificate chain is valid for -// connecting to host. If so, it returns nil; if not, it returns an error -// describing the problem. 
-func (c *Conn) VerifyHostname(host string) error { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - if !c.isClient { - return errors.New("tls: VerifyHostname called on TLS server connection") - } - if !c.handshakeComplete() { - return errors.New("tls: handshake has not yet been performed") - } - if len(c.verifiedChains) == 0 { - return errors.New("tls: handshake did not verify certificate chain") - } - return c.peerCertificates[0].VerifyHostname(host) -} - -func (c *Conn) handshakeComplete() bool { - return atomic.LoadUint32(&c.handshakeStatus) == 1 -} diff --git a/vendor/github.com/marten-seemann/qtls/generate_cert.go b/vendor/github.com/marten-seemann/qtls/generate_cert.go deleted file mode 100644 index 8d012be75..000000000 --- a/vendor/github.com/marten-seemann/qtls/generate_cert.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Generate a self-signed X.509 certificate for a TLS server. Outputs to -// 'cert.pem' and 'key.pem' and will overwrite existing files. - -package main - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "flag" - "fmt" - "log" - "math/big" - "net" - "os" - "strings" - "time" -) - -var ( - host = flag.String("host", "", "Comma-separated hostnames and IPs to generate a certificate for") - validFrom = flag.String("start-date", "", "Creation date formatted as Jan 1 15:04:05 2011") - validFor = flag.Duration("duration", 365*24*time.Hour, "Duration that certificate is valid for") - isCA = flag.Bool("ca", false, "whether this cert should be its own Certificate Authority") - rsaBits = flag.Int("rsa-bits", 2048, "Size of RSA key to generate. Ignored if --ecdsa-curve is set") - ecdsaCurve = flag.String("ecdsa-curve", "", "ECDSA curve to use to generate a key. 
Valid values are P224, P256 (recommended), P384, P521") -) - -func publicKey(priv interface{}) interface{} { - switch k := priv.(type) { - case *rsa.PrivateKey: - return &k.PublicKey - case *ecdsa.PrivateKey: - return &k.PublicKey - default: - return nil - } -} - -func pemBlockForKey(priv interface{}) *pem.Block { - switch k := priv.(type) { - case *rsa.PrivateKey: - return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} - case *ecdsa.PrivateKey: - b, err := x509.MarshalECPrivateKey(k) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to marshal ECDSA private key: %v", err) - os.Exit(2) - } - return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} - default: - return nil - } -} - -func main() { - flag.Parse() - - if len(*host) == 0 { - log.Fatalf("Missing required --host parameter") - } - - var priv interface{} - var err error - switch *ecdsaCurve { - case "": - priv, err = rsa.GenerateKey(rand.Reader, *rsaBits) - case "P224": - priv, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader) - case "P256": - priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case "P384": - priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - case "P521": - priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - default: - fmt.Fprintf(os.Stderr, "Unrecognized elliptic curve: %q", *ecdsaCurve) - os.Exit(1) - } - if err != nil { - log.Fatalf("failed to generate private key: %s", err) - } - - var notBefore time.Time - if len(*validFrom) == 0 { - notBefore = time.Now() - } else { - notBefore, err = time.Parse("Jan 2 15:04:05 2006", *validFrom) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to parse creation date: %s\n", err) - os.Exit(1) - } - } - - notAfter := notBefore.Add(*validFor) - - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - log.Fatalf("failed to generate serial number: %s", err) - } - - template := x509.Certificate{ - SerialNumber: 
serialNumber, - Subject: pkix.Name{ - Organization: []string{"Acme Co"}, - }, - NotBefore: notBefore, - NotAfter: notAfter, - - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - } - - hosts := strings.Split(*host, ",") - for _, h := range hosts { - if ip := net.ParseIP(h); ip != nil { - template.IPAddresses = append(template.IPAddresses, ip) - } else { - template.DNSNames = append(template.DNSNames, h) - } - } - - if *isCA { - template.IsCA = true - template.KeyUsage |= x509.KeyUsageCertSign - } - - derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv) - if err != nil { - log.Fatalf("Failed to create certificate: %s", err) - } - - certOut, err := os.Create("cert.pem") - if err != nil { - log.Fatalf("failed to open cert.pem for writing: %s", err) - } - if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { - log.Fatalf("failed to write data to cert.pem: %s", err) - } - if err := certOut.Close(); err != nil { - log.Fatalf("error closing cert.pem: %s", err) - } - log.Print("wrote cert.pem\n") - - keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - log.Print("failed to open key.pem for writing:", err) - return - } - if err := pem.Encode(keyOut, pemBlockForKey(priv)); err != nil { - log.Fatalf("failed to write data to key.pem: %s", err) - } - if err := keyOut.Close(); err != nil { - log.Fatalf("error closing key.pem: %s", err) - } - log.Print("wrote key.pem\n") -} diff --git a/vendor/github.com/marten-seemann/qtls/go.mod b/vendor/github.com/marten-seemann/qtls/go.mod deleted file mode 100644 index 9cd2ced53..000000000 --- a/vendor/github.com/marten-seemann/qtls/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/marten-seemann/qtls - -go 1.12 - -require ( - golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25 - 
golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e -) diff --git a/vendor/github.com/marten-seemann/qtls/go.sum b/vendor/github.com/marten-seemann/qtls/go.sum deleted file mode 100644 index a47aabd29..000000000 --- a/vendor/github.com/marten-seemann/qtls/go.sum +++ /dev/null @@ -1,5 +0,0 @@ -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25 h1:jsG6UpNLt9iAsb0S2AGW28DveNzzgmbXR+ENoPjUeIU= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e h1:ZytStCyV048ZqDsWHiYDdoI2Vd4msMcrDECFxS+tL9c= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/marten-seemann/qtls/handshake_client.go b/vendor/github.com/marten-seemann/qtls/handshake_client.go deleted file mode 100644 index 1d2903d56..000000000 --- a/vendor/github.com/marten-seemann/qtls/handshake_client.go +++ /dev/null @@ -1,1023 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package qtls - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/subtle" - "crypto/x509" - "errors" - "fmt" - "io" - "net" - "strconv" - "strings" - "sync/atomic" - "time" -) - -type clientHandshakeState struct { - c *Conn - serverHello *serverHelloMsg - hello *clientHelloMsg - suite *cipherSuite - finishedHash finishedHash - masterSecret []byte - session *ClientSessionState -} - -func (c *Conn) makeClientHello() (*clientHelloMsg, ecdheParameters, error) { - config := c.config - if len(config.ServerName) == 0 && !config.InsecureSkipVerify { - return nil, nil, errors.New("tls: either ServerName or InsecureSkipVerify must be specified in the tls.Config") - } - - nextProtosLength := 0 - for _, proto := range config.NextProtos { - if l := len(proto); l == 0 || l > 255 { - return nil, nil, errors.New("tls: invalid NextProtos value") - } else { - nextProtosLength += 1 + l - } - } - if nextProtosLength > 0xffff { - return nil, nil, errors.New("tls: NextProtos values too large") - } - - supportedVersions := config.supportedVersions(true) - if len(supportedVersions) == 0 { - return nil, nil, errors.New("tls: no supported versions satisfy MinVersion and MaxVersion") - } - - clientHelloVersion := supportedVersions[0] - // The version at the beginning of the ClientHello was capped at TLS 1.2 - // for compatibility reasons. The supported_versions extension is used - // to negotiate versions now. See RFC 8446, Section 4.2.1. 
- if clientHelloVersion > VersionTLS12 { - clientHelloVersion = VersionTLS12 - } - - hello := &clientHelloMsg{ - vers: clientHelloVersion, - compressionMethods: []uint8{compressionNone}, - random: make([]byte, 32), - sessionId: make([]byte, 32), - ocspStapling: true, - scts: true, - serverName: hostnameInSNI(config.ServerName), - supportedCurves: config.curvePreferences(), - supportedPoints: []uint8{pointFormatUncompressed}, - nextProtoNeg: len(config.NextProtos) > 0, - secureRenegotiationSupported: true, - alpnProtocols: config.NextProtos, - supportedVersions: supportedVersions, - } - - if c.handshakes > 0 { - hello.secureRenegotiation = c.clientFinished[:] - } - - possibleCipherSuites := config.cipherSuites() - hello.cipherSuites = make([]uint16, 0, len(possibleCipherSuites)) - -NextCipherSuite: - for _, suiteId := range possibleCipherSuites { - for _, suite := range cipherSuites { - if suite.id != suiteId { - continue - } - // Don't advertise TLS 1.2-only cipher suites unless - // we're attempting TLS 1.2. - if hello.vers < VersionTLS12 && suite.flags&suiteTLS12 != 0 { - continue - } - hello.cipherSuites = append(hello.cipherSuites, suiteId) - continue NextCipherSuite - } - } - - _, err := io.ReadFull(config.rand(), hello.random) - if err != nil { - return nil, nil, errors.New("tls: short read from Rand: " + err.Error()) - } - - // A random session ID is used to detect when the server accepted a ticket - // and is resuming a session (see RFC 5077). In TLS 1.3, it's always set as - // a compatibility measure (see RFC 8446, Section 4.1.2). - if _, err := io.ReadFull(config.rand(), hello.sessionId); err != nil { - return nil, nil, errors.New("tls: short read from Rand: " + err.Error()) - } - - if hello.vers >= VersionTLS12 { - hello.supportedSignatureAlgorithms = supportedSignatureAlgorithms - } - - var params ecdheParameters - if hello.supportedVersions[0] == VersionTLS13 { - hello.cipherSuites = append(hello.cipherSuites, defaultCipherSuitesTLS13()...) 
- - curveID := config.curvePreferences()[0] - if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok { - return nil, nil, errors.New("tls: CurvePreferences includes unsupported curve") - } - params, err = generateECDHEParameters(config.rand(), curveID) - if err != nil { - return nil, nil, err - } - hello.keyShares = []keyShare{{group: curveID, data: params.PublicKey()}} - } - - if hello.supportedVersions[0] == VersionTLS13 && config.GetExtensions != nil { - hello.additionalExtensions = config.GetExtensions(typeClientHello) - } - - return hello, params, nil -} - -func (c *Conn) clientHandshake() (err error) { - if c.config == nil { - c.config = defaultConfig() - } - c.setAlternativeRecordLayer() - - // This may be a renegotiation handshake, in which case some fields - // need to be reset. - c.didResume = false - - hello, ecdheParams, err := c.makeClientHello() - if err != nil { - return err - } - - cacheKey, session, earlySecret, binderKey := c.loadSession(hello) - if cacheKey != "" && session != nil { - defer func() { - // If we got a handshake failure when resuming a session, throw away - // the session ticket. See RFC 5077, Section 3.2. - // - // RFC 8446 makes no mention of dropping tickets on failure, but it - // does require servers to abort on invalid binders, so we need to - // delete tickets to recover from a corrupted PSK. 
- if err != nil { - c.config.ClientSessionCache.Put(cacheKey, nil) - } - }() - } - - if _, err := c.writeRecord(recordTypeHandshake, hello.marshal()); err != nil { - return err - } - - msg, err := c.readHandshake() - if err != nil { - return err - } - - serverHello, ok := msg.(*serverHelloMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(serverHello, msg) - } - - if err := c.pickTLSVersion(serverHello); err != nil { - return err - } - - if c.vers == VersionTLS13 { - hs := &clientHandshakeStateTLS13{ - c: c, - serverHello: serverHello, - hello: hello, - ecdheParams: ecdheParams, - session: session, - earlySecret: earlySecret, - binderKey: binderKey, - } - - // In TLS 1.3, session tickets are delivered after the handshake. - return hs.handshake() - } - - hs := &clientHandshakeState{ - c: c, - serverHello: serverHello, - hello: hello, - session: session, - } - - if err := hs.handshake(); err != nil { - return err - } - - // If we had a successful handshake and hs.session is different from - // the one already cached - cache a new one. - if cacheKey != "" && hs.session != nil && session != hs.session { - c.config.ClientSessionCache.Put(cacheKey, hs.session) - } - - return nil -} - -func (c *Conn) loadSession(hello *clientHelloMsg) (cacheKey string, - session *ClientSessionState, earlySecret, binderKey []byte) { - if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil { - return "", nil, nil, nil - } - - hello.ticketSupported = true - - if hello.supportedVersions[0] == VersionTLS13 { - // Require DHE on resumption as it guarantees forward secrecy against - // compromise of the session ticket key. See RFC 8446, Section 4.2.9. - hello.pskModes = []uint8{pskModeDHE} - } - - // Session resumption is not allowed if renegotiating because - // renegotiation is primarily used to allow a client to send a client - // certificate, which would be skipped if session resumption occurred. 
- if c.handshakes != 0 { - return "", nil, nil, nil - } - - // Try to resume a previously negotiated TLS session, if available. - cacheKey = clientSessionCacheKey(c.conn.RemoteAddr(), c.config) - session, ok := c.config.ClientSessionCache.Get(cacheKey) - if !ok || session == nil { - return cacheKey, nil, nil, nil - } - - // Check that version used for the previous session is still valid. - versOk := false - for _, v := range hello.supportedVersions { - if v == session.vers { - versOk = true - break - } - } - if !versOk { - return cacheKey, nil, nil, nil - } - - // Check that the cached server certificate is not expired, and that it's - // valid for the ServerName. This should be ensured by the cache key, but - // protect the application from a faulty ClientSessionCache implementation. - if !c.config.InsecureSkipVerify { - if len(session.verifiedChains) == 0 { - // The original connection had InsecureSkipVerify, while this doesn't. - return cacheKey, nil, nil, nil - } - serverCert := session.serverCertificates[0] - if c.config.time().After(serverCert.NotAfter) { - // Expired certificate, delete the entry. - c.config.ClientSessionCache.Put(cacheKey, nil) - return cacheKey, nil, nil, nil - } - if err := serverCert.VerifyHostname(c.config.ServerName); err != nil { - return cacheKey, nil, nil, nil - } - } - - if session.vers != VersionTLS13 { - // In TLS 1.2 the cipher suite must match the resumed session. Ensure we - // are still offering it. - if mutualCipherSuite(hello.cipherSuites, session.cipherSuite) == nil { - return cacheKey, nil, nil, nil - } - - hello.sessionTicket = session.sessionTicket - return - } - - // Check that the session ticket is not expired. - if c.config.time().After(session.useBy) { - c.config.ClientSessionCache.Put(cacheKey, nil) - return cacheKey, nil, nil, nil - } - - // In TLS 1.3 the KDF hash must match the resumed session. Ensure we - // offer at least one cipher suite with that hash. 
- cipherSuite := cipherSuiteTLS13ByID(session.cipherSuite) - if cipherSuite == nil { - return cacheKey, nil, nil, nil - } - cipherSuiteOk := false - for _, offeredID := range hello.cipherSuites { - offeredSuite := cipherSuiteTLS13ByID(offeredID) - if offeredSuite != nil && offeredSuite.hash == cipherSuite.hash { - cipherSuiteOk = true - break - } - } - if !cipherSuiteOk { - return cacheKey, nil, nil, nil - } - - // Set the pre_shared_key extension. See RFC 8446, Section 4.2.11.1. - ticketAge := uint32(c.config.time().Sub(session.receivedAt) / time.Millisecond) - identity := pskIdentity{ - label: session.sessionTicket, - obfuscatedTicketAge: ticketAge + session.ageAdd, - } - hello.pskIdentities = []pskIdentity{identity} - hello.pskBinders = [][]byte{make([]byte, cipherSuite.hash.Size())} - - // Compute the PSK binders. See RFC 8446, Section 4.2.11.2. - psk := cipherSuite.expandLabel(session.masterSecret, "resumption", - session.nonce, cipherSuite.hash.Size()) - earlySecret = cipherSuite.extract(psk, nil) - binderKey = cipherSuite.deriveSecret(earlySecret, resumptionBinderLabel, nil) - transcript := cipherSuite.hash.New() - transcript.Write(hello.marshalWithoutBinders()) - pskBinders := [][]byte{cipherSuite.finishedHash(binderKey, transcript)} - hello.updateBinders(pskBinders) - - return -} - -func (c *Conn) pickTLSVersion(serverHello *serverHelloMsg) error { - peerVersion := serverHello.vers - if serverHello.supportedVersion != 0 { - peerVersion = serverHello.supportedVersion - } - - vers, ok := c.config.mutualVersion(true, []uint16{peerVersion}) - if !ok { - c.sendAlert(alertProtocolVersion) - return fmt.Errorf("tls: server selected unsupported protocol version %x", peerVersion) - } - - c.vers = vers - c.haveVers = true - c.in.version = vers - c.out.version = vers - - return nil -} - -// Does the handshake, either a full one or resumes old session. Requires hs.c, -// hs.hello, hs.serverHello, and, optionally, hs.session to be set. 
-func (hs *clientHandshakeState) handshake() error { - c := hs.c - - isResume, err := hs.processServerHello() - if err != nil { - return err - } - - hs.finishedHash = newFinishedHash(c.vers, hs.suite) - - // No signatures of the handshake are needed in a resumption. - // Otherwise, in a full handshake, if we don't have any certificates - // configured then we will never send a CertificateVerify message and - // thus no signatures are needed in that case either. - if isResume || (len(c.config.Certificates) == 0 && c.config.GetClientCertificate == nil) { - hs.finishedHash.discardHandshakeBuffer() - } - - hs.finishedHash.Write(hs.hello.marshal()) - hs.finishedHash.Write(hs.serverHello.marshal()) - - c.buffering = true - if isResume { - if err := hs.establishKeys(); err != nil { - return err - } - if err := hs.readSessionTicket(); err != nil { - return err - } - if err := hs.readFinished(c.serverFinished[:]); err != nil { - return err - } - c.clientFinishedIsFirst = false - if err := hs.sendFinished(c.clientFinished[:]); err != nil { - return err - } - if _, err := c.flush(); err != nil { - return err - } - } else { - if err := hs.doFullHandshake(); err != nil { - return err - } - if err := hs.establishKeys(); err != nil { - return err - } - if err := hs.sendFinished(c.clientFinished[:]); err != nil { - return err - } - if _, err := c.flush(); err != nil { - return err - } - c.clientFinishedIsFirst = true - if err := hs.readSessionTicket(); err != nil { - return err - } - if err := hs.readFinished(c.serverFinished[:]); err != nil { - return err - } - } - - c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random) - c.didResume = isResume - atomic.StoreUint32(&c.handshakeStatus, 1) - - return nil -} - -func (hs *clientHandshakeState) pickCipherSuite() error { - if hs.suite = mutualCipherSuite(hs.hello.cipherSuites, hs.serverHello.cipherSuite); hs.suite == nil { - hs.c.sendAlert(alertHandshakeFailure) - return errors.New("tls: 
server chose an unconfigured cipher suite") - } - - hs.c.cipherSuite = hs.suite.id - return nil -} - -func (hs *clientHandshakeState) doFullHandshake() error { - c := hs.c - - msg, err := c.readHandshake() - if err != nil { - return err - } - certMsg, ok := msg.(*certificateMsg) - if !ok || len(certMsg.certificates) == 0 { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(certMsg, msg) - } - hs.finishedHash.Write(certMsg.marshal()) - - if c.handshakes == 0 { - // If this is the first handshake on a connection, process and - // (optionally) verify the server's certificates. - if err := c.verifyServerCertificate(certMsg.certificates); err != nil { - return err - } - } else { - // This is a renegotiation handshake. We require that the - // server's identity (i.e. leaf certificate) is unchanged and - // thus any previous trust decision is still valid. - // - // See https://mitls.org/pages/attacks/3SHAKE for the - // motivation behind this requirement. - if !bytes.Equal(c.peerCertificates[0].Raw, certMsg.certificates[0]) { - c.sendAlert(alertBadCertificate) - return errors.New("tls: server's identity changed during renegotiation") - } - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - - cs, ok := msg.(*certificateStatusMsg) - if ok { - // RFC4366 on Certificate Status Request: - // The server MAY return a "certificate_status" message. - - if !hs.serverHello.ocspStapling { - // If a server returns a "CertificateStatus" message, then the - // server MUST have included an extension of type "status_request" - // with empty "extension_data" in the extended server hello. 
- - c.sendAlert(alertUnexpectedMessage) - return errors.New("tls: received unexpected CertificateStatus message") - } - hs.finishedHash.Write(cs.marshal()) - - c.ocspResponse = cs.response - - msg, err = c.readHandshake() - if err != nil { - return err - } - } - - keyAgreement := hs.suite.ka(c.vers) - - skx, ok := msg.(*serverKeyExchangeMsg) - if ok { - hs.finishedHash.Write(skx.marshal()) - err = keyAgreement.processServerKeyExchange(c.config, hs.hello, hs.serverHello, c.peerCertificates[0], skx) - if err != nil { - c.sendAlert(alertUnexpectedMessage) - return err - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - } - - var chainToSend *Certificate - var certRequested bool - certReq, ok := msg.(*certificateRequestMsg) - if ok { - certRequested = true - hs.finishedHash.Write(certReq.marshal()) - - cri := certificateRequestInfoFromMsg(certReq) - if chainToSend, err = c.getClientCertificate(cri); err != nil { - c.sendAlert(alertInternalError) - return err - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - } - - shd, ok := msg.(*serverHelloDoneMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(shd, msg) - } - hs.finishedHash.Write(shd.marshal()) - - // If the server requested a certificate then we have to send a - // Certificate message, even if it's empty because we don't have a - // certificate to send. 
- if certRequested { - certMsg = new(certificateMsg) - certMsg.certificates = chainToSend.Certificate - hs.finishedHash.Write(certMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil { - return err - } - } - - preMasterSecret, ckx, err := keyAgreement.generateClientKeyExchange(c.config, hs.hello, c.peerCertificates[0]) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - if ckx != nil { - hs.finishedHash.Write(ckx.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, ckx.marshal()); err != nil { - return err - } - } - - if chainToSend != nil && len(chainToSend.Certificate) > 0 { - certVerify := &certificateVerifyMsg{ - hasSignatureAlgorithm: c.vers >= VersionTLS12, - } - - key, ok := chainToSend.PrivateKey.(crypto.Signer) - if !ok { - c.sendAlert(alertInternalError) - return fmt.Errorf("tls: client certificate private key of type %T does not implement crypto.Signer", chainToSend.PrivateKey) - } - - signatureAlgorithm, sigType, hashFunc, err := pickSignatureAlgorithm(key.Public(), certReq.supportedSignatureAlgorithms, hs.hello.supportedSignatureAlgorithms, c.vers) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - // SignatureAndHashAlgorithm was introduced in TLS 1.2. 
- if certVerify.hasSignatureAlgorithm { - certVerify.signatureAlgorithm = signatureAlgorithm - } - digest, err := hs.finishedHash.hashForClientCertificate(sigType, hashFunc, hs.masterSecret) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - signOpts := crypto.SignerOpts(hashFunc) - if sigType == signatureRSAPSS { - signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: hashFunc} - } - certVerify.signature, err = key.Sign(c.config.rand(), digest, signOpts) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - - hs.finishedHash.Write(certVerify.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certVerify.marshal()); err != nil { - return err - } - } - - hs.masterSecret = masterFromPreMasterSecret(c.vers, hs.suite, preMasterSecret, hs.hello.random, hs.serverHello.random) - if err := c.config.writeKeyLog(keyLogLabelTLS12, hs.hello.random, hs.masterSecret); err != nil { - c.sendAlert(alertInternalError) - return errors.New("tls: failed to write to key log: " + err.Error()) - } - - hs.finishedHash.discardHandshakeBuffer() - - return nil -} - -func (hs *clientHandshakeState) establishKeys() error { - c := hs.c - - clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV := - keysFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.hello.random, hs.serverHello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen) - var clientCipher, serverCipher interface{} - var clientHash, serverHash macFunction - if hs.suite.cipher != nil { - clientCipher = hs.suite.cipher(clientKey, clientIV, false /* not for reading */) - clientHash = hs.suite.mac(c.vers, clientMAC) - serverCipher = hs.suite.cipher(serverKey, serverIV, true /* for reading */) - serverHash = hs.suite.mac(c.vers, serverMAC) - } else { - clientCipher = hs.suite.aead(clientKey, clientIV) - serverCipher = hs.suite.aead(serverKey, serverIV) - } - - c.in.prepareCipherSpec(c.vers, serverCipher, serverHash) - c.out.prepareCipherSpec(c.vers, 
clientCipher, clientHash) - return nil -} - -func (hs *clientHandshakeState) serverResumedSession() bool { - // If the server responded with the same sessionId then it means the - // sessionTicket is being used to resume a TLS session. - return hs.session != nil && hs.hello.sessionId != nil && - bytes.Equal(hs.serverHello.sessionId, hs.hello.sessionId) -} - -func (hs *clientHandshakeState) processServerHello() (bool, error) { - c := hs.c - - if err := hs.pickCipherSuite(); err != nil { - return false, err - } - - if hs.serverHello.compressionMethod != compressionNone { - c.sendAlert(alertUnexpectedMessage) - return false, errors.New("tls: server selected unsupported compression format") - } - - if c.handshakes == 0 && hs.serverHello.secureRenegotiationSupported { - c.secureRenegotiation = true - if len(hs.serverHello.secureRenegotiation) != 0 { - c.sendAlert(alertHandshakeFailure) - return false, errors.New("tls: initial handshake had non-empty renegotiation extension") - } - } - - if c.handshakes > 0 && c.secureRenegotiation { - var expectedSecureRenegotiation [24]byte - copy(expectedSecureRenegotiation[:], c.clientFinished[:]) - copy(expectedSecureRenegotiation[12:], c.serverFinished[:]) - if !bytes.Equal(hs.serverHello.secureRenegotiation, expectedSecureRenegotiation[:]) { - c.sendAlert(alertHandshakeFailure) - return false, errors.New("tls: incorrect renegotiation extension contents") - } - } - - clientDidNPN := hs.hello.nextProtoNeg - clientDidALPN := len(hs.hello.alpnProtocols) > 0 - serverHasNPN := hs.serverHello.nextProtoNeg - serverHasALPN := len(hs.serverHello.alpnProtocol) > 0 - - if !clientDidNPN && serverHasNPN { - c.sendAlert(alertHandshakeFailure) - return false, errors.New("tls: server advertised unrequested NPN extension") - } - - if !clientDidALPN && serverHasALPN { - c.sendAlert(alertHandshakeFailure) - return false, errors.New("tls: server advertised unrequested ALPN extension") - } - - if serverHasNPN && serverHasALPN { - 
c.sendAlert(alertHandshakeFailure) - return false, errors.New("tls: server advertised both NPN and ALPN extensions") - } - - if serverHasALPN { - c.clientProtocol = hs.serverHello.alpnProtocol - c.clientProtocolFallback = false - } - c.scts = hs.serverHello.scts - - if !hs.serverResumedSession() { - return false, nil - } - - if hs.session.vers != c.vers { - c.sendAlert(alertHandshakeFailure) - return false, errors.New("tls: server resumed a session with a different version") - } - - if hs.session.cipherSuite != hs.suite.id { - c.sendAlert(alertHandshakeFailure) - return false, errors.New("tls: server resumed a session with a different cipher suite") - } - - // Restore masterSecret and peerCerts from previous state - hs.masterSecret = hs.session.masterSecret - c.peerCertificates = hs.session.serverCertificates - c.verifiedChains = hs.session.verifiedChains - return true, nil -} - -func (hs *clientHandshakeState) readFinished(out []byte) error { - c := hs.c - - if err := c.readChangeCipherSpec(); err != nil { - return err - } - - msg, err := c.readHandshake() - if err != nil { - return err - } - serverFinished, ok := msg.(*finishedMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(serverFinished, msg) - } - - verify := hs.finishedHash.serverSum(hs.masterSecret) - if len(verify) != len(serverFinished.verifyData) || - subtle.ConstantTimeCompare(verify, serverFinished.verifyData) != 1 { - c.sendAlert(alertHandshakeFailure) - return errors.New("tls: server's Finished message was incorrect") - } - hs.finishedHash.Write(serverFinished.marshal()) - copy(out, verify) - return nil -} - -func (hs *clientHandshakeState) readSessionTicket() error { - if !hs.serverHello.ticketSupported { - return nil - } - - c := hs.c - msg, err := c.readHandshake() - if err != nil { - return err - } - sessionTicketMsg, ok := msg.(*newSessionTicketMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(sessionTicketMsg, msg) - } - 
hs.finishedHash.Write(sessionTicketMsg.marshal()) - - hs.session = &ClientSessionState{ - sessionTicket: sessionTicketMsg.ticket, - vers: c.vers, - cipherSuite: hs.suite.id, - masterSecret: hs.masterSecret, - serverCertificates: c.peerCertificates, - verifiedChains: c.verifiedChains, - receivedAt: c.config.time(), - } - - return nil -} - -func (hs *clientHandshakeState) sendFinished(out []byte) error { - c := hs.c - - if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil { - return err - } - if hs.serverHello.nextProtoNeg { - nextProto := new(nextProtoMsg) - proto, fallback := mutualProtocol(c.config.NextProtos, hs.serverHello.nextProtos) - nextProto.proto = proto - c.clientProtocol = proto - c.clientProtocolFallback = fallback - - hs.finishedHash.Write(nextProto.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, nextProto.marshal()); err != nil { - return err - } - } - - finished := new(finishedMsg) - finished.verifyData = hs.finishedHash.clientSum(hs.masterSecret) - hs.finishedHash.Write(finished.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil { - return err - } - copy(out, finished.verifyData) - return nil -} - -// verifyServerCertificate parses and verifies the provided chain, setting -// c.verifiedChains and c.peerCertificates or sending the appropriate alert. 
-func (c *Conn) verifyServerCertificate(certificates [][]byte) error { - certs := make([]*x509.Certificate, len(certificates)) - for i, asn1Data := range certificates { - cert, err := x509.ParseCertificate(asn1Data) - if err != nil { - c.sendAlert(alertBadCertificate) - return errors.New("tls: failed to parse certificate from server: " + err.Error()) - } - certs[i] = cert - } - - if !c.config.InsecureSkipVerify { - opts := x509.VerifyOptions{ - Roots: c.config.RootCAs, - CurrentTime: c.config.time(), - DNSName: c.config.ServerName, - Intermediates: x509.NewCertPool(), - } - - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - var err error - c.verifiedChains, err = certs[0].Verify(opts) - if err != nil { - c.sendAlert(alertBadCertificate) - return err - } - } - - if c.config.VerifyPeerCertificate != nil { - if err := c.config.VerifyPeerCertificate(certificates, c.verifiedChains); err != nil { - c.sendAlert(alertBadCertificate) - return err - } - } - - switch certs[0].PublicKey.(type) { - case *rsa.PublicKey, *ecdsa.PublicKey: - break - default: - c.sendAlert(alertUnsupportedCertificate) - return fmt.Errorf("tls: server's certificate contains an unsupported type of public key: %T", certs[0].PublicKey) - } - - c.peerCertificates = certs - - return nil -} - -// tls11SignatureSchemes contains the signature schemes that we synthesise for -// a TLS <= 1.1 connection, based on the supported certificate types. -var ( - tls11SignatureSchemes = []SignatureScheme{ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512, PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512, PKCS1WithSHA1} - tls11SignatureSchemesECDSA = tls11SignatureSchemes[:3] - tls11SignatureSchemesRSA = tls11SignatureSchemes[3:] -) - -// certificateRequestInfoFromMsg generates a CertificateRequestInfo from a TLS -// <= 1.2 CertificateRequest, making an effort to fill in missing information. 
-func certificateRequestInfoFromMsg(certReq *certificateRequestMsg) *CertificateRequestInfo { - var rsaAvail, ecdsaAvail bool - for _, certType := range certReq.certificateTypes { - switch certType { - case certTypeRSASign: - rsaAvail = true - case certTypeECDSASign: - ecdsaAvail = true - } - } - - cri := &CertificateRequestInfo{ - AcceptableCAs: certReq.certificateAuthorities, - } - - if !certReq.hasSignatureAlgorithm { - // Prior to TLS 1.2, the signature schemes were not - // included in the certificate request message. In this - // case we use a plausible list based on the acceptable - // certificate types. - switch { - case rsaAvail && ecdsaAvail: - cri.SignatureSchemes = tls11SignatureSchemes - case rsaAvail: - cri.SignatureSchemes = tls11SignatureSchemesRSA - case ecdsaAvail: - cri.SignatureSchemes = tls11SignatureSchemesECDSA - } - return cri - } - - // In TLS 1.2, the signature schemes apply to both the certificate chain and - // the leaf key, while the certificate types only apply to the leaf key. - // See RFC 5246, Section 7.4.4 (where it calls this "somewhat complicated"). - // Filter the signature schemes based on the certificate type. - cri.SignatureSchemes = make([]SignatureScheme, 0, len(certReq.supportedSignatureAlgorithms)) - for _, sigScheme := range certReq.supportedSignatureAlgorithms { - switch signatureFromSignatureScheme(sigScheme) { - case signatureECDSA: - if ecdsaAvail { - cri.SignatureSchemes = append(cri.SignatureSchemes, sigScheme) - } - case signatureRSAPSS, signaturePKCS1v15: - if rsaAvail { - cri.SignatureSchemes = append(cri.SignatureSchemes, sigScheme) - } - } - } - - return cri -} - -func (c *Conn) getClientCertificate(cri *CertificateRequestInfo) (*Certificate, error) { - if c.config.GetClientCertificate != nil { - return c.config.GetClientCertificate(cri) - } - - // We need to search our list of client certs for one - // where SignatureAlgorithm is acceptable to the server and the - // Issuer is in AcceptableCAs. 
- for i, chain := range c.config.Certificates { - sigOK := false - for _, alg := range signatureSchemesForCertificate(c.vers, &chain) { - if isSupportedSignatureAlgorithm(alg, cri.SignatureSchemes) { - sigOK = true - break - } - } - if !sigOK { - continue - } - - if len(cri.AcceptableCAs) == 0 { - return &chain, nil - } - - for j, cert := range chain.Certificate { - x509Cert := chain.Leaf - // Parse the certificate if this isn't the leaf node, or if - // chain.Leaf was nil. - if j != 0 || x509Cert == nil { - var err error - if x509Cert, err = x509.ParseCertificate(cert); err != nil { - c.sendAlert(alertInternalError) - return nil, errors.New("tls: failed to parse configured certificate chain #" + strconv.Itoa(i) + ": " + err.Error()) - } - } - - for _, ca := range cri.AcceptableCAs { - if bytes.Equal(x509Cert.RawIssuer, ca) { - return &chain, nil - } - } - } - } - - // No acceptable certificate found. Don't send a certificate. - return new(Certificate), nil -} - -// clientSessionCacheKey returns a key used to cache sessionTickets that could -// be used to resume previously negotiated TLS sessions with a server. -func clientSessionCacheKey(serverAddr net.Addr, config *Config) string { - if len(config.ServerName) > 0 { - return config.ServerName - } - return serverAddr.String() -} - -// mutualProtocol finds the mutual Next Protocol Negotiation or ALPN protocol -// given list of possible protocols and a list of the preference order. The -// first list must not be empty. It returns the resulting protocol and flag -// indicating if the fallback case was reached. -func mutualProtocol(protos, preferenceProtos []string) (string, bool) { - for _, s := range preferenceProtos { - for _, c := range protos { - if s == c { - return s, false - } - } - } - - return protos[0], true -} - -// hostnameInSNI converts name into an approriate hostname for SNI. -// Literal IP addresses and absolute FQDNs are not permitted as SNI values. -// See RFC 6066, Section 3. 
-func hostnameInSNI(name string) string { - host := name - if len(host) > 0 && host[0] == '[' && host[len(host)-1] == ']' { - host = host[1 : len(host)-1] - } - if i := strings.LastIndex(host, "%"); i > 0 { - host = host[:i] - } - if net.ParseIP(host) != nil { - return "" - } - for len(name) > 0 && name[len(name)-1] == '.' { - name = name[:len(name)-1] - } - return name -} diff --git a/vendor/github.com/marten-seemann/qtls/handshake_client_tls13.go b/vendor/github.com/marten-seemann/qtls/handshake_client_tls13.go deleted file mode 100644 index 9d8e0f7be..000000000 --- a/vendor/github.com/marten-seemann/qtls/handshake_client_tls13.go +++ /dev/null @@ -1,680 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package qtls - -import ( - "bytes" - "crypto" - "crypto/hmac" - "crypto/rsa" - "errors" - "hash" - "sync/atomic" - "time" -) - -type clientHandshakeStateTLS13 struct { - c *Conn - serverHello *serverHelloMsg - hello *clientHelloMsg - ecdheParams ecdheParameters - - session *ClientSessionState - earlySecret []byte - binderKey []byte - - certReq *certificateRequestMsgTLS13 - usingPSK bool - sentDummyCCS bool - suite *cipherSuiteTLS13 - transcript hash.Hash - masterSecret []byte - trafficSecret []byte // client_application_traffic_secret_0 -} - -// handshake requires hs.c, hs.hello, hs.serverHello, hs.ecdheParams, and, -// optionally, hs.session, hs.earlySecret and hs.binderKey to be set. -func (hs *clientHandshakeStateTLS13) handshake() error { - c := hs.c - - // The server must not select TLS 1.3 in a renegotiation. See RFC 8446, - // sections 4.1.2 and 4.1.3. - if c.handshakes > 0 { - c.sendAlert(alertProtocolVersion) - return errors.New("tls: server selected TLS 1.3 in a renegotiation") - } - - // Consistency check on the presence of a keyShare and its parameters. 
- if hs.ecdheParams == nil || len(hs.hello.keyShares) != 1 { - return c.sendAlert(alertInternalError) - } - - if err := hs.checkServerHelloOrHRR(); err != nil { - return err - } - - hs.transcript = hs.suite.hash.New() - hs.transcript.Write(hs.hello.marshal()) - - if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) { - if err := hs.sendDummyChangeCipherSpec(); err != nil { - return err - } - if err := hs.processHelloRetryRequest(); err != nil { - return err - } - } - - hs.transcript.Write(hs.serverHello.marshal()) - - c.buffering = true - if err := hs.processServerHello(); err != nil { - return err - } - if err := hs.sendDummyChangeCipherSpec(); err != nil { - return err - } - if err := hs.establishHandshakeKeys(); err != nil { - return err - } - if err := hs.readServerParameters(); err != nil { - return err - } - if err := hs.readServerCertificate(); err != nil { - return err - } - if err := hs.readServerFinished(); err != nil { - return err - } - if err := hs.sendClientCertificate(); err != nil { - return err - } - if err := hs.sendClientFinished(); err != nil { - return err - } - if _, err := c.flush(); err != nil { - return err - } - - atomic.StoreUint32(&c.handshakeStatus, 1) - - return nil -} - -// checkServerHelloOrHRR does validity checks that apply to both ServerHello and -// HelloRetryRequest messages. It sets hs.suite. 
-func (hs *clientHandshakeStateTLS13) checkServerHelloOrHRR() error { - c := hs.c - - if hs.serverHello.supportedVersion == 0 { - c.sendAlert(alertMissingExtension) - return errors.New("tls: server selected TLS 1.3 using the legacy version field") - } - - if hs.serverHello.supportedVersion != VersionTLS13 { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: server selected an invalid version after a HelloRetryRequest") - } - - if hs.serverHello.vers != VersionTLS12 { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: server sent an incorrect legacy version") - } - - if hs.serverHello.nextProtoNeg || - len(hs.serverHello.nextProtos) != 0 || - hs.serverHello.ocspStapling || - hs.serverHello.ticketSupported || - hs.serverHello.secureRenegotiationSupported || - len(hs.serverHello.secureRenegotiation) != 0 || - len(hs.serverHello.alpnProtocol) != 0 || - len(hs.serverHello.scts) != 0 { - c.sendAlert(alertUnsupportedExtension) - return errors.New("tls: server sent a ServerHello extension forbidden in TLS 1.3") - } - - if !bytes.Equal(hs.hello.sessionId, hs.serverHello.sessionId) { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: server did not echo the legacy session ID") - } - - if hs.serverHello.compressionMethod != compressionNone { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: server selected unsupported compression format") - } - - selectedSuite := mutualCipherSuiteTLS13(hs.hello.cipherSuites, hs.serverHello.cipherSuite) - if hs.suite != nil && selectedSuite != hs.suite { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: server changed cipher suite after a HelloRetryRequest") - } - if selectedSuite == nil { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: server chose an unconfigured cipher suite") - } - hs.suite = selectedSuite - c.cipherSuite = hs.suite.id - - return nil -} - -// sendDummyChangeCipherSpec sends a ChangeCipherSpec record for compatibility -// with middleboxes that 
didn't implement TLS correctly. See RFC 8446, Appendix D.4. -func (hs *clientHandshakeStateTLS13) sendDummyChangeCipherSpec() error { - if hs.sentDummyCCS { - return nil - } - hs.sentDummyCCS = true - - _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) - return err -} - -// processHelloRetryRequest handles the HRR in hs.serverHello, modifies and -// resends hs.hello, and reads the new ServerHello into hs.serverHello. -func (hs *clientHandshakeStateTLS13) processHelloRetryRequest() error { - c := hs.c - - // The first ClientHello gets double-hashed into the transcript upon a - // HelloRetryRequest. See RFC 8446, Section 4.4.1. - chHash := hs.transcript.Sum(nil) - hs.transcript.Reset() - hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))}) - hs.transcript.Write(chHash) - hs.transcript.Write(hs.serverHello.marshal()) - - if hs.serverHello.serverShare.group != 0 { - c.sendAlert(alertDecodeError) - return errors.New("tls: received malformed key_share extension") - } - - curveID := hs.serverHello.selectedGroup - if curveID == 0 { - c.sendAlert(alertMissingExtension) - return errors.New("tls: received HelloRetryRequest without selected group") - } - curveOK := false - for _, id := range hs.hello.supportedCurves { - if id == curveID { - curveOK = true - break - } - } - if !curveOK { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: server selected unsupported group") - } - if hs.ecdheParams.CurveID() == curveID { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: server sent an unnecessary HelloRetryRequest message") - } - if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok { - c.sendAlert(alertInternalError) - return errors.New("tls: CurvePreferences includes unsupported curve") - } - params, err := generateECDHEParameters(c.config.rand(), curveID) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - hs.ecdheParams = params - hs.hello.keyShares = []keyShare{{group: curveID, data: 
params.PublicKey()}} - - hs.hello.cookie = hs.serverHello.cookie - - hs.hello.raw = nil - if len(hs.hello.pskIdentities) > 0 { - pskSuite := cipherSuiteTLS13ByID(hs.session.cipherSuite) - if pskSuite == nil { - return c.sendAlert(alertInternalError) - } - if pskSuite.hash == hs.suite.hash { - // Update binders and obfuscated_ticket_age. - ticketAge := uint32(c.config.time().Sub(hs.session.receivedAt) / time.Millisecond) - hs.hello.pskIdentities[0].obfuscatedTicketAge = ticketAge + hs.session.ageAdd - - transcript := hs.suite.hash.New() - transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))}) - transcript.Write(chHash) - transcript.Write(hs.serverHello.marshal()) - transcript.Write(hs.hello.marshalWithoutBinders()) - pskBinders := [][]byte{hs.suite.finishedHash(hs.binderKey, transcript)} - hs.hello.updateBinders(pskBinders) - } else { - // Server selected a cipher suite incompatible with the PSK. - hs.hello.pskIdentities = nil - hs.hello.pskBinders = nil - } - } - - hs.transcript.Write(hs.hello.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil { - return err - } - - msg, err := c.readHandshake() - if err != nil { - return err - } - - serverHello, ok := msg.(*serverHelloMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(serverHello, msg) - } - hs.serverHello = serverHello - - if err := hs.checkServerHelloOrHRR(); err != nil { - return err - } - - return nil -} - -func (hs *clientHandshakeStateTLS13) processServerHello() error { - c := hs.c - - if bytes.Equal(hs.serverHello.random, helloRetryRequestRandom) { - c.sendAlert(alertUnexpectedMessage) - return errors.New("tls: server sent two HelloRetryRequest messages") - } - - if len(hs.serverHello.cookie) != 0 { - c.sendAlert(alertUnsupportedExtension) - return errors.New("tls: server sent a cookie in a normal ServerHello") - } - - if hs.serverHello.selectedGroup != 0 { - c.sendAlert(alertDecodeError) - return errors.New("tls: 
malformed key_share extension") - } - - if hs.serverHello.serverShare.group == 0 { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: server did not send a key share") - } - if hs.serverHello.serverShare.group != hs.ecdheParams.CurveID() { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: server selected unsupported group") - } - - if !hs.serverHello.selectedIdentityPresent { - return nil - } - - if int(hs.serverHello.selectedIdentity) >= len(hs.hello.pskIdentities) { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: server selected an invalid PSK") - } - - if len(hs.hello.pskIdentities) != 1 || hs.session == nil { - return c.sendAlert(alertInternalError) - } - pskSuite := cipherSuiteTLS13ByID(hs.session.cipherSuite) - if pskSuite == nil { - return c.sendAlert(alertInternalError) - } - if pskSuite.hash != hs.suite.hash { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: server selected an invalid PSK and cipher suite pair") - } - - hs.usingPSK = true - c.didResume = true - c.peerCertificates = hs.session.serverCertificates - c.verifiedChains = hs.session.verifiedChains - return nil -} - -func (hs *clientHandshakeStateTLS13) establishHandshakeKeys() error { - c := hs.c - - sharedKey := hs.ecdheParams.SharedKey(hs.serverHello.serverShare.data) - if sharedKey == nil { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: invalid server key share") - } - - earlySecret := hs.earlySecret - if !hs.usingPSK { - earlySecret = hs.suite.extract(nil, nil) - } - handshakeSecret := hs.suite.extract(sharedKey, - hs.suite.deriveSecret(earlySecret, "derived", nil)) - - clientSecret := hs.suite.deriveSecret(handshakeSecret, - clientHandshakeTrafficLabel, hs.transcript) - c.out.exportKey(hs.suite, clientSecret) - c.out.setTrafficSecret(hs.suite, clientSecret) - serverSecret := hs.suite.deriveSecret(handshakeSecret, - serverHandshakeTrafficLabel, hs.transcript) - c.in.exportKey(hs.suite, serverSecret) - 
c.in.setTrafficSecret(hs.suite, serverSecret) - - err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.hello.random, clientSecret) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - err = c.config.writeKeyLog(keyLogLabelServerHandshake, hs.hello.random, serverSecret) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - - hs.masterSecret = hs.suite.extract(nil, - hs.suite.deriveSecret(handshakeSecret, "derived", nil)) - - return nil -} - -func (hs *clientHandshakeStateTLS13) readServerParameters() error { - c := hs.c - - msg, err := c.readHandshake() - if err != nil { - return err - } - - encryptedExtensions, ok := msg.(*encryptedExtensionsMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(encryptedExtensions, msg) - } - if hs.c.config.ReceivedExtensions != nil { - hs.c.config.ReceivedExtensions(typeEncryptedExtensions, encryptedExtensions.additionalExtensions) - } - hs.transcript.Write(encryptedExtensions.marshal()) - - if len(encryptedExtensions.alpnProtocol) != 0 && len(hs.hello.alpnProtocols) == 0 { - c.sendAlert(alertUnsupportedExtension) - return errors.New("tls: server advertised unrequested ALPN extension") - } - c.clientProtocol = encryptedExtensions.alpnProtocol - - return nil -} - -func (hs *clientHandshakeStateTLS13) readServerCertificate() error { - c := hs.c - - // Either a PSK or a certificate is always used, but not both. - // See RFC 8446, Section 4.1.1. 
- if hs.usingPSK { - return nil - } - - msg, err := c.readHandshake() - if err != nil { - return err - } - - certReq, ok := msg.(*certificateRequestMsgTLS13) - if ok { - hs.transcript.Write(certReq.marshal()) - - hs.certReq = certReq - - msg, err = c.readHandshake() - if err != nil { - return err - } - } - - certMsg, ok := msg.(*certificateMsgTLS13) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(certMsg, msg) - } - if len(certMsg.certificate.Certificate) == 0 { - c.sendAlert(alertDecodeError) - return errors.New("tls: received empty certificates message") - } - hs.transcript.Write(certMsg.marshal()) - - c.scts = certMsg.certificate.SignedCertificateTimestamps - c.ocspResponse = certMsg.certificate.OCSPStaple - - if err := c.verifyServerCertificate(certMsg.certificate.Certificate); err != nil { - return err - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - - certVerify, ok := msg.(*certificateVerifyMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(certVerify, msg) - } - - // See RFC 8446, Section 4.4.3. 
- if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms) { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: invalid certificate signature algorithm") - } - sigType := signatureFromSignatureScheme(certVerify.signatureAlgorithm) - sigHash, err := hashFromSignatureScheme(certVerify.signatureAlgorithm) - if sigType == 0 || err != nil { - c.sendAlert(alertInternalError) - return err - } - if sigType == signaturePKCS1v15 || sigHash == crypto.SHA1 { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: invalid certificate signature algorithm") - } - h := sigHash.New() - writeSignedMessage(h, serverSignatureContext, hs.transcript) - if err := verifyHandshakeSignature(sigType, c.peerCertificates[0].PublicKey, - sigHash, h.Sum(nil), certVerify.signature); err != nil { - c.sendAlert(alertDecryptError) - return errors.New("tls: invalid certificate signature") - } - - hs.transcript.Write(certVerify.marshal()) - - return nil -} - -func (hs *clientHandshakeStateTLS13) readServerFinished() error { - c := hs.c - - msg, err := c.readHandshake() - if err != nil { - return err - } - - finished, ok := msg.(*finishedMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(finished, msg) - } - - expectedMAC := hs.suite.finishedHash(c.in.trafficSecret, hs.transcript) - if !hmac.Equal(expectedMAC, finished.verifyData) { - c.sendAlert(alertDecryptError) - return errors.New("tls: invalid server finished hash") - } - - hs.transcript.Write(finished.marshal()) - - // Derive secrets that take context through the server Finished. 
- - hs.trafficSecret = hs.suite.deriveSecret(hs.masterSecret, - clientApplicationTrafficLabel, hs.transcript) - serverSecret := hs.suite.deriveSecret(hs.masterSecret, - serverApplicationTrafficLabel, hs.transcript) - c.in.exportKey(hs.suite, serverSecret) - c.in.setTrafficSecret(hs.suite, serverSecret) - - err = c.config.writeKeyLog(keyLogLabelClientTraffic, hs.hello.random, hs.trafficSecret) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - err = c.config.writeKeyLog(keyLogLabelServerTraffic, hs.hello.random, serverSecret) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - - c.ekm = hs.suite.exportKeyingMaterial(hs.masterSecret, hs.transcript) - - return nil -} - -func (hs *clientHandshakeStateTLS13) sendClientCertificate() error { - c := hs.c - - if hs.certReq == nil { - return nil - } - - cert, err := c.getClientCertificate(&CertificateRequestInfo{ - AcceptableCAs: hs.certReq.certificateAuthorities, - SignatureSchemes: hs.certReq.supportedSignatureAlgorithms, - }) - if err != nil { - return err - } - - certMsg := new(certificateMsgTLS13) - - certMsg.certificate = *cert - certMsg.scts = hs.certReq.scts && len(cert.SignedCertificateTimestamps) > 0 - certMsg.ocspStapling = hs.certReq.ocspStapling && len(cert.OCSPStaple) > 0 - - hs.transcript.Write(certMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil { - return err - } - - // If we sent an empty certificate message, skip the CertificateVerify. - if len(cert.Certificate) == 0 { - return nil - } - - certVerifyMsg := new(certificateVerifyMsg) - certVerifyMsg.hasSignatureAlgorithm = true - - supportedAlgs := signatureSchemesForCertificate(c.vers, cert) - if supportedAlgs == nil { - c.sendAlert(alertInternalError) - return unsupportedCertificateError(cert) - } - // Pick signature scheme in server preference order, as the client - // preference order is not configurable. 
- for _, preferredAlg := range hs.certReq.supportedSignatureAlgorithms { - if isSupportedSignatureAlgorithm(preferredAlg, supportedAlgs) { - certVerifyMsg.signatureAlgorithm = preferredAlg - break - } - } - if certVerifyMsg.signatureAlgorithm == 0 { - // getClientCertificate returned a certificate incompatible with the - // CertificateRequestInfo supported signature algorithms. - c.sendAlert(alertHandshakeFailure) - return errors.New("tls: server doesn't support selected certificate") - } - - sigType := signatureFromSignatureScheme(certVerifyMsg.signatureAlgorithm) - sigHash, err := hashFromSignatureScheme(certVerifyMsg.signatureAlgorithm) - if sigType == 0 || err != nil { - return c.sendAlert(alertInternalError) - } - h := sigHash.New() - writeSignedMessage(h, clientSignatureContext, hs.transcript) - - signOpts := crypto.SignerOpts(sigHash) - if sigType == signatureRSAPSS { - signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash} - } - sig, err := cert.PrivateKey.(crypto.Signer).Sign(c.config.rand(), h.Sum(nil), signOpts) - if err != nil { - c.sendAlert(alertInternalError) - return errors.New("tls: failed to sign handshake: " + err.Error()) - } - certVerifyMsg.signature = sig - - hs.transcript.Write(certVerifyMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil { - return err - } - - return nil -} - -func (hs *clientHandshakeStateTLS13) sendClientFinished() error { - c := hs.c - - finished := &finishedMsg{ - verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript), - } - - hs.transcript.Write(finished.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil { - return err - } - - c.out.exportKey(hs.suite, hs.trafficSecret) - c.out.setTrafficSecret(hs.suite, hs.trafficSecret) - - if !c.config.SessionTicketsDisabled && c.config.ClientSessionCache != nil { - c.resumptionSecret = hs.suite.deriveSecret(hs.masterSecret, - resumptionLabel, 
hs.transcript) - } - - return nil -} - -func (c *Conn) handleNewSessionTicket(msg *newSessionTicketMsgTLS13) error { - if !c.isClient { - c.sendAlert(alertUnexpectedMessage) - return errors.New("tls: received new session ticket from a client") - } - - if c.config.SessionTicketsDisabled || c.config.ClientSessionCache == nil { - return nil - } - - // See RFC 8446, Section 4.6.1. - if msg.lifetime == 0 { - return nil - } - lifetime := time.Duration(msg.lifetime) * time.Second - if lifetime > maxSessionTicketLifetime { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: received a session ticket with invalid lifetime") - } - - cipherSuite := cipherSuiteTLS13ByID(c.cipherSuite) - if cipherSuite == nil || c.resumptionSecret == nil { - return c.sendAlert(alertInternalError) - } - - // Save the resumption_master_secret and nonce instead of deriving the PSK - // to do the least amount of work on NewSessionTicket messages before we - // know if the ticket will be used. Forward secrecy of resumed connections - // is guaranteed by the requirement for pskModeDHE. - session := &ClientSessionState{ - sessionTicket: msg.label, - vers: c.vers, - cipherSuite: c.cipherSuite, - masterSecret: c.resumptionSecret, - serverCertificates: c.peerCertificates, - verifiedChains: c.verifiedChains, - receivedAt: c.config.time(), - nonce: msg.nonce, - useBy: c.config.time().Add(lifetime), - ageAdd: msg.ageAdd, - } - - cacheKey := clientSessionCacheKey(c.conn.RemoteAddr(), c.config) - c.config.ClientSessionCache.Put(cacheKey, session) - - return nil -} diff --git a/vendor/github.com/marten-seemann/qtls/handshake_messages.go b/vendor/github.com/marten-seemann/qtls/handshake_messages.go deleted file mode 100644 index 9c2ffa1d8..000000000 --- a/vendor/github.com/marten-seemann/qtls/handshake_messages.go +++ /dev/null @@ -1,1900 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package qtls - -import ( - "fmt" - "strings" - - "golang.org/x/crypto/cryptobyte" -) - -// The marshalingFunction type is an adapter to allow the use of ordinary -// functions as cryptobyte.MarshalingValue. -type marshalingFunction func(b *cryptobyte.Builder) error - -func (f marshalingFunction) Marshal(b *cryptobyte.Builder) error { - return f(b) -} - -// addBytesWithLength appends a sequence of bytes to the cryptobyte.Builder. If -// the length of the sequence is not the value specified, it produces an error. -func addBytesWithLength(b *cryptobyte.Builder, v []byte, n int) { - b.AddValue(marshalingFunction(func(b *cryptobyte.Builder) error { - if len(v) != n { - return fmt.Errorf("invalid value length: expected %d, got %d", n, len(v)) - } - b.AddBytes(v) - return nil - })) -} - -// addUint64 appends a big-endian, 64-bit value to the cryptobyte.Builder. -func addUint64(b *cryptobyte.Builder, v uint64) { - b.AddUint32(uint32(v >> 32)) - b.AddUint32(uint32(v)) -} - -// readUint64 decodes a big-endian, 64-bit value into out and advances over it. -// It reports whether the read was successful. -func readUint64(s *cryptobyte.String, out *uint64) bool { - var hi, lo uint32 - if !s.ReadUint32(&hi) || !s.ReadUint32(&lo) { - return false - } - *out = uint64(hi)<<32 | uint64(lo) - return true -} - -// readUint8LengthPrefixed acts like s.ReadUint8LengthPrefixed, but targets a -// []byte instead of a cryptobyte.String. -func readUint8LengthPrefixed(s *cryptobyte.String, out *[]byte) bool { - return s.ReadUint8LengthPrefixed((*cryptobyte.String)(out)) -} - -// readUint16LengthPrefixed acts like s.ReadUint16LengthPrefixed, but targets a -// []byte instead of a cryptobyte.String. 
-func readUint16LengthPrefixed(s *cryptobyte.String, out *[]byte) bool { - return s.ReadUint16LengthPrefixed((*cryptobyte.String)(out)) -} - -// readUint24LengthPrefixed acts like s.ReadUint24LengthPrefixed, but targets a -// []byte instead of a cryptobyte.String. -func readUint24LengthPrefixed(s *cryptobyte.String, out *[]byte) bool { - return s.ReadUint24LengthPrefixed((*cryptobyte.String)(out)) -} - -type clientHelloMsg struct { - raw []byte - vers uint16 - random []byte - sessionId []byte - cipherSuites []uint16 - compressionMethods []uint8 - nextProtoNeg bool - serverName string - ocspStapling bool - supportedCurves []CurveID - supportedPoints []uint8 - ticketSupported bool - sessionTicket []uint8 - supportedSignatureAlgorithms []SignatureScheme - supportedSignatureAlgorithmsCert []SignatureScheme - secureRenegotiationSupported bool - secureRenegotiation []byte - alpnProtocols []string - scts bool - supportedVersions []uint16 - cookie []byte - keyShares []keyShare - earlyData bool - pskModes []uint8 - pskIdentities []pskIdentity - pskBinders [][]byte - additionalExtensions []Extension -} - -func (m *clientHelloMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - - var b cryptobyte.Builder - b.AddUint8(typeClientHello) - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(m.vers) - addBytesWithLength(b, m.random, 32) - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.sessionId) - }) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, suite := range m.cipherSuites { - b.AddUint16(suite) - } - }) - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.compressionMethods) - }) - - // If extensions aren't present, omit them. 
- var extensionsPresent bool - bWithoutExtensions := *b - - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - if m.nextProtoNeg { - // draft-agl-tls-nextprotoneg-04 - b.AddUint16(extensionNextProtoNeg) - b.AddUint16(0) // empty extension_data - } - if len(m.serverName) > 0 { - // RFC 6066, Section 3 - b.AddUint16(extensionServerName) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8(0) // name_type = host_name - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte(m.serverName)) - }) - }) - }) - } - if m.ocspStapling { - // RFC 4366, Section 3.6 - b.AddUint16(extensionStatusRequest) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8(1) // status_type = ocsp - b.AddUint16(0) // empty responder_id_list - b.AddUint16(0) // empty request_extensions - }) - } - if len(m.supportedCurves) > 0 { - // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7 - b.AddUint16(extensionSupportedCurves) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, curve := range m.supportedCurves { - b.AddUint16(uint16(curve)) - } - }) - }) - } - if len(m.supportedPoints) > 0 { - // RFC 4492, Section 5.1.2 - b.AddUint16(extensionSupportedPoints) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.supportedPoints) - }) - }) - } - if m.ticketSupported { - // RFC 5077, Section 3.2 - b.AddUint16(extensionSessionTicket) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.sessionTicket) - }) - } - if len(m.supportedSignatureAlgorithms) > 0 { - // RFC 5246, Section 7.4.1.4.1 - b.AddUint16(extensionSignatureAlgorithms) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, sigAlgo := range m.supportedSignatureAlgorithms { - 
b.AddUint16(uint16(sigAlgo)) - } - }) - }) - } - if len(m.supportedSignatureAlgorithmsCert) > 0 { - // RFC 8446, Section 4.2.3 - b.AddUint16(extensionSignatureAlgorithmsCert) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, sigAlgo := range m.supportedSignatureAlgorithmsCert { - b.AddUint16(uint16(sigAlgo)) - } - }) - }) - } - if m.secureRenegotiationSupported { - // RFC 5746, Section 3.2 - b.AddUint16(extensionRenegotiationInfo) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.secureRenegotiation) - }) - }) - } - if len(m.alpnProtocols) > 0 { - // RFC 7301, Section 3.1 - b.AddUint16(extensionALPN) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, proto := range m.alpnProtocols { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte(proto)) - }) - } - }) - }) - } - if m.scts { - // RFC 6962, Section 3.3.1 - b.AddUint16(extensionSCT) - b.AddUint16(0) // empty extension_data - } - if len(m.supportedVersions) > 0 { - // RFC 8446, Section 4.2.1 - b.AddUint16(extensionSupportedVersions) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - for _, vers := range m.supportedVersions { - b.AddUint16(vers) - } - }) - }) - } - if len(m.cookie) > 0 { - // RFC 8446, Section 4.2.2 - b.AddUint16(extensionCookie) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.cookie) - }) - }) - } - if len(m.keyShares) > 0 { - // RFC 8446, Section 4.2.8 - b.AddUint16(extensionKeyShare) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, ks := range m.keyShares { - b.AddUint16(uint16(ks.group)) - b.AddUint16LengthPrefixed(func(b 
*cryptobyte.Builder) { - b.AddBytes(ks.data) - }) - } - }) - }) - } - if m.earlyData { - // RFC 8446, Section 4.2.10 - b.AddUint16(extensionEarlyData) - b.AddUint16(0) // empty extension_data - } - if len(m.pskModes) > 0 { - // RFC 8446, Section 4.2.9 - b.AddUint16(extensionPSKModes) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.pskModes) - }) - }) - } - for _, ext := range m.additionalExtensions { - b.AddUint16(ext.Type) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(ext.Data) - }) - } - if len(m.pskIdentities) > 0 { // pre_shared_key must be the last extension - // RFC 8446, Section 4.2.11 - b.AddUint16(extensionPreSharedKey) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, psk := range m.pskIdentities { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(psk.label) - }) - b.AddUint32(psk.obfuscatedTicketAge) - } - }) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, binder := range m.pskBinders { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(binder) - }) - } - }) - }) - } - - extensionsPresent = len(b.BytesOrPanic()) > 2 - }) - - if !extensionsPresent { - *b = bWithoutExtensions - } - }) - - m.raw = b.BytesOrPanic() - return m.raw -} - -// marshalWithoutBinders returns the ClientHello through the -// PreSharedKeyExtension.identities field, according to RFC 8446, Section -// 4.2.11.2. Note that m.pskBinders must be set to slices of the correct length. 
-func (m *clientHelloMsg) marshalWithoutBinders() []byte { - bindersLen := 2 // uint16 length prefix - for _, binder := range m.pskBinders { - bindersLen += 1 // uint8 length prefix - bindersLen += len(binder) - } - - fullMessage := m.marshal() - return fullMessage[:len(fullMessage)-bindersLen] -} - -// updateBinders updates the m.pskBinders field, if necessary updating the -// cached marshalled representation. The supplied binders must have the same -// length as the current m.pskBinders. -func (m *clientHelloMsg) updateBinders(pskBinders [][]byte) { - if len(pskBinders) != len(m.pskBinders) { - panic("tls: internal error: pskBinders length mismatch") - } - for i := range m.pskBinders { - if len(pskBinders[i]) != len(m.pskBinders[i]) { - panic("tls: internal error: pskBinders length mismatch") - } - } - m.pskBinders = pskBinders - if m.raw != nil { - lenWithoutBinders := len(m.marshalWithoutBinders()) - // TODO(filippo): replace with NewFixedBuilder once CL 148882 is imported. - b := cryptobyte.NewBuilder(m.raw[:lenWithoutBinders]) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, binder := range m.pskBinders { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(binder) - }) - } - }) - if len(b.BytesOrPanic()) != len(m.raw) { - panic("tls: internal error: failed to update binders") - } - } -} - -func (m *clientHelloMsg) unmarshal(data []byte) bool { - *m = clientHelloMsg{raw: data} - s := cryptobyte.String(data) - - if !s.Skip(4) || // message type and uint24 length field - !s.ReadUint16(&m.vers) || !s.ReadBytes(&m.random, 32) || - !readUint8LengthPrefixed(&s, &m.sessionId) { - return false - } - - var cipherSuites cryptobyte.String - if !s.ReadUint16LengthPrefixed(&cipherSuites) { - return false - } - m.cipherSuites = []uint16{} - m.secureRenegotiationSupported = false - for !cipherSuites.Empty() { - var suite uint16 - if !cipherSuites.ReadUint16(&suite) { - return false - } - if suite == scsvRenegotiation { - 
m.secureRenegotiationSupported = true - } - m.cipherSuites = append(m.cipherSuites, suite) - } - - if !readUint8LengthPrefixed(&s, &m.compressionMethods) { - return false - } - - if s.Empty() { - // ClientHello is optionally followed by extension data - return true - } - - var extensions cryptobyte.String - if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() { - return false - } - - for !extensions.Empty() { - var ext uint16 - var extData cryptobyte.String - if !extensions.ReadUint16(&ext) || - !extensions.ReadUint16LengthPrefixed(&extData) { - return false - } - - switch ext { - case extensionServerName: - // RFC 6066, Section 3 - var nameList cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&nameList) || nameList.Empty() { - return false - } - for !nameList.Empty() { - var nameType uint8 - var serverName cryptobyte.String - if !nameList.ReadUint8(&nameType) || - !nameList.ReadUint16LengthPrefixed(&serverName) || - serverName.Empty() { - return false - } - if nameType != 0 { - continue - } - if len(m.serverName) != 0 { - // Multiple names of the same name_type are prohibited. - return false - } - m.serverName = string(serverName) - // An SNI value may not include a trailing dot. 
- if strings.HasSuffix(m.serverName, ".") { - return false - } - } - case extensionNextProtoNeg: - // draft-agl-tls-nextprotoneg-04 - m.nextProtoNeg = true - case extensionStatusRequest: - // RFC 4366, Section 3.6 - var statusType uint8 - var ignored cryptobyte.String - if !extData.ReadUint8(&statusType) || - !extData.ReadUint16LengthPrefixed(&ignored) || - !extData.ReadUint16LengthPrefixed(&ignored) { - return false - } - m.ocspStapling = statusType == statusTypeOCSP - case extensionSupportedCurves: - // RFC 4492, sections 5.1.1 and RFC 8446, Section 4.2.7 - var curves cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&curves) || curves.Empty() { - return false - } - for !curves.Empty() { - var curve uint16 - if !curves.ReadUint16(&curve) { - return false - } - m.supportedCurves = append(m.supportedCurves, CurveID(curve)) - } - case extensionSupportedPoints: - // RFC 4492, Section 5.1.2 - if !readUint8LengthPrefixed(&extData, &m.supportedPoints) || - len(m.supportedPoints) == 0 { - return false - } - case extensionSessionTicket: - // RFC 5077, Section 3.2 - m.ticketSupported = true - extData.ReadBytes(&m.sessionTicket, len(extData)) - case extensionSignatureAlgorithms: - // RFC 5246, Section 7.4.1.4.1 - var sigAndAlgs cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() { - return false - } - for !sigAndAlgs.Empty() { - var sigAndAlg uint16 - if !sigAndAlgs.ReadUint16(&sigAndAlg) { - return false - } - m.supportedSignatureAlgorithms = append( - m.supportedSignatureAlgorithms, SignatureScheme(sigAndAlg)) - } - case extensionSignatureAlgorithmsCert: - // RFC 8446, Section 4.2.3 - var sigAndAlgs cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() { - return false - } - for !sigAndAlgs.Empty() { - var sigAndAlg uint16 - if !sigAndAlgs.ReadUint16(&sigAndAlg) { - return false - } - m.supportedSignatureAlgorithmsCert = append( - m.supportedSignatureAlgorithmsCert, 
SignatureScheme(sigAndAlg)) - } - case extensionRenegotiationInfo: - // RFC 5746, Section 3.2 - if !readUint8LengthPrefixed(&extData, &m.secureRenegotiation) { - return false - } - m.secureRenegotiationSupported = true - case extensionALPN: - // RFC 7301, Section 3.1 - var protoList cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() { - return false - } - for !protoList.Empty() { - var proto cryptobyte.String - if !protoList.ReadUint8LengthPrefixed(&proto) || proto.Empty() { - return false - } - m.alpnProtocols = append(m.alpnProtocols, string(proto)) - } - case extensionSCT: - // RFC 6962, Section 3.3.1 - m.scts = true - case extensionSupportedVersions: - // RFC 8446, Section 4.2.1 - var versList cryptobyte.String - if !extData.ReadUint8LengthPrefixed(&versList) || versList.Empty() { - return false - } - for !versList.Empty() { - var vers uint16 - if !versList.ReadUint16(&vers) { - return false - } - m.supportedVersions = append(m.supportedVersions, vers) - } - case extensionCookie: - // RFC 8446, Section 4.2.2 - if !readUint16LengthPrefixed(&extData, &m.cookie) || - len(m.cookie) == 0 { - return false - } - case extensionKeyShare: - // RFC 8446, Section 4.2.8 - var clientShares cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&clientShares) { - return false - } - for !clientShares.Empty() { - var ks keyShare - if !clientShares.ReadUint16((*uint16)(&ks.group)) || - !readUint16LengthPrefixed(&clientShares, &ks.data) || - len(ks.data) == 0 { - return false - } - m.keyShares = append(m.keyShares, ks) - } - case extensionEarlyData: - // RFC 8446, Section 4.2.10 - m.earlyData = true - case extensionPSKModes: - // RFC 8446, Section 4.2.9 - if !readUint8LengthPrefixed(&extData, &m.pskModes) { - return false - } - case extensionPreSharedKey: - // RFC 8446, Section 4.2.11 - if !extensions.Empty() { - return false // pre_shared_key must be the last extension - } - var identities cryptobyte.String - if 
!extData.ReadUint16LengthPrefixed(&identities) || identities.Empty() { - return false - } - for !identities.Empty() { - var psk pskIdentity - if !readUint16LengthPrefixed(&identities, &psk.label) || - !identities.ReadUint32(&psk.obfuscatedTicketAge) || - len(psk.label) == 0 { - return false - } - m.pskIdentities = append(m.pskIdentities, psk) - } - var binders cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&binders) || binders.Empty() { - return false - } - for !binders.Empty() { - var binder []byte - if !readUint8LengthPrefixed(&binders, &binder) || - len(binder) == 0 { - return false - } - m.pskBinders = append(m.pskBinders, binder) - } - default: - m.additionalExtensions = append(m.additionalExtensions, Extension{Type: ext, Data: extData}) - continue - } - - if !extData.Empty() { - return false - } - } - - return true -} - -type serverHelloMsg struct { - raw []byte - vers uint16 - random []byte - sessionId []byte - cipherSuite uint16 - compressionMethod uint8 - nextProtoNeg bool - nextProtos []string - ocspStapling bool - ticketSupported bool - secureRenegotiationSupported bool - secureRenegotiation []byte - alpnProtocol string - scts [][]byte - supportedVersion uint16 - serverShare keyShare - selectedIdentityPresent bool - selectedIdentity uint16 - - // HelloRetryRequest extensions - cookie []byte - selectedGroup CurveID -} - -func (m *serverHelloMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - - var b cryptobyte.Builder - b.AddUint8(typeServerHello) - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(m.vers) - addBytesWithLength(b, m.random, 32) - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.sessionId) - }) - b.AddUint16(m.cipherSuite) - b.AddUint8(m.compressionMethod) - - // If extensions aren't present, omit them. 
- var extensionsPresent bool - bWithoutExtensions := *b - - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - if m.nextProtoNeg { - b.AddUint16(extensionNextProtoNeg) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, proto := range m.nextProtos { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte(proto)) - }) - } - }) - } - if m.ocspStapling { - b.AddUint16(extensionStatusRequest) - b.AddUint16(0) // empty extension_data - } - if m.ticketSupported { - b.AddUint16(extensionSessionTicket) - b.AddUint16(0) // empty extension_data - } - if m.secureRenegotiationSupported { - b.AddUint16(extensionRenegotiationInfo) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.secureRenegotiation) - }) - }) - } - if len(m.alpnProtocol) > 0 { - b.AddUint16(extensionALPN) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte(m.alpnProtocol)) - }) - }) - }) - } - if len(m.scts) > 0 { - b.AddUint16(extensionSCT) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, sct := range m.scts { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(sct) - }) - } - }) - }) - } - if m.supportedVersion != 0 { - b.AddUint16(extensionSupportedVersions) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(m.supportedVersion) - }) - } - if m.serverShare.group != 0 { - b.AddUint16(extensionKeyShare) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(uint16(m.serverShare.group)) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.serverShare.data) - }) - }) - } - if m.selectedIdentityPresent { - b.AddUint16(extensionPreSharedKey) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - 
b.AddUint16(m.selectedIdentity) - }) - } - - if len(m.cookie) > 0 { - b.AddUint16(extensionCookie) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.cookie) - }) - }) - } - if m.selectedGroup != 0 { - b.AddUint16(extensionKeyShare) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16(uint16(m.selectedGroup)) - }) - } - - extensionsPresent = len(b.BytesOrPanic()) > 2 - }) - - if !extensionsPresent { - *b = bWithoutExtensions - } - }) - - m.raw = b.BytesOrPanic() - return m.raw -} - -func (m *serverHelloMsg) unmarshal(data []byte) bool { - *m = serverHelloMsg{raw: data} - s := cryptobyte.String(data) - - if !s.Skip(4) || // message type and uint24 length field - !s.ReadUint16(&m.vers) || !s.ReadBytes(&m.random, 32) || - !readUint8LengthPrefixed(&s, &m.sessionId) || - !s.ReadUint16(&m.cipherSuite) || - !s.ReadUint8(&m.compressionMethod) { - return false - } - - if s.Empty() { - // ServerHello is optionally followed by extension data - return true - } - - var extensions cryptobyte.String - if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() { - return false - } - - for !extensions.Empty() { - var extension uint16 - var extData cryptobyte.String - if !extensions.ReadUint16(&extension) || - !extensions.ReadUint16LengthPrefixed(&extData) { - return false - } - - switch extension { - case extensionNextProtoNeg: - m.nextProtoNeg = true - for !extData.Empty() { - var proto cryptobyte.String - if !extData.ReadUint8LengthPrefixed(&proto) || - proto.Empty() { - return false - } - m.nextProtos = append(m.nextProtos, string(proto)) - } - case extensionStatusRequest: - m.ocspStapling = true - case extensionSessionTicket: - m.ticketSupported = true - case extensionRenegotiationInfo: - if !readUint8LengthPrefixed(&extData, &m.secureRenegotiation) { - return false - } - m.secureRenegotiationSupported = true - case extensionALPN: - var protoList cryptobyte.String - if 
!extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() { - return false - } - var proto cryptobyte.String - if !protoList.ReadUint8LengthPrefixed(&proto) || - proto.Empty() || !protoList.Empty() { - return false - } - m.alpnProtocol = string(proto) - case extensionSCT: - var sctList cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&sctList) || sctList.Empty() { - return false - } - for !sctList.Empty() { - var sct []byte - if !readUint16LengthPrefixed(&sctList, &sct) || - len(sct) == 0 { - return false - } - m.scts = append(m.scts, sct) - } - case extensionSupportedVersions: - if !extData.ReadUint16(&m.supportedVersion) { - return false - } - case extensionCookie: - if !readUint16LengthPrefixed(&extData, &m.cookie) || - len(m.cookie) == 0 { - return false - } - case extensionKeyShare: - // This extension has different formats in SH and HRR, accept either - // and let the handshake logic decide. See RFC 8446, Section 4.2.8. - if len(extData) == 2 { - if !extData.ReadUint16((*uint16)(&m.selectedGroup)) { - return false - } - } else { - if !extData.ReadUint16((*uint16)(&m.serverShare.group)) || - !readUint16LengthPrefixed(&extData, &m.serverShare.data) { - return false - } - } - case extensionPreSharedKey: - m.selectedIdentityPresent = true - if !extData.ReadUint16(&m.selectedIdentity) { - return false - } - default: - // Ignore unknown extensions. 
- continue - } - - if !extData.Empty() { - return false - } - } - - return true -} - -type encryptedExtensionsMsg struct { - raw []byte - alpnProtocol string - - additionalExtensions []Extension -} - -func (m *encryptedExtensionsMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - - var b cryptobyte.Builder - b.AddUint8(typeEncryptedExtensions) - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - if len(m.alpnProtocol) > 0 { - b.AddUint16(extensionALPN) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte(m.alpnProtocol)) - }) - }) - }) - } - for _, ext := range m.additionalExtensions { - b.AddUint16(ext.Type) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(ext.Data) - }) - } - }) - }) - - m.raw = b.BytesOrPanic() - return m.raw -} - -func (m *encryptedExtensionsMsg) unmarshal(data []byte) bool { - *m = encryptedExtensionsMsg{raw: data} - s := cryptobyte.String(data) - - var extensions cryptobyte.String - if !s.Skip(4) || // message type and uint24 length field - !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() { - return false - } - - for !extensions.Empty() { - var ext uint16 - var extData cryptobyte.String - if !extensions.ReadUint16(&ext) || - !extensions.ReadUint16LengthPrefixed(&extData) { - return false - } - - switch ext { - case extensionALPN: - var protoList cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() { - return false - } - var proto cryptobyte.String - if !protoList.ReadUint8LengthPrefixed(&proto) || - proto.Empty() || !protoList.Empty() { - return false - } - m.alpnProtocol = string(proto) - default: - m.additionalExtensions = append(m.additionalExtensions, Extension{Type: ext, Data: extData}) - continue - } - - if !extData.Empty() { - return false - } - } - - return 
true -} - -type endOfEarlyDataMsg struct{} - -func (m *endOfEarlyDataMsg) marshal() []byte { - x := make([]byte, 4) - x[0] = typeEndOfEarlyData - return x -} - -func (m *endOfEarlyDataMsg) unmarshal(data []byte) bool { - return len(data) == 4 -} - -type keyUpdateMsg struct { - raw []byte - updateRequested bool -} - -func (m *keyUpdateMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - - var b cryptobyte.Builder - b.AddUint8(typeKeyUpdate) - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - if m.updateRequested { - b.AddUint8(1) - } else { - b.AddUint8(0) - } - }) - - m.raw = b.BytesOrPanic() - return m.raw -} - -func (m *keyUpdateMsg) unmarshal(data []byte) bool { - m.raw = data - s := cryptobyte.String(data) - - var updateRequested uint8 - if !s.Skip(4) || // message type and uint24 length field - !s.ReadUint8(&updateRequested) || !s.Empty() { - return false - } - switch updateRequested { - case 0: - m.updateRequested = false - case 1: - m.updateRequested = true - default: - return false - } - return true -} - -type newSessionTicketMsgTLS13 struct { - raw []byte - lifetime uint32 - ageAdd uint32 - nonce []byte - label []byte - maxEarlyData uint32 -} - -func (m *newSessionTicketMsgTLS13) marshal() []byte { - if m.raw != nil { - return m.raw - } - - var b cryptobyte.Builder - b.AddUint8(typeNewSessionTicket) - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint32(m.lifetime) - b.AddUint32(m.ageAdd) - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.nonce) - }) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.label) - }) - - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - if m.maxEarlyData > 0 { - b.AddUint16(extensionEarlyData) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint32(m.maxEarlyData) - }) - } - }) - }) - - m.raw = b.BytesOrPanic() - return m.raw -} - -func (m *newSessionTicketMsgTLS13) unmarshal(data []byte) bool { - *m = 
newSessionTicketMsgTLS13{raw: data} - s := cryptobyte.String(data) - - var extensions cryptobyte.String - if !s.Skip(4) || // message type and uint24 length field - !s.ReadUint32(&m.lifetime) || - !s.ReadUint32(&m.ageAdd) || - !readUint8LengthPrefixed(&s, &m.nonce) || - !readUint16LengthPrefixed(&s, &m.label) || - !s.ReadUint16LengthPrefixed(&extensions) || - !s.Empty() { - return false - } - - for !extensions.Empty() { - var extension uint16 - var extData cryptobyte.String - if !extensions.ReadUint16(&extension) || - !extensions.ReadUint16LengthPrefixed(&extData) { - return false - } - - switch extension { - case extensionEarlyData: - if !extData.ReadUint32(&m.maxEarlyData) { - return false - } - default: - // Ignore unknown extensions. - continue - } - - if !extData.Empty() { - return false - } - } - - return true -} - -type certificateRequestMsgTLS13 struct { - raw []byte - ocspStapling bool - scts bool - supportedSignatureAlgorithms []SignatureScheme - supportedSignatureAlgorithmsCert []SignatureScheme - certificateAuthorities [][]byte -} - -func (m *certificateRequestMsgTLS13) marshal() []byte { - if m.raw != nil { - return m.raw - } - - var b cryptobyte.Builder - b.AddUint8(typeCertificateRequest) - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - // certificate_request_context (SHALL be zero length unless used for - // post-handshake authentication) - b.AddUint8(0) - - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - if m.ocspStapling { - b.AddUint16(extensionStatusRequest) - b.AddUint16(0) // empty extension_data - } - if m.scts { - // RFC 8446, Section 4.4.2.1 makes no mention of - // signed_certificate_timestamp in CertificateRequest, but - // "Extensions in the Certificate message from the client MUST - // correspond to extensions in the CertificateRequest message - // from the server." and it appears in the table in Section 4.2. 
- b.AddUint16(extensionSCT) - b.AddUint16(0) // empty extension_data - } - if len(m.supportedSignatureAlgorithms) > 0 { - b.AddUint16(extensionSignatureAlgorithms) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, sigAlgo := range m.supportedSignatureAlgorithms { - b.AddUint16(uint16(sigAlgo)) - } - }) - }) - } - if len(m.supportedSignatureAlgorithmsCert) > 0 { - b.AddUint16(extensionSignatureAlgorithmsCert) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, sigAlgo := range m.supportedSignatureAlgorithmsCert { - b.AddUint16(uint16(sigAlgo)) - } - }) - }) - } - if len(m.certificateAuthorities) > 0 { - b.AddUint16(extensionCertificateAuthorities) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, ca := range m.certificateAuthorities { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(ca) - }) - } - }) - }) - } - }) - }) - - m.raw = b.BytesOrPanic() - return m.raw -} - -func (m *certificateRequestMsgTLS13) unmarshal(data []byte) bool { - *m = certificateRequestMsgTLS13{raw: data} - s := cryptobyte.String(data) - - var context, extensions cryptobyte.String - if !s.Skip(4) || // message type and uint24 length field - !s.ReadUint8LengthPrefixed(&context) || !context.Empty() || - !s.ReadUint16LengthPrefixed(&extensions) || - !s.Empty() { - return false - } - - for !extensions.Empty() { - var extension uint16 - var extData cryptobyte.String - if !extensions.ReadUint16(&extension) || - !extensions.ReadUint16LengthPrefixed(&extData) { - return false - } - - switch extension { - case extensionStatusRequest: - m.ocspStapling = true - case extensionSCT: - m.scts = true - case extensionSignatureAlgorithms: - var sigAndAlgs cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() { - return false - } - 
for !sigAndAlgs.Empty() { - var sigAndAlg uint16 - if !sigAndAlgs.ReadUint16(&sigAndAlg) { - return false - } - m.supportedSignatureAlgorithms = append( - m.supportedSignatureAlgorithms, SignatureScheme(sigAndAlg)) - } - case extensionSignatureAlgorithmsCert: - var sigAndAlgs cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&sigAndAlgs) || sigAndAlgs.Empty() { - return false - } - for !sigAndAlgs.Empty() { - var sigAndAlg uint16 - if !sigAndAlgs.ReadUint16(&sigAndAlg) { - return false - } - m.supportedSignatureAlgorithmsCert = append( - m.supportedSignatureAlgorithmsCert, SignatureScheme(sigAndAlg)) - } - case extensionCertificateAuthorities: - var auths cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&auths) || auths.Empty() { - return false - } - for !auths.Empty() { - var ca []byte - if !readUint16LengthPrefixed(&auths, &ca) || len(ca) == 0 { - return false - } - m.certificateAuthorities = append(m.certificateAuthorities, ca) - } - default: - // Ignore unknown extensions. 
- continue - } - - if !extData.Empty() { - return false - } - } - - return true -} - -type certificateMsg struct { - raw []byte - certificates [][]byte -} - -func (m *certificateMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - var i int - for _, slice := range m.certificates { - i += len(slice) - } - - length := 3 + 3*len(m.certificates) + i - x = make([]byte, 4+length) - x[0] = typeCertificate - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - - certificateOctets := length - 3 - x[4] = uint8(certificateOctets >> 16) - x[5] = uint8(certificateOctets >> 8) - x[6] = uint8(certificateOctets) - - y := x[7:] - for _, slice := range m.certificates { - y[0] = uint8(len(slice) >> 16) - y[1] = uint8(len(slice) >> 8) - y[2] = uint8(len(slice)) - copy(y[3:], slice) - y = y[3+len(slice):] - } - - m.raw = x - return -} - -func (m *certificateMsg) unmarshal(data []byte) bool { - if len(data) < 7 { - return false - } - - m.raw = data - certsLen := uint32(data[4])<<16 | uint32(data[5])<<8 | uint32(data[6]) - if uint32(len(data)) != certsLen+7 { - return false - } - - numCerts := 0 - d := data[7:] - for certsLen > 0 { - if len(d) < 4 { - return false - } - certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2]) - if uint32(len(d)) < 3+certLen { - return false - } - d = d[3+certLen:] - certsLen -= 3 + certLen - numCerts++ - } - - m.certificates = make([][]byte, numCerts) - d = data[7:] - for i := 0; i < numCerts; i++ { - certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2]) - m.certificates[i] = d[3 : 3+certLen] - d = d[3+certLen:] - } - - return true -} - -type certificateMsgTLS13 struct { - raw []byte - certificate Certificate - ocspStapling bool - scts bool -} - -func (m *certificateMsgTLS13) marshal() []byte { - if m.raw != nil { - return m.raw - } - - var b cryptobyte.Builder - b.AddUint8(typeCertificate) - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8(0) // certificate_request_context - - 
certificate := m.certificate - if !m.ocspStapling { - certificate.OCSPStaple = nil - } - if !m.scts { - certificate.SignedCertificateTimestamps = nil - } - marshalCertificate(b, certificate) - }) - - m.raw = b.BytesOrPanic() - return m.raw -} - -func marshalCertificate(b *cryptobyte.Builder, certificate Certificate) { - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - for i, cert := range certificate.Certificate { - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(cert) - }) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - if i > 0 { - // This library only supports OCSP and SCT for leaf certificates. - return - } - if certificate.OCSPStaple != nil { - b.AddUint16(extensionStatusRequest) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8(statusTypeOCSP) - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(certificate.OCSPStaple) - }) - }) - } - if certificate.SignedCertificateTimestamps != nil { - b.AddUint16(extensionSCT) - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - for _, sct := range certificate.SignedCertificateTimestamps { - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(sct) - }) - } - }) - }) - } - }) - } - }) -} - -func (m *certificateMsgTLS13) unmarshal(data []byte) bool { - *m = certificateMsgTLS13{raw: data} - s := cryptobyte.String(data) - - var context cryptobyte.String - if !s.Skip(4) || // message type and uint24 length field - !s.ReadUint8LengthPrefixed(&context) || !context.Empty() || - !unmarshalCertificate(&s, &m.certificate) || - !s.Empty() { - return false - } - - m.scts = m.certificate.SignedCertificateTimestamps != nil - m.ocspStapling = m.certificate.OCSPStaple != nil - - return true -} - -func unmarshalCertificate(s *cryptobyte.String, certificate *Certificate) bool { - var certList cryptobyte.String - if !s.ReadUint24LengthPrefixed(&certList) { - return false - } - for 
!certList.Empty() { - var cert []byte - var extensions cryptobyte.String - if !readUint24LengthPrefixed(&certList, &cert) || - !certList.ReadUint16LengthPrefixed(&extensions) { - return false - } - certificate.Certificate = append(certificate.Certificate, cert) - for !extensions.Empty() { - var extension uint16 - var extData cryptobyte.String - if !extensions.ReadUint16(&extension) || - !extensions.ReadUint16LengthPrefixed(&extData) { - return false - } - if len(certificate.Certificate) > 1 { - // This library only supports OCSP and SCT for leaf certificates. - continue - } - - switch extension { - case extensionStatusRequest: - var statusType uint8 - if !extData.ReadUint8(&statusType) || statusType != statusTypeOCSP || - !readUint24LengthPrefixed(&extData, &certificate.OCSPStaple) || - len(certificate.OCSPStaple) == 0 { - return false - } - case extensionSCT: - var sctList cryptobyte.String - if !extData.ReadUint16LengthPrefixed(&sctList) || sctList.Empty() { - return false - } - for !sctList.Empty() { - var sct []byte - if !readUint16LengthPrefixed(&sctList, &sct) || - len(sct) == 0 { - return false - } - certificate.SignedCertificateTimestamps = append( - certificate.SignedCertificateTimestamps, sct) - } - default: - // Ignore unknown extensions. 
- continue - } - - if !extData.Empty() { - return false - } - } - } - return true -} - -type serverKeyExchangeMsg struct { - raw []byte - key []byte -} - -func (m *serverKeyExchangeMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - length := len(m.key) - x := make([]byte, length+4) - x[0] = typeServerKeyExchange - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - copy(x[4:], m.key) - - m.raw = x - return x -} - -func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool { - m.raw = data - if len(data) < 4 { - return false - } - m.key = data[4:] - return true -} - -type certificateStatusMsg struct { - raw []byte - response []byte -} - -func (m *certificateStatusMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - - var b cryptobyte.Builder - b.AddUint8(typeCertificateStatus) - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddUint8(statusTypeOCSP) - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.response) - }) - }) - - m.raw = b.BytesOrPanic() - return m.raw -} - -func (m *certificateStatusMsg) unmarshal(data []byte) bool { - m.raw = data - s := cryptobyte.String(data) - - var statusType uint8 - if !s.Skip(4) || // message type and uint24 length field - !s.ReadUint8(&statusType) || statusType != statusTypeOCSP || - !readUint24LengthPrefixed(&s, &m.response) || - len(m.response) == 0 || !s.Empty() { - return false - } - return true -} - -type serverHelloDoneMsg struct{} - -func (m *serverHelloDoneMsg) marshal() []byte { - x := make([]byte, 4) - x[0] = typeServerHelloDone - return x -} - -func (m *serverHelloDoneMsg) unmarshal(data []byte) bool { - return len(data) == 4 -} - -type clientKeyExchangeMsg struct { - raw []byte - ciphertext []byte -} - -func (m *clientKeyExchangeMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - length := len(m.ciphertext) - x := make([]byte, length+4) - x[0] = typeClientKeyExchange - x[1] = uint8(length >> 16) - x[2] = uint8(length 
>> 8) - x[3] = uint8(length) - copy(x[4:], m.ciphertext) - - m.raw = x - return x -} - -func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool { - m.raw = data - if len(data) < 4 { - return false - } - l := int(data[1])<<16 | int(data[2])<<8 | int(data[3]) - if l != len(data)-4 { - return false - } - m.ciphertext = data[4:] - return true -} - -type finishedMsg struct { - raw []byte - verifyData []byte -} - -func (m *finishedMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - - var b cryptobyte.Builder - b.AddUint8(typeFinished) - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.verifyData) - }) - - m.raw = b.BytesOrPanic() - return m.raw -} - -func (m *finishedMsg) unmarshal(data []byte) bool { - m.raw = data - s := cryptobyte.String(data) - return s.Skip(1) && - readUint24LengthPrefixed(&s, &m.verifyData) && - s.Empty() -} - -type nextProtoMsg struct { - raw []byte - proto string -} - -func (m *nextProtoMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - l := len(m.proto) - if l > 255 { - l = 255 - } - - padding := 32 - (l+2)%32 - length := l + padding + 2 - x := make([]byte, length+4) - x[0] = typeNextProtocol - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - - y := x[4:] - y[0] = byte(l) - copy(y[1:], []byte(m.proto[0:l])) - y = y[1+l:] - y[0] = byte(padding) - - m.raw = x - - return x -} - -func (m *nextProtoMsg) unmarshal(data []byte) bool { - m.raw = data - - if len(data) < 5 { - return false - } - data = data[4:] - protoLen := int(data[0]) - data = data[1:] - if len(data) < protoLen { - return false - } - m.proto = string(data[0:protoLen]) - data = data[protoLen:] - - if len(data) < 1 { - return false - } - paddingLen := int(data[0]) - data = data[1:] - if len(data) != paddingLen { - return false - } - - return true -} - -type certificateRequestMsg struct { - raw []byte - // hasSignatureAlgorithm indicates whether this message includes a list of - // supported signature 
algorithms. This change was introduced with TLS 1.2. - hasSignatureAlgorithm bool - - certificateTypes []byte - supportedSignatureAlgorithms []SignatureScheme - certificateAuthorities [][]byte -} - -func (m *certificateRequestMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - // See RFC 4346, Section 7.4.4. - length := 1 + len(m.certificateTypes) + 2 - casLength := 0 - for _, ca := range m.certificateAuthorities { - casLength += 2 + len(ca) - } - length += casLength - - if m.hasSignatureAlgorithm { - length += 2 + 2*len(m.supportedSignatureAlgorithms) - } - - x = make([]byte, 4+length) - x[0] = typeCertificateRequest - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - - x[4] = uint8(len(m.certificateTypes)) - - copy(x[5:], m.certificateTypes) - y := x[5+len(m.certificateTypes):] - - if m.hasSignatureAlgorithm { - n := len(m.supportedSignatureAlgorithms) * 2 - y[0] = uint8(n >> 8) - y[1] = uint8(n) - y = y[2:] - for _, sigAlgo := range m.supportedSignatureAlgorithms { - y[0] = uint8(sigAlgo >> 8) - y[1] = uint8(sigAlgo) - y = y[2:] - } - } - - y[0] = uint8(casLength >> 8) - y[1] = uint8(casLength) - y = y[2:] - for _, ca := range m.certificateAuthorities { - y[0] = uint8(len(ca) >> 8) - y[1] = uint8(len(ca)) - y = y[2:] - copy(y, ca) - y = y[len(ca):] - } - - m.raw = x - return -} - -func (m *certificateRequestMsg) unmarshal(data []byte) bool { - m.raw = data - - if len(data) < 5 { - return false - } - - length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) - if uint32(len(data))-4 != length { - return false - } - - numCertTypes := int(data[4]) - data = data[5:] - if numCertTypes == 0 || len(data) <= numCertTypes { - return false - } - - m.certificateTypes = make([]byte, numCertTypes) - if copy(m.certificateTypes, data) != numCertTypes { - return false - } - - data = data[numCertTypes:] - - if m.hasSignatureAlgorithm { - if len(data) < 2 { - return false - } - sigAndHashLen := uint16(data[0])<<8 | 
uint16(data[1]) - data = data[2:] - if sigAndHashLen&1 != 0 { - return false - } - if len(data) < int(sigAndHashLen) { - return false - } - numSigAlgos := sigAndHashLen / 2 - m.supportedSignatureAlgorithms = make([]SignatureScheme, numSigAlgos) - for i := range m.supportedSignatureAlgorithms { - m.supportedSignatureAlgorithms[i] = SignatureScheme(data[0])<<8 | SignatureScheme(data[1]) - data = data[2:] - } - } - - if len(data) < 2 { - return false - } - casLength := uint16(data[0])<<8 | uint16(data[1]) - data = data[2:] - if len(data) < int(casLength) { - return false - } - cas := make([]byte, casLength) - copy(cas, data) - data = data[casLength:] - - m.certificateAuthorities = nil - for len(cas) > 0 { - if len(cas) < 2 { - return false - } - caLen := uint16(cas[0])<<8 | uint16(cas[1]) - cas = cas[2:] - - if len(cas) < int(caLen) { - return false - } - - m.certificateAuthorities = append(m.certificateAuthorities, cas[:caLen]) - cas = cas[caLen:] - } - - return len(data) == 0 -} - -type certificateVerifyMsg struct { - raw []byte - hasSignatureAlgorithm bool // format change introduced in TLS 1.2 - signatureAlgorithm SignatureScheme - signature []byte -} - -func (m *certificateVerifyMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - var b cryptobyte.Builder - b.AddUint8(typeCertificateVerify) - b.AddUint24LengthPrefixed(func(b *cryptobyte.Builder) { - if m.hasSignatureAlgorithm { - b.AddUint16(uint16(m.signatureAlgorithm)) - } - b.AddUint16LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.signature) - }) - }) - - m.raw = b.BytesOrPanic() - return m.raw -} - -func (m *certificateVerifyMsg) unmarshal(data []byte) bool { - m.raw = data - s := cryptobyte.String(data) - - if !s.Skip(4) { // message type and uint24 length field - return false - } - if m.hasSignatureAlgorithm { - if !s.ReadUint16((*uint16)(&m.signatureAlgorithm)) { - return false - } - } - return readUint16LengthPrefixed(&s, &m.signature) && s.Empty() -} - -type 
newSessionTicketMsg struct { - raw []byte - ticket []byte -} - -func (m *newSessionTicketMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - // See RFC 5077, Section 3.3. - ticketLen := len(m.ticket) - length := 2 + 4 + ticketLen - x = make([]byte, 4+length) - x[0] = typeNewSessionTicket - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - x[8] = uint8(ticketLen >> 8) - x[9] = uint8(ticketLen) - copy(x[10:], m.ticket) - - m.raw = x - - return -} - -func (m *newSessionTicketMsg) unmarshal(data []byte) bool { - m.raw = data - - if len(data) < 10 { - return false - } - - length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) - if uint32(len(data))-4 != length { - return false - } - - ticketLen := int(data[8])<<8 + int(data[9]) - if len(data)-10 != ticketLen { - return false - } - - m.ticket = data[10:] - - return true -} - -type helloRequestMsg struct { -} - -func (*helloRequestMsg) marshal() []byte { - return []byte{typeHelloRequest, 0, 0, 0} -} - -func (*helloRequestMsg) unmarshal(data []byte) bool { - return len(data) == 4 -} diff --git a/vendor/github.com/marten-seemann/qtls/handshake_server.go b/vendor/github.com/marten-seemann/qtls/handshake_server.go deleted file mode 100644 index 2f3b41447..000000000 --- a/vendor/github.com/marten-seemann/qtls/handshake_server.go +++ /dev/null @@ -1,822 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package qtls - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/subtle" - "crypto/x509" - "errors" - "fmt" - "io" - "sync/atomic" -) - -// serverHandshakeState contains details of a server handshake in progress. -// It's discarded once the handshake has completed. 
-type serverHandshakeState struct { - c *Conn - clientHello *clientHelloMsg - hello *serverHelloMsg - suite *cipherSuite - ellipticOk bool - ecdsaOk bool - rsaDecryptOk bool - rsaSignOk bool - sessionState *sessionState - finishedHash finishedHash - masterSecret []byte - cert *Certificate -} - -// serverHandshake performs a TLS handshake as a server. -func (c *Conn) serverHandshake() error { - // If this is the first server handshake, we generate a random key to - // encrypt the tickets with. - c.config.serverInitOnce.Do(func() { c.config.serverInit(nil) }) - c.setAlternativeRecordLayer() - - clientHello, err := c.readClientHello() - if err != nil { - return err - } - - if c.vers == VersionTLS13 { - hs := serverHandshakeStateTLS13{ - c: c, - clientHello: clientHello, - } - return hs.handshake() - } - - hs := serverHandshakeState{ - c: c, - clientHello: clientHello, - } - return hs.handshake() -} - -func (hs *serverHandshakeState) handshake() error { - c := hs.c - - if err := hs.processClientHello(); err != nil { - return err - } - - // For an overview of TLS handshaking, see RFC 5246, Section 7.3. - c.buffering = true - if hs.checkForResumption() { - // The client has included a session ticket and so we do an abbreviated handshake. - if err := hs.doResumeHandshake(); err != nil { - return err - } - if err := hs.establishKeys(); err != nil { - return err - } - // ticketSupported is set in a resumption handshake if the - // ticket from the client was encrypted with an old session - // ticket key and thus a refreshed ticket should be sent. 
- if hs.hello.ticketSupported { - if err := hs.sendSessionTicket(); err != nil { - return err - } - } - if err := hs.sendFinished(c.serverFinished[:]); err != nil { - return err - } - if _, err := c.flush(); err != nil { - return err - } - c.clientFinishedIsFirst = false - if err := hs.readFinished(nil); err != nil { - return err - } - c.didResume = true - } else { - // The client didn't include a session ticket, or it wasn't - // valid so we do a full handshake. - if err := hs.pickCipherSuite(); err != nil { - return err - } - if err := hs.doFullHandshake(); err != nil { - return err - } - if err := hs.establishKeys(); err != nil { - return err - } - if err := hs.readFinished(c.clientFinished[:]); err != nil { - return err - } - c.clientFinishedIsFirst = true - c.buffering = true - if err := hs.sendSessionTicket(); err != nil { - return err - } - if err := hs.sendFinished(nil); err != nil { - return err - } - if _, err := c.flush(); err != nil { - return err - } - } - - c.ekm = ekmFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random) - atomic.StoreUint32(&c.handshakeStatus, 1) - - return nil -} - -// readClientHello reads a ClientHello message and selects the protocol version. 
-func (c *Conn) readClientHello() (*clientHelloMsg, error) { - msg, err := c.readHandshake() - if err != nil { - return nil, err - } - clientHello, ok := msg.(*clientHelloMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return nil, unexpectedMessageError(clientHello, msg) - } - - if c.config.GetConfigForClient != nil { - chi := clientHelloInfo(c, clientHello) - if newConfig, err := c.config.GetConfigForClient(chi); err != nil { - c.sendAlert(alertInternalError) - return nil, err - } else if newConfig != nil { - newConfig.serverInitOnce.Do(func() { newConfig.serverInit(c.config) }) - c.config = newConfig - } - } - - clientVersions := clientHello.supportedVersions - if len(clientHello.supportedVersions) == 0 { - clientVersions = supportedVersionsFromMax(clientHello.vers) - } - c.vers, ok = c.config.mutualVersion(false, clientVersions) - if !ok { - c.sendAlert(alertProtocolVersion) - return nil, fmt.Errorf("tls: client offered only unsupported versions: %x", clientVersions) - } - c.haveVers = true - c.in.version = c.vers - c.out.version = c.vers - - return clientHello, nil -} - -func (hs *serverHandshakeState) processClientHello() error { - c := hs.c - - hs.hello = new(serverHelloMsg) - hs.hello.vers = c.vers - - supportedCurve := false - preferredCurves := c.config.curvePreferences() -Curves: - for _, curve := range hs.clientHello.supportedCurves { - for _, supported := range preferredCurves { - if supported == curve { - supportedCurve = true - break Curves - } - } - } - - supportedPointFormat := false - for _, pointFormat := range hs.clientHello.supportedPoints { - if pointFormat == pointFormatUncompressed { - supportedPointFormat = true - break - } - } - hs.ellipticOk = supportedCurve && supportedPointFormat - - foundCompression := false - // We only support null compression, so check that the client offered it. 
- for _, compression := range hs.clientHello.compressionMethods { - if compression == compressionNone { - foundCompression = true - break - } - } - - if !foundCompression { - c.sendAlert(alertHandshakeFailure) - return errors.New("tls: client does not support uncompressed connections") - } - - hs.hello.random = make([]byte, 32) - serverRandom := hs.hello.random - // Downgrade protection canaries. See RFC 8446, Section 4.1.3. - maxVers := c.config.maxSupportedVersion(false) - if maxVers >= VersionTLS12 && c.vers < maxVers { - if c.vers == VersionTLS12 { - copy(serverRandom[24:], downgradeCanaryTLS12) - } else { - copy(serverRandom[24:], downgradeCanaryTLS11) - } - serverRandom = serverRandom[:24] - } - _, err := io.ReadFull(c.config.rand(), serverRandom) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - - if len(hs.clientHello.secureRenegotiation) != 0 { - c.sendAlert(alertHandshakeFailure) - return errors.New("tls: initial handshake had non-empty renegotiation extension") - } - - hs.hello.secureRenegotiationSupported = hs.clientHello.secureRenegotiationSupported - hs.hello.compressionMethod = compressionNone - if len(hs.clientHello.serverName) > 0 { - c.serverName = hs.clientHello.serverName - } - - if len(hs.clientHello.alpnProtocols) > 0 { - if selectedProto, fallback := mutualProtocol(hs.clientHello.alpnProtocols, c.config.NextProtos); !fallback { - hs.hello.alpnProtocol = selectedProto - c.clientProtocol = selectedProto - } - } else { - // Although sending an empty NPN extension is reasonable, Firefox has - // had a bug around this. Best to send nothing at all if - // c.config.NextProtos is empty. See - // https://golang.org/issue/5445. 
- if hs.clientHello.nextProtoNeg && len(c.config.NextProtos) > 0 { - hs.hello.nextProtoNeg = true - hs.hello.nextProtos = c.config.NextProtos - } - } - - hs.cert, err = c.config.getCertificate(clientHelloInfo(c, hs.clientHello)) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - if hs.clientHello.scts { - hs.hello.scts = hs.cert.SignedCertificateTimestamps - } - - if priv, ok := hs.cert.PrivateKey.(crypto.Signer); ok { - switch priv.Public().(type) { - case *ecdsa.PublicKey: - hs.ecdsaOk = true - case *rsa.PublicKey: - hs.rsaSignOk = true - default: - c.sendAlert(alertInternalError) - return fmt.Errorf("tls: unsupported signing key type (%T)", priv.Public()) - } - } - if priv, ok := hs.cert.PrivateKey.(crypto.Decrypter); ok { - switch priv.Public().(type) { - case *rsa.PublicKey: - hs.rsaDecryptOk = true - default: - c.sendAlert(alertInternalError) - return fmt.Errorf("tls: unsupported decryption key type (%T)", priv.Public()) - } - } - - return nil -} - -func (hs *serverHandshakeState) pickCipherSuite() error { - c := hs.c - - var preferenceList, supportedList []uint16 - if c.config.PreferServerCipherSuites { - preferenceList = c.config.cipherSuites() - supportedList = hs.clientHello.cipherSuites - } else { - preferenceList = hs.clientHello.cipherSuites - supportedList = c.config.cipherSuites() - } - - for _, id := range preferenceList { - if hs.setCipherSuite(id, supportedList, c.vers) { - break - } - } - - if hs.suite == nil { - c.sendAlert(alertHandshakeFailure) - return errors.New("tls: no cipher suite supported by both client and server") - } - - for _, id := range hs.clientHello.cipherSuites { - if id == TLS_FALLBACK_SCSV { - // The client is doing a fallback connection. See RFC 7507. 
- if hs.clientHello.vers < c.config.maxSupportedVersion(false) { - c.sendAlert(alertInappropriateFallback) - return errors.New("tls: client using inappropriate protocol fallback") - } - break - } - } - - return nil -} - -// checkForResumption reports whether we should perform resumption on this connection. -func (hs *serverHandshakeState) checkForResumption() bool { - c := hs.c - - if c.config.SessionTicketsDisabled { - return false - } - - plaintext, usedOldKey := c.decryptTicket(hs.clientHello.sessionTicket) - if plaintext == nil { - return false - } - hs.sessionState = &sessionState{usedOldKey: usedOldKey} - ok := hs.sessionState.unmarshal(plaintext) - if !ok { - return false - } - - // Never resume a session for a different TLS version. - if c.vers != hs.sessionState.vers { - return false - } - - cipherSuiteOk := false - // Check that the client is still offering the ciphersuite in the session. - for _, id := range hs.clientHello.cipherSuites { - if id == hs.sessionState.cipherSuite { - cipherSuiteOk = true - break - } - } - if !cipherSuiteOk { - return false - } - - // Check that we also support the ciphersuite from the session. - if !hs.setCipherSuite(hs.sessionState.cipherSuite, c.config.cipherSuites(), hs.sessionState.vers) { - return false - } - - sessionHasClientCerts := len(hs.sessionState.certificates) != 0 - needClientCerts := requiresClientCert(c.config.ClientAuth) - if needClientCerts && !sessionHasClientCerts { - return false - } - if sessionHasClientCerts && c.config.ClientAuth == NoClientCert { - return false - } - - return true -} - -func (hs *serverHandshakeState) doResumeHandshake() error { - c := hs.c - - hs.hello.cipherSuite = hs.suite.id - // We echo the client's session ID in the ServerHello to let it know - // that we're doing a resumption. 
- hs.hello.sessionId = hs.clientHello.sessionId - hs.hello.ticketSupported = hs.sessionState.usedOldKey - hs.finishedHash = newFinishedHash(c.vers, hs.suite) - hs.finishedHash.discardHandshakeBuffer() - hs.finishedHash.Write(hs.clientHello.marshal()) - hs.finishedHash.Write(hs.hello.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil { - return err - } - - if err := c.processCertsFromClient(Certificate{ - Certificate: hs.sessionState.certificates, - }); err != nil { - return err - } - - hs.masterSecret = hs.sessionState.masterSecret - - return nil -} - -func (hs *serverHandshakeState) doFullHandshake() error { - c := hs.c - - if hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0 { - hs.hello.ocspStapling = true - } - - hs.hello.ticketSupported = hs.clientHello.ticketSupported && !c.config.SessionTicketsDisabled - hs.hello.cipherSuite = hs.suite.id - - hs.finishedHash = newFinishedHash(hs.c.vers, hs.suite) - if c.config.ClientAuth == NoClientCert { - // No need to keep a full record of the handshake if client - // certificates won't be used. 
- hs.finishedHash.discardHandshakeBuffer() - } - hs.finishedHash.Write(hs.clientHello.marshal()) - hs.finishedHash.Write(hs.hello.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil { - return err - } - - certMsg := new(certificateMsg) - certMsg.certificates = hs.cert.Certificate - hs.finishedHash.Write(certMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil { - return err - } - - if hs.hello.ocspStapling { - certStatus := new(certificateStatusMsg) - certStatus.response = hs.cert.OCSPStaple - hs.finishedHash.Write(certStatus.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certStatus.marshal()); err != nil { - return err - } - } - - keyAgreement := hs.suite.ka(c.vers) - skx, err := keyAgreement.generateServerKeyExchange(c.config, hs.cert, hs.clientHello, hs.hello) - if err != nil { - c.sendAlert(alertHandshakeFailure) - return err - } - if skx != nil { - hs.finishedHash.Write(skx.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, skx.marshal()); err != nil { - return err - } - } - - if c.config.ClientAuth >= RequestClientCert { - // Request a client certificate - certReq := new(certificateRequestMsg) - certReq.certificateTypes = []byte{ - byte(certTypeRSASign), - byte(certTypeECDSASign), - } - if c.vers >= VersionTLS12 { - certReq.hasSignatureAlgorithm = true - certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithmsTLS12 - } - - // An empty list of certificateAuthorities signals to - // the client that it may send any certificate in response - // to our request. When we know the CAs we trust, then - // we can send them down, so that the client can choose - // an appropriate certificate to give to us. 
- if c.config.ClientCAs != nil { - certReq.certificateAuthorities = c.config.ClientCAs.Subjects() - } - hs.finishedHash.Write(certReq.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil { - return err - } - } - - helloDone := new(serverHelloDoneMsg) - hs.finishedHash.Write(helloDone.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, helloDone.marshal()); err != nil { - return err - } - - if _, err := c.flush(); err != nil { - return err - } - - var pub crypto.PublicKey // public key for client auth, if any - - msg, err := c.readHandshake() - if err != nil { - return err - } - - // If we requested a client certificate, then the client must send a - // certificate message, even if it's empty. - if c.config.ClientAuth >= RequestClientCert { - certMsg, ok := msg.(*certificateMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(certMsg, msg) - } - hs.finishedHash.Write(certMsg.marshal()) - - if err := c.processCertsFromClient(Certificate{ - Certificate: certMsg.certificates, - }); err != nil { - return err - } - if len(certMsg.certificates) != 0 { - pub = c.peerCertificates[0].PublicKey - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - } - - // Get client key exchange - ckx, ok := msg.(*clientKeyExchangeMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(ckx, msg) - } - hs.finishedHash.Write(ckx.marshal()) - - preMasterSecret, err := keyAgreement.processClientKeyExchange(c.config, hs.cert, ckx, c.vers) - if err != nil { - c.sendAlert(alertHandshakeFailure) - return err - } - hs.masterSecret = masterFromPreMasterSecret(c.vers, hs.suite, preMasterSecret, hs.clientHello.random, hs.hello.random) - if err := c.config.writeKeyLog(keyLogLabelTLS12, hs.clientHello.random, hs.masterSecret); err != nil { - c.sendAlert(alertInternalError) - return err - } - - // If we received a client cert in response to our certificate request message, - // 
the client will send us a certificateVerifyMsg immediately after the - // clientKeyExchangeMsg. This message is a digest of all preceding - // handshake-layer messages that is signed using the private key corresponding - // to the client's certificate. This allows us to verify that the client is in - // possession of the private key of the certificate. - if len(c.peerCertificates) > 0 { - msg, err = c.readHandshake() - if err != nil { - return err - } - certVerify, ok := msg.(*certificateVerifyMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(certVerify, msg) - } - - // Determine the signature type. - _, sigType, hashFunc, err := pickSignatureAlgorithm(pub, []SignatureScheme{certVerify.signatureAlgorithm}, supportedSignatureAlgorithmsTLS12, c.vers) - if err != nil { - c.sendAlert(alertIllegalParameter) - return err - } - - var digest []byte - if digest, err = hs.finishedHash.hashForClientCertificate(sigType, hashFunc, hs.masterSecret); err == nil { - err = verifyHandshakeSignature(sigType, pub, hashFunc, digest, certVerify.signature) - } - if err != nil { - c.sendAlert(alertBadCertificate) - return errors.New("tls: could not validate signature of connection nonces: " + err.Error()) - } - - hs.finishedHash.Write(certVerify.marshal()) - } - - hs.finishedHash.discardHandshakeBuffer() - - return nil -} - -func (hs *serverHandshakeState) establishKeys() error { - c := hs.c - - clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV := - keysFromMasterSecret(c.vers, hs.suite, hs.masterSecret, hs.clientHello.random, hs.hello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen) - - var clientCipher, serverCipher interface{} - var clientHash, serverHash macFunction - - if hs.suite.aead == nil { - clientCipher = hs.suite.cipher(clientKey, clientIV, true /* for reading */) - clientHash = hs.suite.mac(c.vers, clientMAC) - serverCipher = hs.suite.cipher(serverKey, serverIV, false /* not for reading */) - serverHash = 
hs.suite.mac(c.vers, serverMAC) - } else { - clientCipher = hs.suite.aead(clientKey, clientIV) - serverCipher = hs.suite.aead(serverKey, serverIV) - } - - c.in.prepareCipherSpec(c.vers, clientCipher, clientHash) - c.out.prepareCipherSpec(c.vers, serverCipher, serverHash) - - return nil -} - -func (hs *serverHandshakeState) readFinished(out []byte) error { - c := hs.c - - if err := c.readChangeCipherSpec(); err != nil { - return err - } - - if hs.hello.nextProtoNeg { - msg, err := c.readHandshake() - if err != nil { - return err - } - nextProto, ok := msg.(*nextProtoMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(nextProto, msg) - } - hs.finishedHash.Write(nextProto.marshal()) - c.clientProtocol = nextProto.proto - } - - msg, err := c.readHandshake() - if err != nil { - return err - } - clientFinished, ok := msg.(*finishedMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(clientFinished, msg) - } - - verify := hs.finishedHash.clientSum(hs.masterSecret) - if len(verify) != len(clientFinished.verifyData) || - subtle.ConstantTimeCompare(verify, clientFinished.verifyData) != 1 { - c.sendAlert(alertHandshakeFailure) - return errors.New("tls: client's Finished message is incorrect") - } - - hs.finishedHash.Write(clientFinished.marshal()) - copy(out, verify) - return nil -} - -func (hs *serverHandshakeState) sendSessionTicket() error { - if !hs.hello.ticketSupported { - return nil - } - - c := hs.c - m := new(newSessionTicketMsg) - - var certsFromClient [][]byte - for _, cert := range c.peerCertificates { - certsFromClient = append(certsFromClient, cert.Raw) - } - state := sessionState{ - vers: c.vers, - cipherSuite: hs.suite.id, - masterSecret: hs.masterSecret, - certificates: certsFromClient, - } - var err error - m.ticket, err = c.encryptTicket(state.marshal()) - if err != nil { - return err - } - - hs.finishedHash.Write(m.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); 
err != nil { - return err - } - - return nil -} - -func (hs *serverHandshakeState) sendFinished(out []byte) error { - c := hs.c - - if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil { - return err - } - - finished := new(finishedMsg) - finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret) - hs.finishedHash.Write(finished.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil { - return err - } - - c.cipherSuite = hs.suite.id - copy(out, finished.verifyData) - - return nil -} - -// processCertsFromClient takes a chain of client certificates either from a -// Certificates message or from a sessionState and verifies them. It returns -// the public key of the leaf certificate. -func (c *Conn) processCertsFromClient(certificate Certificate) error { - certificates := certificate.Certificate - certs := make([]*x509.Certificate, len(certificates)) - var err error - for i, asn1Data := range certificates { - if certs[i], err = x509.ParseCertificate(asn1Data); err != nil { - c.sendAlert(alertBadCertificate) - return errors.New("tls: failed to parse client certificate: " + err.Error()) - } - } - - if len(certs) == 0 && requiresClientCert(c.config.ClientAuth) { - c.sendAlert(alertBadCertificate) - return errors.New("tls: client didn't provide a certificate") - } - - if c.config.ClientAuth >= VerifyClientCertIfGiven && len(certs) > 0 { - opts := x509.VerifyOptions{ - Roots: c.config.ClientCAs, - CurrentTime: c.config.time(), - Intermediates: x509.NewCertPool(), - KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - } - - for _, cert := range certs[1:] { - opts.Intermediates.AddCert(cert) - } - - chains, err := certs[0].Verify(opts) - if err != nil { - c.sendAlert(alertBadCertificate) - return errors.New("tls: failed to verify client's certificate: " + err.Error()) - } - - c.verifiedChains = chains - } - - if c.config.VerifyPeerCertificate != nil { - if err := 
c.config.VerifyPeerCertificate(certificates, c.verifiedChains); err != nil { - c.sendAlert(alertBadCertificate) - return err - } - } - - if len(certs) == 0 { - return nil - } - - switch certs[0].PublicKey.(type) { - case *ecdsa.PublicKey, *rsa.PublicKey: - default: - c.sendAlert(alertUnsupportedCertificate) - return fmt.Errorf("tls: client's certificate contains an unsupported public key of type %T", certs[0].PublicKey) - } - - c.peerCertificates = certs - c.ocspResponse = certificate.OCSPStaple - c.scts = certificate.SignedCertificateTimestamps - return nil -} - -// setCipherSuite sets a cipherSuite with the given id as the serverHandshakeState -// suite if that cipher suite is acceptable to use. -// It returns a bool indicating if the suite was set. -func (hs *serverHandshakeState) setCipherSuite(id uint16, supportedCipherSuites []uint16, version uint16) bool { - for _, supported := range supportedCipherSuites { - if id == supported { - candidate := cipherSuiteByID(id) - if candidate == nil { - continue - } - // Don't select a ciphersuite which we can't - // support for this client. 
- if candidate.flags&suiteECDHE != 0 { - if !hs.ellipticOk { - continue - } - if candidate.flags&suiteECDSA != 0 { - if !hs.ecdsaOk { - continue - } - } else if !hs.rsaSignOk { - continue - } - } else if !hs.rsaDecryptOk { - continue - } - if version < VersionTLS12 && candidate.flags&suiteTLS12 != 0 { - continue - } - hs.suite = candidate - return true - } - } - return false -} - -func clientHelloInfo(c *Conn, clientHello *clientHelloMsg) *ClientHelloInfo { - supportedVersions := clientHello.supportedVersions - if len(clientHello.supportedVersions) == 0 { - supportedVersions = supportedVersionsFromMax(clientHello.vers) - } - - return &ClientHelloInfo{ - CipherSuites: clientHello.cipherSuites, - ServerName: clientHello.serverName, - SupportedCurves: clientHello.supportedCurves, - SupportedPoints: clientHello.supportedPoints, - SignatureSchemes: clientHello.supportedSignatureAlgorithms, - SupportedProtos: clientHello.alpnProtocols, - SupportedVersions: supportedVersions, - Conn: c.conn, - } -} diff --git a/vendor/github.com/marten-seemann/qtls/handshake_server_tls13.go b/vendor/github.com/marten-seemann/qtls/handshake_server_tls13.go deleted file mode 100644 index 97f15091f..000000000 --- a/vendor/github.com/marten-seemann/qtls/handshake_server_tls13.go +++ /dev/null @@ -1,872 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package qtls - -import ( - "bytes" - "crypto" - "crypto/hmac" - "crypto/rsa" - "errors" - "hash" - "io" - "sync/atomic" - "time" -) - -// maxClientPSKIdentities is the number of client PSK identities the server will -// attempt to validate. It will ignore the rest not to let cheap ClientHello -// messages cause too much work in session ticket decryption attempts. 
-const maxClientPSKIdentities = 5 - -type serverHandshakeStateTLS13 struct { - c *Conn - clientHello *clientHelloMsg - hello *serverHelloMsg - sentDummyCCS bool - usingPSK bool - suite *cipherSuiteTLS13 - cert *Certificate - sigAlg SignatureScheme - earlySecret []byte - sharedKey []byte - handshakeSecret []byte - masterSecret []byte - trafficSecret []byte // client_application_traffic_secret_0 - transcript hash.Hash - clientFinished []byte -} - -func (hs *serverHandshakeStateTLS13) handshake() error { - c := hs.c - - // For an overview of the TLS 1.3 handshake, see RFC 8446, Section 2. - if err := hs.processClientHello(); err != nil { - return err - } - if err := hs.checkForResumption(); err != nil { - return err - } - if err := hs.pickCertificate(); err != nil { - return err - } - c.buffering = true - if err := hs.sendServerParameters(); err != nil { - return err - } - if err := hs.sendServerCertificate(); err != nil { - return err - } - if err := hs.sendServerFinished(); err != nil { - return err - } - // Note that at this point we could start sending application data without - // waiting for the client's second flight, but the application might not - // expect the lack of replay protection of the ClientHello parameters. - if _, err := c.flush(); err != nil { - return err - } - if err := hs.readClientCertificate(); err != nil { - return err - } - if err := hs.readClientFinished(); err != nil { - return err - } - - atomic.StoreUint32(&c.handshakeStatus, 1) - - return nil -} - -func (hs *serverHandshakeStateTLS13) processClientHello() error { - c := hs.c - - hs.hello = new(serverHelloMsg) - - // TLS 1.3 froze the ServerHello.legacy_version field, and uses - // supported_versions instead. See RFC 8446, sections 4.1.3 and 4.2.1. 
- hs.hello.vers = VersionTLS12 - hs.hello.supportedVersion = c.vers - - if len(hs.clientHello.supportedVersions) == 0 { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: client used the legacy version field to negotiate TLS 1.3") - } - - // Abort if the client is doing a fallback and landing lower than what we - // support. See RFC 7507, which however does not specify the interaction - // with supported_versions. The only difference is that with - // supported_versions a client has a chance to attempt a [TLS 1.2, TLS 1.4] - // handshake in case TLS 1.3 is broken but 1.2 is not. Alas, in that case, - // it will have to drop the TLS_FALLBACK_SCSV protection if it falls back to - // TLS 1.2, because a TLS 1.3 server would abort here. The situation before - // supported_versions was not better because there was just no way to do a - // TLS 1.4 handshake without risking the server selecting TLS 1.3. - for _, id := range hs.clientHello.cipherSuites { - if id == TLS_FALLBACK_SCSV { - // Use c.vers instead of max(supported_versions) because an attacker - // could defeat this by adding an arbitrary high version otherwise. 
- if c.vers < c.config.maxSupportedVersion(false) { - c.sendAlert(alertInappropriateFallback) - return errors.New("tls: client using inappropriate protocol fallback") - } - break - } - } - - if len(hs.clientHello.compressionMethods) != 1 || - hs.clientHello.compressionMethods[0] != compressionNone { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: TLS 1.3 client supports illegal compression methods") - } - - hs.hello.random = make([]byte, 32) - if _, err := io.ReadFull(c.config.rand(), hs.hello.random); err != nil { - c.sendAlert(alertInternalError) - return err - } - - if len(hs.clientHello.secureRenegotiation) != 0 { - c.sendAlert(alertHandshakeFailure) - return errors.New("tls: initial handshake had non-empty renegotiation extension") - } - - if hs.clientHello.earlyData { - // See RFC 8446, Section 4.2.10 for the complicated behavior required - // here. The scenario is that a different server at our address offered - // to accept early data in the past, which we can't handle. For now, all - // 0-RTT enabled session tickets need to expire before a Go server can - // replace a server or join a pool. That's the same requirement that - // applies to mixing or replacing with any TLS 1.2 server. 
- c.sendAlert(alertUnsupportedExtension) - return errors.New("tls: client sent unexpected early data") - } - - hs.hello.sessionId = hs.clientHello.sessionId - hs.hello.compressionMethod = compressionNone - - var preferenceList, supportedList []uint16 - if c.config.PreferServerCipherSuites { - preferenceList = defaultCipherSuitesTLS13() - supportedList = hs.clientHello.cipherSuites - } else { - preferenceList = hs.clientHello.cipherSuites - supportedList = defaultCipherSuitesTLS13() - } - for _, suiteID := range preferenceList { - hs.suite = mutualCipherSuiteTLS13(supportedList, suiteID) - if hs.suite != nil { - break - } - } - if hs.suite == nil { - c.sendAlert(alertHandshakeFailure) - return errors.New("tls: no cipher suite supported by both client and server") - } - c.cipherSuite = hs.suite.id - hs.hello.cipherSuite = hs.suite.id - hs.transcript = hs.suite.hash.New() - - // Pick the ECDHE group in server preference order, but give priority to - // groups with a key share, to avoid a HelloRetryRequest round-trip. 
- var selectedGroup CurveID - var clientKeyShare *keyShare -GroupSelection: - for _, preferredGroup := range c.config.curvePreferences() { - for _, ks := range hs.clientHello.keyShares { - if ks.group == preferredGroup { - selectedGroup = ks.group - clientKeyShare = &ks - break GroupSelection - } - } - if selectedGroup != 0 { - continue - } - for _, group := range hs.clientHello.supportedCurves { - if group == preferredGroup { - selectedGroup = group - break - } - } - } - if selectedGroup == 0 { - c.sendAlert(alertHandshakeFailure) - return errors.New("tls: no ECDHE curve supported by both client and server") - } - if clientKeyShare == nil { - if err := hs.doHelloRetryRequest(selectedGroup); err != nil { - return err - } - clientKeyShare = &hs.clientHello.keyShares[0] - } - - if _, ok := curveForCurveID(selectedGroup); selectedGroup != X25519 && !ok { - c.sendAlert(alertInternalError) - return errors.New("tls: CurvePreferences includes unsupported curve") - } - params, err := generateECDHEParameters(c.config.rand(), selectedGroup) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - hs.hello.serverShare = keyShare{group: selectedGroup, data: params.PublicKey()} - hs.sharedKey = params.SharedKey(clientKeyShare.data) - if hs.sharedKey == nil { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: invalid client key share") - } - - c.serverName = hs.clientHello.serverName - - if c.config.ReceivedExtensions != nil { - c.config.ReceivedExtensions(typeClientHello, hs.clientHello.additionalExtensions) - } - - return nil -} - -func (hs *serverHandshakeStateTLS13) checkForResumption() error { - c := hs.c - - if c.config.SessionTicketsDisabled { - return nil - } - - modeOK := false - for _, mode := range hs.clientHello.pskModes { - if mode == pskModeDHE { - modeOK = true - break - } - } - if !modeOK { - return nil - } - - if len(hs.clientHello.pskIdentities) != len(hs.clientHello.pskBinders) { - c.sendAlert(alertIllegalParameter) - return 
errors.New("tls: invalid or missing PSK binders") - } - if len(hs.clientHello.pskIdentities) == 0 { - return nil - } - - for i, identity := range hs.clientHello.pskIdentities { - if i >= maxClientPSKIdentities { - break - } - - plaintext, _ := c.decryptTicket(identity.label) - if plaintext == nil { - continue - } - sessionState := new(sessionStateTLS13) - if ok := sessionState.unmarshal(plaintext); !ok { - continue - } - - createdAt := time.Unix(int64(sessionState.createdAt), 0) - if c.config.time().Sub(createdAt) > maxSessionTicketLifetime { - continue - } - - // We don't check the obfuscated ticket age because it's affected by - // clock skew and it's only a freshness signal useful for shrinking the - // window for replay attacks, which don't affect us as we don't do 0-RTT. - - pskSuite := cipherSuiteTLS13ByID(sessionState.cipherSuite) - if pskSuite == nil || pskSuite.hash != hs.suite.hash { - continue - } - - // PSK connections don't re-establish client certificates, but carry - // them over in the session ticket. Ensure the presence of client certs - // in the ticket is consistent with the configured requirements. - sessionHasClientCerts := len(sessionState.certificate.Certificate) != 0 - needClientCerts := requiresClientCert(c.config.ClientAuth) - if needClientCerts && !sessionHasClientCerts { - continue - } - if sessionHasClientCerts && c.config.ClientAuth == NoClientCert { - continue - } - - psk := hs.suite.expandLabel(sessionState.resumptionSecret, "resumption", - nil, hs.suite.hash.Size()) - hs.earlySecret = hs.suite.extract(psk, nil) - binderKey := hs.suite.deriveSecret(hs.earlySecret, resumptionBinderLabel, nil) - // Clone the transcript in case a HelloRetryRequest was recorded. 
- transcript := cloneHash(hs.transcript, hs.suite.hash) - if transcript == nil { - c.sendAlert(alertInternalError) - return errors.New("tls: internal error: failed to clone hash") - } - transcript.Write(hs.clientHello.marshalWithoutBinders()) - pskBinder := hs.suite.finishedHash(binderKey, transcript) - if !hmac.Equal(hs.clientHello.pskBinders[i], pskBinder) { - c.sendAlert(alertDecryptError) - return errors.New("tls: invalid PSK binder") - } - - if err := c.processCertsFromClient(sessionState.certificate); err != nil { - return err - } - - hs.hello.selectedIdentityPresent = true - hs.hello.selectedIdentity = uint16(i) - hs.usingPSK = true - c.didResume = true - return nil - } - - return nil -} - -// cloneHash uses the encoding.BinaryMarshaler and encoding.BinaryUnmarshaler -// interfaces implemented by standard library hashes to clone the state of in -// to a new instance of h. It returns nil if the operation fails. -func cloneHash(in hash.Hash, h crypto.Hash) hash.Hash { - // Recreate the interface to avoid importing encoding. - type binaryMarshaler interface { - MarshalBinary() (data []byte, err error) - UnmarshalBinary(data []byte) error - } - marshaler, ok := in.(binaryMarshaler) - if !ok { - return nil - } - state, err := marshaler.MarshalBinary() - if err != nil { - return nil - } - out := h.New() - unmarshaler, ok := out.(binaryMarshaler) - if !ok { - return nil - } - if err := unmarshaler.UnmarshalBinary(state); err != nil { - return nil - } - return out -} - -func (hs *serverHandshakeStateTLS13) pickCertificate() error { - c := hs.c - - // Only one of PSK and certificates are used at a time. - if hs.usingPSK { - return nil - } - - // This implements a very simplistic certificate selection strategy for now: - // getCertificate delegates to the application Config.GetCertificate, or - // selects based on the server_name only. If the selected certificate's - // public key does not match the client signature_algorithms, the handshake - // is aborted. 
No attention is given to signature_algorithms_cert, and it is - // not passed to the application Config.GetCertificate. This will need to - // improve according to RFC 8446, sections 4.4.2.2 and 4.2.3. - certificate, err := c.config.getCertificate(clientHelloInfo(c, hs.clientHello)) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - supportedAlgs := signatureSchemesForCertificate(c.vers, certificate) - if supportedAlgs == nil { - c.sendAlert(alertInternalError) - return unsupportedCertificateError(certificate) - } - // Pick signature scheme in client preference order, as the server - // preference order is not configurable. - for _, preferredAlg := range hs.clientHello.supportedSignatureAlgorithms { - if isSupportedSignatureAlgorithm(preferredAlg, supportedAlgs) { - hs.sigAlg = preferredAlg - break - } - } - if hs.sigAlg == 0 { - // getCertificate returned a certificate incompatible with the - // ClientHello supported signature algorithms. - c.sendAlert(alertHandshakeFailure) - return errors.New("tls: client doesn't support selected certificate") - } - hs.cert = certificate - - return nil -} - -// sendDummyChangeCipherSpec sends a ChangeCipherSpec record for compatibility -// with middleboxes that didn't implement TLS correctly. See RFC 8446, Appendix D.4. -func (hs *serverHandshakeStateTLS13) sendDummyChangeCipherSpec() error { - if hs.sentDummyCCS { - return nil - } - hs.sentDummyCCS = true - - _, err := hs.c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) - return err -} - -func (hs *serverHandshakeStateTLS13) doHelloRetryRequest(selectedGroup CurveID) error { - c := hs.c - - // The first ClientHello gets double-hashed into the transcript upon a - // HelloRetryRequest. See RFC 8446, Section 4.4.1. 
- hs.transcript.Write(hs.clientHello.marshal()) - chHash := hs.transcript.Sum(nil) - hs.transcript.Reset() - hs.transcript.Write([]byte{typeMessageHash, 0, 0, uint8(len(chHash))}) - hs.transcript.Write(chHash) - - helloRetryRequest := &serverHelloMsg{ - vers: hs.hello.vers, - random: helloRetryRequestRandom, - sessionId: hs.hello.sessionId, - cipherSuite: hs.hello.cipherSuite, - compressionMethod: hs.hello.compressionMethod, - supportedVersion: hs.hello.supportedVersion, - selectedGroup: selectedGroup, - } - - hs.transcript.Write(helloRetryRequest.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, helloRetryRequest.marshal()); err != nil { - return err - } - - if err := hs.sendDummyChangeCipherSpec(); err != nil { - return err - } - - msg, err := c.readHandshake() - if err != nil { - return err - } - - clientHello, ok := msg.(*clientHelloMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(clientHello, msg) - } - - if len(clientHello.keyShares) != 1 || clientHello.keyShares[0].group != selectedGroup { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: client sent invalid key share in second ClientHello") - } - - if clientHello.earlyData { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: client indicated early data in second ClientHello") - } - - if illegalClientHelloChange(clientHello, hs.clientHello) { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: client illegally modified second ClientHello") - } - - hs.clientHello = clientHello - return nil -} - -// illegalClientHelloChange reports whether the two ClientHello messages are -// different, with the exception of the changes allowed before and after a -// HelloRetryRequest. See RFC 8446, Section 4.1.2. 
-func illegalClientHelloChange(ch, ch1 *clientHelloMsg) bool { - if len(ch.supportedVersions) != len(ch1.supportedVersions) || - len(ch.cipherSuites) != len(ch1.cipherSuites) || - len(ch.supportedCurves) != len(ch1.supportedCurves) || - len(ch.supportedSignatureAlgorithms) != len(ch1.supportedSignatureAlgorithms) || - len(ch.supportedSignatureAlgorithmsCert) != len(ch1.supportedSignatureAlgorithmsCert) || - len(ch.alpnProtocols) != len(ch1.alpnProtocols) { - return true - } - for i := range ch.supportedVersions { - if ch.supportedVersions[i] != ch1.supportedVersions[i] { - return true - } - } - for i := range ch.cipherSuites { - if ch.cipherSuites[i] != ch1.cipherSuites[i] { - return true - } - } - for i := range ch.supportedCurves { - if ch.supportedCurves[i] != ch1.supportedCurves[i] { - return true - } - } - for i := range ch.supportedSignatureAlgorithms { - if ch.supportedSignatureAlgorithms[i] != ch1.supportedSignatureAlgorithms[i] { - return true - } - } - for i := range ch.supportedSignatureAlgorithmsCert { - if ch.supportedSignatureAlgorithmsCert[i] != ch1.supportedSignatureAlgorithmsCert[i] { - return true - } - } - for i := range ch.alpnProtocols { - if ch.alpnProtocols[i] != ch1.alpnProtocols[i] { - return true - } - } - return ch.vers != ch1.vers || - !bytes.Equal(ch.random, ch1.random) || - !bytes.Equal(ch.sessionId, ch1.sessionId) || - !bytes.Equal(ch.compressionMethods, ch1.compressionMethods) || - ch.nextProtoNeg != ch1.nextProtoNeg || - ch.serverName != ch1.serverName || - ch.ocspStapling != ch1.ocspStapling || - !bytes.Equal(ch.supportedPoints, ch1.supportedPoints) || - ch.ticketSupported != ch1.ticketSupported || - !bytes.Equal(ch.sessionTicket, ch1.sessionTicket) || - ch.secureRenegotiationSupported != ch1.secureRenegotiationSupported || - !bytes.Equal(ch.secureRenegotiation, ch1.secureRenegotiation) || - ch.scts != ch1.scts || - !bytes.Equal(ch.cookie, ch1.cookie) || - !bytes.Equal(ch.pskModes, ch1.pskModes) -} - -func (hs 
*serverHandshakeStateTLS13) sendServerParameters() error { - c := hs.c - - hs.transcript.Write(hs.clientHello.marshal()) - hs.transcript.Write(hs.hello.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil { - return err - } - - if err := hs.sendDummyChangeCipherSpec(); err != nil { - return err - } - - earlySecret := hs.earlySecret - if earlySecret == nil { - earlySecret = hs.suite.extract(nil, nil) - } - hs.handshakeSecret = hs.suite.extract(hs.sharedKey, - hs.suite.deriveSecret(earlySecret, "derived", nil)) - - clientSecret := hs.suite.deriveSecret(hs.handshakeSecret, - clientHandshakeTrafficLabel, hs.transcript) - c.in.exportKey(hs.suite, clientSecret) - c.in.setTrafficSecret(hs.suite, clientSecret) - serverSecret := hs.suite.deriveSecret(hs.handshakeSecret, - serverHandshakeTrafficLabel, hs.transcript) - c.out.exportKey(hs.suite, serverSecret) - c.out.setTrafficSecret(hs.suite, serverSecret) - - err := c.config.writeKeyLog(keyLogLabelClientHandshake, hs.clientHello.random, clientSecret) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - err = c.config.writeKeyLog(keyLogLabelServerHandshake, hs.clientHello.random, serverSecret) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - - encryptedExtensions := new(encryptedExtensionsMsg) - - if len(hs.clientHello.alpnProtocols) > 0 { - if selectedProto, fallback := mutualProtocol(hs.clientHello.alpnProtocols, c.config.NextProtos); !fallback { - encryptedExtensions.alpnProtocol = selectedProto - c.clientProtocol = selectedProto - } - } - if hs.c.config.GetExtensions != nil { - encryptedExtensions.additionalExtensions = hs.c.config.GetExtensions(typeEncryptedExtensions) - } - - hs.transcript.Write(encryptedExtensions.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, encryptedExtensions.marshal()); err != nil { - return err - } - - return nil -} - -func (hs *serverHandshakeStateTLS13) requestClientCert() bool { - return 
hs.c.config.ClientAuth >= RequestClientCert && !hs.usingPSK -} - -func (hs *serverHandshakeStateTLS13) sendServerCertificate() error { - c := hs.c - - // Only one of PSK and certificates are used at a time. - if hs.usingPSK { - return nil - } - - if hs.requestClientCert() { - // Request a client certificate - certReq := new(certificateRequestMsgTLS13) - certReq.ocspStapling = true - certReq.scts = true - certReq.supportedSignatureAlgorithms = supportedSignatureAlgorithms - if c.config.ClientCAs != nil { - certReq.certificateAuthorities = c.config.ClientCAs.Subjects() - } - - hs.transcript.Write(certReq.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil { - return err - } - } - - certMsg := new(certificateMsgTLS13) - - certMsg.certificate = *hs.cert - certMsg.scts = hs.clientHello.scts && len(hs.cert.SignedCertificateTimestamps) > 0 - certMsg.ocspStapling = hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0 - - hs.transcript.Write(certMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil { - return err - } - - certVerifyMsg := new(certificateVerifyMsg) - certVerifyMsg.hasSignatureAlgorithm = true - certVerifyMsg.signatureAlgorithm = hs.sigAlg - - sigType := signatureFromSignatureScheme(hs.sigAlg) - sigHash, err := hashFromSignatureScheme(hs.sigAlg) - if sigType == 0 || err != nil { - return c.sendAlert(alertInternalError) - } - h := sigHash.New() - writeSignedMessage(h, serverSignatureContext, hs.transcript) - - signOpts := crypto.SignerOpts(sigHash) - if sigType == signatureRSAPSS { - signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: sigHash} - } - sig, err := hs.cert.PrivateKey.(crypto.Signer).Sign(c.config.rand(), h.Sum(nil), signOpts) - if err != nil { - public := hs.cert.PrivateKey.(crypto.Signer).Public() - if rsaKey, ok := public.(*rsa.PublicKey); ok && sigType == signatureRSAPSS && - rsaKey.N.BitLen()/8 < sigHash.Size()*2+2 { // key too small 
for RSA-PSS - c.sendAlert(alertHandshakeFailure) - } else { - c.sendAlert(alertInternalError) - } - return errors.New("tls: failed to sign handshake: " + err.Error()) - } - certVerifyMsg.signature = sig - - hs.transcript.Write(certVerifyMsg.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, certVerifyMsg.marshal()); err != nil { - return err - } - - return nil -} - -func (hs *serverHandshakeStateTLS13) sendServerFinished() error { - c := hs.c - - finished := &finishedMsg{ - verifyData: hs.suite.finishedHash(c.out.trafficSecret, hs.transcript), - } - - hs.transcript.Write(finished.marshal()) - if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil { - return err - } - - // Derive secrets that take context through the server Finished. - - hs.masterSecret = hs.suite.extract(nil, - hs.suite.deriveSecret(hs.handshakeSecret, "derived", nil)) - - hs.trafficSecret = hs.suite.deriveSecret(hs.masterSecret, - clientApplicationTrafficLabel, hs.transcript) - serverSecret := hs.suite.deriveSecret(hs.masterSecret, - serverApplicationTrafficLabel, hs.transcript) - c.out.exportKey(hs.suite, serverSecret) - c.out.setTrafficSecret(hs.suite, serverSecret) - - err := c.config.writeKeyLog(keyLogLabelClientTraffic, hs.clientHello.random, hs.trafficSecret) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - err = c.config.writeKeyLog(keyLogLabelServerTraffic, hs.clientHello.random, serverSecret) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - - c.ekm = hs.suite.exportKeyingMaterial(hs.masterSecret, hs.transcript) - - // If we did not request client certificates, at this point we can - // precompute the client finished and roll the transcript forward to send - // session tickets in our first flight. 
- if !hs.requestClientCert() { - if err := hs.sendSessionTickets(); err != nil { - return err - } - } - - return nil -} - -func (hs *serverHandshakeStateTLS13) shouldSendSessionTickets() bool { - if hs.c.config.SessionTicketsDisabled { - return false - } - - // Don't send tickets the client wouldn't use. See RFC 8446, Section 4.2.9. - for _, pskMode := range hs.clientHello.pskModes { - if pskMode == pskModeDHE { - return true - } - } - return false -} - -func (hs *serverHandshakeStateTLS13) sendSessionTickets() error { - c := hs.c - - hs.clientFinished = hs.suite.finishedHash(c.in.trafficSecret, hs.transcript) - finishedMsg := &finishedMsg{ - verifyData: hs.clientFinished, - } - hs.transcript.Write(finishedMsg.marshal()) - - if !hs.shouldSendSessionTickets() { - return nil - } - - resumptionSecret := hs.suite.deriveSecret(hs.masterSecret, - resumptionLabel, hs.transcript) - - m := new(newSessionTicketMsgTLS13) - - var certsFromClient [][]byte - for _, cert := range c.peerCertificates { - certsFromClient = append(certsFromClient, cert.Raw) - } - state := sessionStateTLS13{ - cipherSuite: hs.suite.id, - createdAt: uint64(c.config.time().Unix()), - resumptionSecret: resumptionSecret, - certificate: Certificate{ - Certificate: certsFromClient, - OCSPStaple: c.ocspResponse, - SignedCertificateTimestamps: c.scts, - }, - } - var err error - m.label, err = c.encryptTicket(state.marshal()) - if err != nil { - return err - } - m.lifetime = uint32(maxSessionTicketLifetime / time.Second) - - if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil { - return err - } - - return nil -} - -func (hs *serverHandshakeStateTLS13) readClientCertificate() error { - c := hs.c - - if !hs.requestClientCert() { - return nil - } - - // If we requested a client certificate, then the client must send a - // certificate message. If it's empty, no CertificateVerify is sent. 
- - msg, err := c.readHandshake() - if err != nil { - return err - } - - certMsg, ok := msg.(*certificateMsgTLS13) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(certMsg, msg) - } - hs.transcript.Write(certMsg.marshal()) - - if err := c.processCertsFromClient(certMsg.certificate); err != nil { - return err - } - - if len(certMsg.certificate.Certificate) != 0 { - msg, err = c.readHandshake() - if err != nil { - return err - } - - certVerify, ok := msg.(*certificateVerifyMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(certVerify, msg) - } - - // See RFC 8446, Section 4.4.3. - if !isSupportedSignatureAlgorithm(certVerify.signatureAlgorithm, supportedSignatureAlgorithms) { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: invalid certificate signature algorithm") - } - sigType := signatureFromSignatureScheme(certVerify.signatureAlgorithm) - sigHash, err := hashFromSignatureScheme(certVerify.signatureAlgorithm) - if sigType == 0 || err != nil { - c.sendAlert(alertInternalError) - return err - } - if sigType == signaturePKCS1v15 || sigHash == crypto.SHA1 { - c.sendAlert(alertIllegalParameter) - return errors.New("tls: invalid certificate signature algorithm") - } - h := sigHash.New() - writeSignedMessage(h, clientSignatureContext, hs.transcript) - if err := verifyHandshakeSignature(sigType, c.peerCertificates[0].PublicKey, - sigHash, h.Sum(nil), certVerify.signature); err != nil { - c.sendAlert(alertDecryptError) - return errors.New("tls: invalid certificate signature") - } - - hs.transcript.Write(certVerify.marshal()) - } - - // If we waited until the client certificates to send session tickets, we - // are ready to do it now. 
- if err := hs.sendSessionTickets(); err != nil { - return err - } - - return nil -} - -func (hs *serverHandshakeStateTLS13) readClientFinished() error { - c := hs.c - - msg, err := c.readHandshake() - if err != nil { - return err - } - - finished, ok := msg.(*finishedMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return unexpectedMessageError(finished, msg) - } - - if !hmac.Equal(hs.clientFinished, finished.verifyData) { - c.sendAlert(alertDecryptError) - return errors.New("tls: invalid client finished hash") - } - - c.in.exportKey(hs.suite, hs.trafficSecret) - c.in.setTrafficSecret(hs.suite, hs.trafficSecret) - - return nil -} diff --git a/vendor/github.com/marten-seemann/qtls/key_agreement.go b/vendor/github.com/marten-seemann/qtls/key_agreement.go deleted file mode 100644 index 477aae4a4..000000000 --- a/vendor/github.com/marten-seemann/qtls/key_agreement.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package qtls - -import ( - "crypto" - "crypto/md5" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "errors" - "io" -) - -var errClientKeyExchange = errors.New("tls: invalid ClientKeyExchange message") -var errServerKeyExchange = errors.New("tls: invalid ServerKeyExchange message") - -// rsaKeyAgreement implements the standard TLS key agreement where the client -// encrypts the pre-master secret to the server's public key. 
-type rsaKeyAgreement struct{} - -func (ka rsaKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) { - return nil, nil -} - -func (ka rsaKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) { - if len(ckx.ciphertext) < 2 { - return nil, errClientKeyExchange - } - - ciphertext := ckx.ciphertext - if version != VersionSSL30 { - ciphertextLen := int(ckx.ciphertext[0])<<8 | int(ckx.ciphertext[1]) - if ciphertextLen != len(ckx.ciphertext)-2 { - return nil, errClientKeyExchange - } - ciphertext = ckx.ciphertext[2:] - } - priv, ok := cert.PrivateKey.(crypto.Decrypter) - if !ok { - return nil, errors.New("tls: certificate private key does not implement crypto.Decrypter") - } - // Perform constant time RSA PKCS#1 v1.5 decryption - preMasterSecret, err := priv.Decrypt(config.rand(), ciphertext, &rsa.PKCS1v15DecryptOptions{SessionKeyLen: 48}) - if err != nil { - return nil, err - } - // We don't check the version number in the premaster secret. For one, - // by checking it, we would leak information about the validity of the - // encrypted pre-master secret. Secondly, it provides only a small - // benefit against a downgrade attack and some implementations send the - // wrong version anyway. See the discussion at the end of section - // 7.4.7.1 of RFC 4346. 
- return preMasterSecret, nil -} - -func (ka rsaKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error { - return errors.New("tls: unexpected ServerKeyExchange") -} - -func (ka rsaKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) { - preMasterSecret := make([]byte, 48) - preMasterSecret[0] = byte(clientHello.vers >> 8) - preMasterSecret[1] = byte(clientHello.vers) - _, err := io.ReadFull(config.rand(), preMasterSecret[2:]) - if err != nil { - return nil, nil, err - } - - encrypted, err := rsa.EncryptPKCS1v15(config.rand(), cert.PublicKey.(*rsa.PublicKey), preMasterSecret) - if err != nil { - return nil, nil, err - } - ckx := new(clientKeyExchangeMsg) - ckx.ciphertext = make([]byte, len(encrypted)+2) - ckx.ciphertext[0] = byte(len(encrypted) >> 8) - ckx.ciphertext[1] = byte(len(encrypted)) - copy(ckx.ciphertext[2:], encrypted) - return preMasterSecret, ckx, nil -} - -// sha1Hash calculates a SHA1 hash over the given byte slices. -func sha1Hash(slices [][]byte) []byte { - hsha1 := sha1.New() - for _, slice := range slices { - hsha1.Write(slice) - } - return hsha1.Sum(nil) -} - -// md5SHA1Hash implements TLS 1.0's hybrid hash function which consists of the -// concatenation of an MD5 and SHA1 hash. -func md5SHA1Hash(slices [][]byte) []byte { - md5sha1 := make([]byte, md5.Size+sha1.Size) - hmd5 := md5.New() - for _, slice := range slices { - hmd5.Write(slice) - } - copy(md5sha1, hmd5.Sum(nil)) - copy(md5sha1[md5.Size:], sha1Hash(slices)) - return md5sha1 -} - -// hashForServerKeyExchange hashes the given slices and returns their digest -// using the given hash function (for >= TLS 1.2) or using a default based on -// the sigType (for earlier TLS versions). 
-func hashForServerKeyExchange(sigType uint8, hashFunc crypto.Hash, version uint16, slices ...[]byte) ([]byte, error) { - if version >= VersionTLS12 { - h := hashFunc.New() - for _, slice := range slices { - h.Write(slice) - } - digest := h.Sum(nil) - return digest, nil - } - if sigType == signatureECDSA { - return sha1Hash(slices), nil - } - return md5SHA1Hash(slices), nil -} - -// ecdheKeyAgreement implements a TLS key agreement where the server -// generates an ephemeral EC public/private key pair and signs it. The -// pre-master secret is then calculated using ECDH. The signature may -// either be ECDSA or RSA. -type ecdheKeyAgreement struct { - version uint16 - isRSA bool - params ecdheParameters - - // ckx and preMasterSecret are generated in processServerKeyExchange - // and returned in generateClientKeyExchange. - ckx *clientKeyExchangeMsg - preMasterSecret []byte -} - -func (ka *ecdheKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) { - preferredCurves := config.curvePreferences() - - var curveID CurveID -NextCandidate: - for _, candidate := range preferredCurves { - for _, c := range clientHello.supportedCurves { - if candidate == c { - curveID = c - break NextCandidate - } - } - } - - if curveID == 0 { - return nil, errors.New("tls: no supported elliptic curves offered") - } - if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok { - return nil, errors.New("tls: CurvePreferences includes unsupported curve") - } - - params, err := generateECDHEParameters(config.rand(), curveID) - if err != nil { - return nil, err - } - ka.params = params - - // See RFC 4492, Section 5.4. 
- ecdhePublic := params.PublicKey() - serverECDHParams := make([]byte, 1+2+1+len(ecdhePublic)) - serverECDHParams[0] = 3 // named curve - serverECDHParams[1] = byte(curveID >> 8) - serverECDHParams[2] = byte(curveID) - serverECDHParams[3] = byte(len(ecdhePublic)) - copy(serverECDHParams[4:], ecdhePublic) - - priv, ok := cert.PrivateKey.(crypto.Signer) - if !ok { - return nil, errors.New("tls: certificate private key does not implement crypto.Signer") - } - - signatureAlgorithm, sigType, hashFunc, err := pickSignatureAlgorithm(priv.Public(), clientHello.supportedSignatureAlgorithms, supportedSignatureAlgorithmsTLS12, ka.version) - if err != nil { - return nil, err - } - if (sigType == signaturePKCS1v15 || sigType == signatureRSAPSS) != ka.isRSA { - return nil, errors.New("tls: certificate cannot be used with the selected cipher suite") - } - - digest, err := hashForServerKeyExchange(sigType, hashFunc, ka.version, clientHello.random, hello.random, serverECDHParams) - if err != nil { - return nil, err - } - - signOpts := crypto.SignerOpts(hashFunc) - if sigType == signatureRSAPSS { - signOpts = &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash, Hash: hashFunc} - } - sig, err := priv.Sign(config.rand(), digest, signOpts) - if err != nil { - return nil, errors.New("tls: failed to sign ECDHE parameters: " + err.Error()) - } - - skx := new(serverKeyExchangeMsg) - sigAndHashLen := 0 - if ka.version >= VersionTLS12 { - sigAndHashLen = 2 - } - skx.key = make([]byte, len(serverECDHParams)+sigAndHashLen+2+len(sig)) - copy(skx.key, serverECDHParams) - k := skx.key[len(serverECDHParams):] - if ka.version >= VersionTLS12 { - k[0] = byte(signatureAlgorithm >> 8) - k[1] = byte(signatureAlgorithm) - k = k[2:] - } - k[0] = byte(len(sig) >> 8) - k[1] = byte(len(sig)) - copy(k[2:], sig) - - return skx, nil -} - -func (ka *ecdheKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) { - if 
len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 { - return nil, errClientKeyExchange - } - - preMasterSecret := ka.params.SharedKey(ckx.ciphertext[1:]) - if preMasterSecret == nil { - return nil, errClientKeyExchange - } - - return preMasterSecret, nil -} - -func (ka *ecdheKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error { - if len(skx.key) < 4 { - return errServerKeyExchange - } - if skx.key[0] != 3 { // named curve - return errors.New("tls: server selected unsupported curve") - } - curveID := CurveID(skx.key[1])<<8 | CurveID(skx.key[2]) - - publicLen := int(skx.key[3]) - if publicLen+4 > len(skx.key) { - return errServerKeyExchange - } - serverECDHParams := skx.key[:4+publicLen] - publicKey := serverECDHParams[4:] - - sig := skx.key[4+publicLen:] - if len(sig) < 2 { - return errServerKeyExchange - } - - if _, ok := curveForCurveID(curveID); curveID != X25519 && !ok { - return errors.New("tls: server selected unsupported curve") - } - - params, err := generateECDHEParameters(config.rand(), curveID) - if err != nil { - return err - } - ka.params = params - - ka.preMasterSecret = params.SharedKey(publicKey) - if ka.preMasterSecret == nil { - return errServerKeyExchange - } - - ourPublicKey := params.PublicKey() - ka.ckx = new(clientKeyExchangeMsg) - ka.ckx.ciphertext = make([]byte, 1+len(ourPublicKey)) - ka.ckx.ciphertext[0] = byte(len(ourPublicKey)) - copy(ka.ckx.ciphertext[1:], ourPublicKey) - - var signatureAlgorithm SignatureScheme - if ka.version >= VersionTLS12 { - // handle SignatureAndHashAlgorithm - signatureAlgorithm = SignatureScheme(sig[0])<<8 | SignatureScheme(sig[1]) - sig = sig[2:] - if len(sig) < 2 { - return errServerKeyExchange - } - } - _, sigType, hashFunc, err := pickSignatureAlgorithm(cert.PublicKey, []SignatureScheme{signatureAlgorithm}, clientHello.supportedSignatureAlgorithms, ka.version) - if 
err != nil { - return err - } - if (sigType == signaturePKCS1v15 || sigType == signatureRSAPSS) != ka.isRSA { - return errServerKeyExchange - } - - sigLen := int(sig[0])<<8 | int(sig[1]) - if sigLen+2 != len(sig) { - return errServerKeyExchange - } - sig = sig[2:] - - digest, err := hashForServerKeyExchange(sigType, hashFunc, ka.version, clientHello.random, serverHello.random, serverECDHParams) - if err != nil { - return err - } - return verifyHandshakeSignature(sigType, cert.PublicKey, hashFunc, digest, sig) -} - -func (ka *ecdheKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) { - if ka.ckx == nil { - return nil, nil, errors.New("tls: missing ServerKeyExchange message") - } - - return ka.preMasterSecret, ka.ckx, nil -} diff --git a/vendor/github.com/marten-seemann/qtls/key_schedule.go b/vendor/github.com/marten-seemann/qtls/key_schedule.go deleted file mode 100644 index 24bae2e68..000000000 --- a/vendor/github.com/marten-seemann/qtls/key_schedule.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package qtls - -import ( - "crypto" - "crypto/elliptic" - "crypto/hmac" - "errors" - "hash" - "io" - "math/big" - - "golang.org/x/crypto/cryptobyte" - "golang.org/x/crypto/curve25519" - "golang.org/x/crypto/hkdf" -) - -// This file contains the functions necessary to compute the TLS 1.3 key -// schedule. See RFC 8446, Section 7. 
- -const ( - resumptionBinderLabel = "res binder" - clientHandshakeTrafficLabel = "c hs traffic" - serverHandshakeTrafficLabel = "s hs traffic" - clientApplicationTrafficLabel = "c ap traffic" - serverApplicationTrafficLabel = "s ap traffic" - exporterLabel = "exp master" - resumptionLabel = "res master" - trafficUpdateLabel = "traffic upd" -) - -// HkdfExtract generates a pseudorandom key for use with Expand from an input secret and an optional independent salt. -func HkdfExtract(hash crypto.Hash, newSecret, currentSecret []byte) []byte { - if newSecret == nil { - newSecret = make([]byte, hash.Size()) - } - return hkdf.Extract(hash.New, newSecret, currentSecret) -} - -// HkdfExpandLabel HKDF expands a label -func HkdfExpandLabel(hash crypto.Hash, secret, hashValue []byte, label string, L int) []byte { - return hkdfExpandLabel(hash, secret, hashValue, label, L) -} - -func hkdfExpandLabel(hash crypto.Hash, secret, context []byte, label string, length int) []byte { - var hkdfLabel cryptobyte.Builder - hkdfLabel.AddUint16(uint16(length)) - hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes([]byte("tls13 ")) - b.AddBytes([]byte(label)) - }) - hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(context) - }) - out := make([]byte, length) - n, err := hkdf.Expand(hash.New, secret, hkdfLabel.BytesOrPanic()).Read(out) - if err != nil || n != length { - panic("tls: HKDF-Expand-Label invocation failed unexpectedly") - } - return out -} - -// expandLabel implements HKDF-Expand-Label from RFC 8446, Section 7.1. -func (c *cipherSuiteTLS13) expandLabel(secret []byte, label string, context []byte, length int) []byte { - return hkdfExpandLabel(c.hash, secret, context, label, length) -} - -// deriveSecret implements Derive-Secret from RFC 8446, Section 7.1. 
-func (c *cipherSuiteTLS13) deriveSecret(secret []byte, label string, transcript hash.Hash) []byte { - if transcript == nil { - transcript = c.hash.New() - } - return c.expandLabel(secret, label, transcript.Sum(nil), c.hash.Size()) -} - -// extract implements HKDF-Extract with the cipher suite hash. -func (c *cipherSuiteTLS13) extract(newSecret, currentSecret []byte) []byte { - return HkdfExtract(c.hash, newSecret, currentSecret) -} - -// nextTrafficSecret generates the next traffic secret, given the current one, -// according to RFC 8446, Section 7.2. -func (c *cipherSuiteTLS13) nextTrafficSecret(trafficSecret []byte) []byte { - return c.expandLabel(trafficSecret, trafficUpdateLabel, nil, c.hash.Size()) -} - -// trafficKey generates traffic keys according to RFC 8446, Section 7.3. -func (c *cipherSuiteTLS13) trafficKey(trafficSecret []byte) (key, iv []byte) { - key = c.expandLabel(trafficSecret, "key", nil, c.keyLen) - iv = c.expandLabel(trafficSecret, "iv", nil, aeadNonceLength) - return -} - -// finishedHash generates the Finished verify_data or PskBinderEntry according -// to RFC 8446, Section 4.4.4. See sections 4.4 and 4.2.11.2 for the baseKey -// selection. -func (c *cipherSuiteTLS13) finishedHash(baseKey []byte, transcript hash.Hash) []byte { - finishedKey := c.expandLabel(baseKey, "finished", nil, c.hash.Size()) - verifyData := hmac.New(c.hash.New, finishedKey) - verifyData.Write(transcript.Sum(nil)) - return verifyData.Sum(nil) -} - -// exportKeyingMaterial implements RFC5705 exporters for TLS 1.3 according to -// RFC 8446, Section 7.5. 
-func (c *cipherSuiteTLS13) exportKeyingMaterial(masterSecret []byte, transcript hash.Hash) func(string, []byte, int) ([]byte, error) { - expMasterSecret := c.deriveSecret(masterSecret, exporterLabel, transcript) - return func(label string, context []byte, length int) ([]byte, error) { - secret := c.deriveSecret(expMasterSecret, label, nil) - h := c.hash.New() - h.Write(context) - return c.expandLabel(secret, "exporter", h.Sum(nil), length), nil - } -} - -// ecdheParameters implements Diffie-Hellman with either NIST curves or X25519, -// according to RFC 8446, Section 4.2.8.2. -type ecdheParameters interface { - CurveID() CurveID - PublicKey() []byte - SharedKey(peerPublicKey []byte) []byte -} - -func generateECDHEParameters(rand io.Reader, curveID CurveID) (ecdheParameters, error) { - if curveID == X25519 { - p := &x25519Parameters{} - if _, err := io.ReadFull(rand, p.privateKey[:]); err != nil { - return nil, err - } - curve25519.ScalarBaseMult(&p.publicKey, &p.privateKey) - return p, nil - } - - curve, ok := curveForCurveID(curveID) - if !ok { - return nil, errors.New("tls: internal error: unsupported curve") - } - - p := &nistParameters{curveID: curveID} - var err error - p.privateKey, p.x, p.y, err = elliptic.GenerateKey(curve, rand) - if err != nil { - return nil, err - } - return p, nil -} - -func curveForCurveID(id CurveID) (elliptic.Curve, bool) { - switch id { - case CurveP256: - return elliptic.P256(), true - case CurveP384: - return elliptic.P384(), true - case CurveP521: - return elliptic.P521(), true - default: - return nil, false - } -} - -type nistParameters struct { - privateKey []byte - x, y *big.Int // public key - curveID CurveID -} - -func (p *nistParameters) CurveID() CurveID { - return p.curveID -} - -func (p *nistParameters) PublicKey() []byte { - curve, _ := curveForCurveID(p.curveID) - return elliptic.Marshal(curve, p.x, p.y) -} - -func (p *nistParameters) SharedKey(peerPublicKey []byte) []byte { - curve, _ := curveForCurveID(p.curveID) - 
// Unmarshal also checks whether the given point is on the curve. - x, y := elliptic.Unmarshal(curve, peerPublicKey) - if x == nil { - return nil - } - - xShared, _ := curve.ScalarMult(x, y, p.privateKey) - sharedKey := make([]byte, (curve.Params().BitSize+7)>>3) - xBytes := xShared.Bytes() - copy(sharedKey[len(sharedKey)-len(xBytes):], xBytes) - - return sharedKey -} - -type x25519Parameters struct { - privateKey [32]byte - publicKey [32]byte -} - -func (p *x25519Parameters) CurveID() CurveID { - return X25519 -} - -func (p *x25519Parameters) PublicKey() []byte { - return p.publicKey[:] -} - -func (p *x25519Parameters) SharedKey(peerPublicKey []byte) []byte { - if len(peerPublicKey) != 32 { - return nil - } - var theirPublicKey, sharedKey [32]byte - copy(theirPublicKey[:], peerPublicKey) - curve25519.ScalarMult(&sharedKey, &p.privateKey, &theirPublicKey) - return sharedKey[:] -} diff --git a/vendor/github.com/marten-seemann/qtls/prf.go b/vendor/github.com/marten-seemann/qtls/prf.go deleted file mode 100644 index a973d6ad4..000000000 --- a/vendor/github.com/marten-seemann/qtls/prf.go +++ /dev/null @@ -1,385 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package qtls - -import ( - "crypto" - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "errors" - "fmt" - "hash" -) - -// Split a premaster secret in two as specified in RFC 4346, Section 5. -func splitPreMasterSecret(secret []byte) (s1, s2 []byte) { - s1 = secret[0 : (len(secret)+1)/2] - s2 = secret[len(secret)/2:] - return -} - -// pHash implements the P_hash function, as defined in RFC 4346, Section 5. 
-func pHash(result, secret, seed []byte, hash func() hash.Hash) { - h := hmac.New(hash, secret) - h.Write(seed) - a := h.Sum(nil) - - j := 0 - for j < len(result) { - h.Reset() - h.Write(a) - h.Write(seed) - b := h.Sum(nil) - copy(result[j:], b) - j += len(b) - - h.Reset() - h.Write(a) - a = h.Sum(nil) - } -} - -// prf10 implements the TLS 1.0 pseudo-random function, as defined in RFC 2246, Section 5. -func prf10(result, secret, label, seed []byte) { - hashSHA1 := sha1.New - hashMD5 := md5.New - - labelAndSeed := make([]byte, len(label)+len(seed)) - copy(labelAndSeed, label) - copy(labelAndSeed[len(label):], seed) - - s1, s2 := splitPreMasterSecret(secret) - pHash(result, s1, labelAndSeed, hashMD5) - result2 := make([]byte, len(result)) - pHash(result2, s2, labelAndSeed, hashSHA1) - - for i, b := range result2 { - result[i] ^= b - } -} - -// prf12 implements the TLS 1.2 pseudo-random function, as defined in RFC 5246, Section 5. -func prf12(hashFunc func() hash.Hash) func(result, secret, label, seed []byte) { - return func(result, secret, label, seed []byte) { - labelAndSeed := make([]byte, len(label)+len(seed)) - copy(labelAndSeed, label) - copy(labelAndSeed[len(label):], seed) - - pHash(result, secret, labelAndSeed, hashFunc) - } -} - -// prf30 implements the SSL 3.0 pseudo-random function, as defined in -// www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt section 6. -func prf30(result, secret, label, seed []byte) { - hashSHA1 := sha1.New() - hashMD5 := md5.New() - - done := 0 - i := 0 - // RFC 5246 section 6.3 says that the largest PRF output needed is 128 - // bytes. Since no more ciphersuites will be added to SSLv3, this will - // remain true. Each iteration gives us 16 bytes so 10 iterations will - // be sufficient. 
- var b [11]byte - for done < len(result) { - for j := 0; j <= i; j++ { - b[j] = 'A' + byte(i) - } - - hashSHA1.Reset() - hashSHA1.Write(b[:i+1]) - hashSHA1.Write(secret) - hashSHA1.Write(seed) - digest := hashSHA1.Sum(nil) - - hashMD5.Reset() - hashMD5.Write(secret) - hashMD5.Write(digest) - - done += copy(result[done:], hashMD5.Sum(nil)) - i++ - } -} - -const ( - masterSecretLength = 48 // Length of a master secret in TLS 1.1. - finishedVerifyLength = 12 // Length of verify_data in a Finished message. -) - -var masterSecretLabel = []byte("master secret") -var keyExpansionLabel = []byte("key expansion") -var clientFinishedLabel = []byte("client finished") -var serverFinishedLabel = []byte("server finished") - -func prfAndHashForVersion(version uint16, suite *cipherSuite) (func(result, secret, label, seed []byte), crypto.Hash) { - switch version { - case VersionSSL30: - return prf30, crypto.Hash(0) - case VersionTLS10, VersionTLS11: - return prf10, crypto.Hash(0) - case VersionTLS12: - if suite.flags&suiteSHA384 != 0 { - return prf12(sha512.New384), crypto.SHA384 - } - return prf12(sha256.New), crypto.SHA256 - default: - panic("unknown version") - } -} - -func prfForVersion(version uint16, suite *cipherSuite) func(result, secret, label, seed []byte) { - prf, _ := prfAndHashForVersion(version, suite) - return prf -} - -// masterFromPreMasterSecret generates the master secret from the pre-master -// secret. See RFC 5246, Section 8.1. -func masterFromPreMasterSecret(version uint16, suite *cipherSuite, preMasterSecret, clientRandom, serverRandom []byte) []byte { - seed := make([]byte, 0, len(clientRandom)+len(serverRandom)) - seed = append(seed, clientRandom...) - seed = append(seed, serverRandom...) 
- - masterSecret := make([]byte, masterSecretLength) - prfForVersion(version, suite)(masterSecret, preMasterSecret, masterSecretLabel, seed) - return masterSecret -} - -// keysFromMasterSecret generates the connection keys from the master -// secret, given the lengths of the MAC key, cipher key and IV, as defined in -// RFC 2246, Section 6.3. -func keysFromMasterSecret(version uint16, suite *cipherSuite, masterSecret, clientRandom, serverRandom []byte, macLen, keyLen, ivLen int) (clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV []byte) { - seed := make([]byte, 0, len(serverRandom)+len(clientRandom)) - seed = append(seed, serverRandom...) - seed = append(seed, clientRandom...) - - n := 2*macLen + 2*keyLen + 2*ivLen - keyMaterial := make([]byte, n) - prfForVersion(version, suite)(keyMaterial, masterSecret, keyExpansionLabel, seed) - clientMAC = keyMaterial[:macLen] - keyMaterial = keyMaterial[macLen:] - serverMAC = keyMaterial[:macLen] - keyMaterial = keyMaterial[macLen:] - clientKey = keyMaterial[:keyLen] - keyMaterial = keyMaterial[keyLen:] - serverKey = keyMaterial[:keyLen] - keyMaterial = keyMaterial[keyLen:] - clientIV = keyMaterial[:ivLen] - keyMaterial = keyMaterial[ivLen:] - serverIV = keyMaterial[:ivLen] - return -} - -// hashFromSignatureScheme returns the corresponding crypto.Hash for a given -// hash from a TLS SignatureScheme. 
-func hashFromSignatureScheme(signatureAlgorithm SignatureScheme) (crypto.Hash, error) { - switch signatureAlgorithm { - case PKCS1WithSHA1, ECDSAWithSHA1: - return crypto.SHA1, nil - case PKCS1WithSHA256, PSSWithSHA256, ECDSAWithP256AndSHA256: - return crypto.SHA256, nil - case PKCS1WithSHA384, PSSWithSHA384, ECDSAWithP384AndSHA384: - return crypto.SHA384, nil - case PKCS1WithSHA512, PSSWithSHA512, ECDSAWithP521AndSHA512: - return crypto.SHA512, nil - default: - return 0, fmt.Errorf("tls: unsupported signature algorithm: %#04x", signatureAlgorithm) - } -} - -func newFinishedHash(version uint16, cipherSuite *cipherSuite) finishedHash { - var buffer []byte - if version == VersionSSL30 || version >= VersionTLS12 { - buffer = []byte{} - } - - prf, hash := prfAndHashForVersion(version, cipherSuite) - if hash != 0 { - return finishedHash{hash.New(), hash.New(), nil, nil, buffer, version, prf} - } - - return finishedHash{sha1.New(), sha1.New(), md5.New(), md5.New(), buffer, version, prf} -} - -// A finishedHash calculates the hash of a set of handshake messages suitable -// for including in a Finished message. -type finishedHash struct { - client hash.Hash - server hash.Hash - - // Prior to TLS 1.2, an additional MD5 hash is required. - clientMD5 hash.Hash - serverMD5 hash.Hash - - // In TLS 1.2, a full buffer is sadly required. - buffer []byte - - version uint16 - prf func(result, secret, label, seed []byte) -} - -func (h *finishedHash) Write(msg []byte) (n int, err error) { - h.client.Write(msg) - h.server.Write(msg) - - if h.version < VersionTLS12 { - h.clientMD5.Write(msg) - h.serverMD5.Write(msg) - } - - if h.buffer != nil { - h.buffer = append(h.buffer, msg...) 
- } - - return len(msg), nil -} - -func (h finishedHash) Sum() []byte { - if h.version >= VersionTLS12 { - return h.client.Sum(nil) - } - - out := make([]byte, 0, md5.Size+sha1.Size) - out = h.clientMD5.Sum(out) - return h.client.Sum(out) -} - -// finishedSum30 calculates the contents of the verify_data member of a SSLv3 -// Finished message given the MD5 and SHA1 hashes of a set of handshake -// messages. -func finishedSum30(md5, sha1 hash.Hash, masterSecret []byte, magic []byte) []byte { - md5.Write(magic) - md5.Write(masterSecret) - md5.Write(ssl30Pad1[:]) - md5Digest := md5.Sum(nil) - - md5.Reset() - md5.Write(masterSecret) - md5.Write(ssl30Pad2[:]) - md5.Write(md5Digest) - md5Digest = md5.Sum(nil) - - sha1.Write(magic) - sha1.Write(masterSecret) - sha1.Write(ssl30Pad1[:40]) - sha1Digest := sha1.Sum(nil) - - sha1.Reset() - sha1.Write(masterSecret) - sha1.Write(ssl30Pad2[:40]) - sha1.Write(sha1Digest) - sha1Digest = sha1.Sum(nil) - - ret := make([]byte, len(md5Digest)+len(sha1Digest)) - copy(ret, md5Digest) - copy(ret[len(md5Digest):], sha1Digest) - return ret -} - -var ssl3ClientFinishedMagic = [4]byte{0x43, 0x4c, 0x4e, 0x54} -var ssl3ServerFinishedMagic = [4]byte{0x53, 0x52, 0x56, 0x52} - -// clientSum returns the contents of the verify_data member of a client's -// Finished message. -func (h finishedHash) clientSum(masterSecret []byte) []byte { - if h.version == VersionSSL30 { - return finishedSum30(h.clientMD5, h.client, masterSecret, ssl3ClientFinishedMagic[:]) - } - - out := make([]byte, finishedVerifyLength) - h.prf(out, masterSecret, clientFinishedLabel, h.Sum()) - return out -} - -// serverSum returns the contents of the verify_data member of a server's -// Finished message. 
-func (h finishedHash) serverSum(masterSecret []byte) []byte { - if h.version == VersionSSL30 { - return finishedSum30(h.serverMD5, h.server, masterSecret, ssl3ServerFinishedMagic[:]) - } - - out := make([]byte, finishedVerifyLength) - h.prf(out, masterSecret, serverFinishedLabel, h.Sum()) - return out -} - -// hashForClientCertificate returns a digest over the handshake messages so far, -// suitable for signing by a TLS client certificate. -func (h finishedHash) hashForClientCertificate(sigType uint8, hashAlg crypto.Hash, masterSecret []byte) ([]byte, error) { - if (h.version == VersionSSL30 || h.version >= VersionTLS12) && h.buffer == nil { - panic("a handshake hash for a client-certificate was requested after discarding the handshake buffer") - } - - if h.version == VersionSSL30 { - if sigType != signaturePKCS1v15 { - return nil, errors.New("tls: unsupported signature type for client certificate") - } - - md5Hash := md5.New() - md5Hash.Write(h.buffer) - sha1Hash := sha1.New() - sha1Hash.Write(h.buffer) - return finishedSum30(md5Hash, sha1Hash, masterSecret, nil), nil - } - if h.version >= VersionTLS12 { - hash := hashAlg.New() - hash.Write(h.buffer) - return hash.Sum(nil), nil - } - - if sigType == signatureECDSA { - return h.server.Sum(nil), nil - } - - return h.Sum(), nil -} - -// discardHandshakeBuffer is called when there is no more need to -// buffer the entirety of the handshake messages. -func (h *finishedHash) discardHandshakeBuffer() { - h.buffer = nil -} - -// noExportedKeyingMaterial is used as a value of -// ConnectionState.ekm when renegotation is enabled and thus -// we wish to fail all key-material export requests. -func noExportedKeyingMaterial(label string, context []byte, length int) ([]byte, error) { - return nil, errors.New("crypto/tls: ExportKeyingMaterial is unavailable when renegotiation is enabled") -} - -// ekmFromMasterSecret generates exported keying material as defined in RFC 5705. 
-func ekmFromMasterSecret(version uint16, suite *cipherSuite, masterSecret, clientRandom, serverRandom []byte) func(string, []byte, int) ([]byte, error) { - return func(label string, context []byte, length int) ([]byte, error) { - switch label { - case "client finished", "server finished", "master secret", "key expansion": - // These values are reserved and may not be used. - return nil, fmt.Errorf("crypto/tls: reserved ExportKeyingMaterial label: %s", label) - } - - seedLen := len(serverRandom) + len(clientRandom) - if context != nil { - seedLen += 2 + len(context) - } - seed := make([]byte, 0, seedLen) - - seed = append(seed, clientRandom...) - seed = append(seed, serverRandom...) - - if context != nil { - if len(context) >= 1<<16 { - return nil, fmt.Errorf("crypto/tls: ExportKeyingMaterial context too long") - } - seed = append(seed, byte(len(context)>>8), byte(len(context))) - seed = append(seed, context...) - } - - keyMaterial := make([]byte, length) - prfForVersion(version, suite)(keyMaterial, masterSecret, []byte(label), seed) - return keyMaterial, nil - } -} diff --git a/vendor/github.com/marten-seemann/qtls/ticket.go b/vendor/github.com/marten-seemann/qtls/ticket.go deleted file mode 100644 index 989c9787d..000000000 --- a/vendor/github.com/marten-seemann/qtls/ticket.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package qtls - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/hmac" - "crypto/sha256" - "crypto/subtle" - "errors" - "io" - - "golang.org/x/crypto/cryptobyte" -) - -// sessionState contains the information that is serialized into a session -// ticket in order to later resume a connection. 
-type sessionState struct { - vers uint16 - cipherSuite uint16 - masterSecret []byte - certificates [][]byte - // usedOldKey is true if the ticket from which this session came from - // was encrypted with an older key and thus should be refreshed. - usedOldKey bool -} - -func (s *sessionState) marshal() []byte { - length := 2 + 2 + 2 + len(s.masterSecret) + 2 - for _, cert := range s.certificates { - length += 4 + len(cert) - } - - ret := make([]byte, length) - x := ret - x[0] = byte(s.vers >> 8) - x[1] = byte(s.vers) - x[2] = byte(s.cipherSuite >> 8) - x[3] = byte(s.cipherSuite) - x[4] = byte(len(s.masterSecret) >> 8) - x[5] = byte(len(s.masterSecret)) - x = x[6:] - copy(x, s.masterSecret) - x = x[len(s.masterSecret):] - - x[0] = byte(len(s.certificates) >> 8) - x[1] = byte(len(s.certificates)) - x = x[2:] - - for _, cert := range s.certificates { - x[0] = byte(len(cert) >> 24) - x[1] = byte(len(cert) >> 16) - x[2] = byte(len(cert) >> 8) - x[3] = byte(len(cert)) - copy(x[4:], cert) - x = x[4+len(cert):] - } - - return ret -} - -func (s *sessionState) unmarshal(data []byte) bool { - if len(data) < 8 { - return false - } - - s.vers = uint16(data[0])<<8 | uint16(data[1]) - s.cipherSuite = uint16(data[2])<<8 | uint16(data[3]) - masterSecretLen := int(data[4])<<8 | int(data[5]) - data = data[6:] - if len(data) < masterSecretLen { - return false - } - - s.masterSecret = data[:masterSecretLen] - data = data[masterSecretLen:] - - if len(data) < 2 { - return false - } - - numCerts := int(data[0])<<8 | int(data[1]) - data = data[2:] - - s.certificates = make([][]byte, numCerts) - for i := range s.certificates { - if len(data) < 4 { - return false - } - certLen := int(data[0])<<24 | int(data[1])<<16 | int(data[2])<<8 | int(data[3]) - data = data[4:] - if certLen < 0 { - return false - } - if len(data) < certLen { - return false - } - s.certificates[i] = data[:certLen] - data = data[certLen:] - } - - return len(data) == 0 -} - -// sessionStateTLS13 is the content of a TLS 1.3 
session ticket. Its first -// version (revision = 0) doesn't carry any of the information needed for 0-RTT -// validation and the nonce is always empty. -type sessionStateTLS13 struct { - // uint8 version = 0x0304; - // uint8 revision = 0; - cipherSuite uint16 - createdAt uint64 - resumptionSecret []byte // opaque resumption_master_secret<1..2^8-1>; - certificate Certificate // CertificateEntry certificate_list<0..2^24-1>; -} - -func (m *sessionStateTLS13) marshal() []byte { - var b cryptobyte.Builder - b.AddUint16(VersionTLS13) - b.AddUint8(0) // revision - b.AddUint16(m.cipherSuite) - addUint64(&b, m.createdAt) - b.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) { - b.AddBytes(m.resumptionSecret) - }) - marshalCertificate(&b, m.certificate) - return b.BytesOrPanic() -} - -func (m *sessionStateTLS13) unmarshal(data []byte) bool { - *m = sessionStateTLS13{} - s := cryptobyte.String(data) - var version uint16 - var revision uint8 - return s.ReadUint16(&version) && - version == VersionTLS13 && - s.ReadUint8(&revision) && - revision == 0 && - s.ReadUint16(&m.cipherSuite) && - readUint64(&s, &m.createdAt) && - readUint8LengthPrefixed(&s, &m.resumptionSecret) && - len(m.resumptionSecret) != 0 && - unmarshalCertificate(&s, &m.certificate) && - s.Empty() -} - -func (c *Conn) encryptTicket(state []byte) ([]byte, error) { - encrypted := make([]byte, ticketKeyNameLen+aes.BlockSize+len(state)+sha256.Size) - keyName := encrypted[:ticketKeyNameLen] - iv := encrypted[ticketKeyNameLen : ticketKeyNameLen+aes.BlockSize] - macBytes := encrypted[len(encrypted)-sha256.Size:] - - if _, err := io.ReadFull(c.config.rand(), iv); err != nil { - return nil, err - } - key := c.config.ticketKeys()[0] - copy(keyName, key.keyName[:]) - block, err := aes.NewCipher(key.aesKey[:]) - if err != nil { - return nil, errors.New("tls: failed to create cipher while encrypting ticket: " + err.Error()) - } - cipher.NewCTR(block, iv).XORKeyStream(encrypted[ticketKeyNameLen+aes.BlockSize:], state) - - mac 
:= hmac.New(sha256.New, key.hmacKey[:]) - mac.Write(encrypted[:len(encrypted)-sha256.Size]) - mac.Sum(macBytes[:0]) - - return encrypted, nil -} - -func (c *Conn) decryptTicket(encrypted []byte) (plaintext []byte, usedOldKey bool) { - if len(encrypted) < ticketKeyNameLen+aes.BlockSize+sha256.Size { - return nil, false - } - - keyName := encrypted[:ticketKeyNameLen] - iv := encrypted[ticketKeyNameLen : ticketKeyNameLen+aes.BlockSize] - macBytes := encrypted[len(encrypted)-sha256.Size:] - ciphertext := encrypted[ticketKeyNameLen+aes.BlockSize : len(encrypted)-sha256.Size] - - keys := c.config.ticketKeys() - keyIndex := -1 - for i, candidateKey := range keys { - if bytes.Equal(keyName, candidateKey.keyName[:]) { - keyIndex = i - break - } - } - - if keyIndex == -1 { - return nil, false - } - key := &keys[keyIndex] - - mac := hmac.New(sha256.New, key.hmacKey[:]) - mac.Write(encrypted[:len(encrypted)-sha256.Size]) - expected := mac.Sum(nil) - - if subtle.ConstantTimeCompare(macBytes, expected) != 1 { - return nil, false - } - - block, err := aes.NewCipher(key.aesKey[:]) - if err != nil { - return nil, false - } - plaintext = make([]byte, len(ciphertext)) - cipher.NewCTR(block, iv).XORKeyStream(plaintext, ciphertext) - - return plaintext, keyIndex > 0 -} diff --git a/vendor/github.com/marten-seemann/qtls/tls.go b/vendor/github.com/marten-seemann/qtls/tls.go deleted file mode 100644 index 818997d3e..000000000 --- a/vendor/github.com/marten-seemann/qtls/tls.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// package qtls partially implements TLS 1.2, as specified in RFC 5246, -// and TLS 1.3, as specified in RFC 8446. -// -// TLS 1.3 is available only on an opt-in basis in Go 1.12. To enable -// it, set the GODEBUG environment variable (comma-separated key=value -// options) such that it includes "tls13=1". 
To enable it from within -// the process, set the environment variable before any use of TLS: -// -// func init() { -// os.Setenv("GODEBUG", os.Getenv("GODEBUG")+",tls13=1") -// } -package qtls - -// BUG(agl): The crypto/tls package only implements some countermeasures -// against Lucky13 attacks on CBC-mode encryption, and only on SHA1 -// variants. See http://www.isg.rhul.ac.uk/tls/TLStiming.pdf and -// https://www.imperialviolet.org/2013/02/04/luckythirteen.html. - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "net" - "strings" - "time" -) - -// Server returns a new TLS server side connection -// using conn as the underlying transport. -// The configuration config must be non-nil and must include -// at least one certificate or else set GetCertificate. -func Server(conn net.Conn, config *Config) *Conn { - return &Conn{conn: conn, config: config} -} - -// Client returns a new TLS client side connection -// using conn as the underlying transport. -// The config cannot be nil: users must set either ServerName or -// InsecureSkipVerify in the config. -func Client(conn net.Conn, config *Config) *Conn { - return &Conn{conn: conn, config: config, isClient: true} -} - -// A listener implements a network listener (net.Listener) for TLS connections. -type listener struct { - net.Listener - config *Config -} - -// Accept waits for and returns the next incoming TLS connection. -// The returned connection is of type *Conn. -func (l *listener) Accept() (net.Conn, error) { - c, err := l.Listener.Accept() - if err != nil { - return nil, err - } - return Server(c, l.config), nil -} - -// NewListener creates a Listener which accepts connections from an inner -// Listener and wraps each connection with Server. -// The configuration config must be non-nil and must include -// at least one certificate or else set GetCertificate. 
-func NewListener(inner net.Listener, config *Config) net.Listener { - l := new(listener) - l.Listener = inner - l.config = config - return l -} - -// Listen creates a TLS listener accepting connections on the -// given network address using net.Listen. -// The configuration config must be non-nil and must include -// at least one certificate or else set GetCertificate. -func Listen(network, laddr string, config *Config) (net.Listener, error) { - if config == nil || (len(config.Certificates) == 0 && config.GetCertificate == nil) { - return nil, errors.New("tls: neither Certificates nor GetCertificate set in Config") - } - l, err := net.Listen(network, laddr) - if err != nil { - return nil, err - } - return NewListener(l, config), nil -} - -type timeoutError struct{} - -func (timeoutError) Error() string { return "tls: DialWithDialer timed out" } -func (timeoutError) Timeout() bool { return true } -func (timeoutError) Temporary() bool { return true } - -// DialWithDialer connects to the given network address using dialer.Dial and -// then initiates a TLS handshake, returning the resulting TLS connection. Any -// timeout or deadline given in the dialer apply to connection and TLS -// handshake as a whole. -// -// DialWithDialer interprets a nil configuration as equivalent to the zero -// configuration; see the documentation of Config for the defaults. -func DialWithDialer(dialer *net.Dialer, network, addr string, config *Config) (*Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. 
- timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := time.Until(dialer.Deadline) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- timeoutError{} - }) - } - - rawConn, err := dialer.Dial(network, addr) - if err != nil { - return nil, err - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - if config == nil { - config = defaultConfig() - } - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - c := config.Clone() - c.ServerName = hostname - config = c - } - - conn := Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - return conn, nil -} - -// Dial connects to the given network address using net.Dial -// and then initiates a TLS handshake, returning the resulting -// TLS connection. -// Dial interprets a nil configuration as equivalent to -// the zero configuration; see the documentation of Config -// for the defaults. -func Dial(network, addr string, config *Config) (*Conn, error) { - return DialWithDialer(new(net.Dialer), network, addr, config) -} - -// LoadX509KeyPair reads and parses a public/private key pair from a pair -// of files. The files must contain PEM encoded data. The certificate file -// may contain intermediate certificates following the leaf certificate to -// form a certificate chain. On successful return, Certificate.Leaf will -// be nil because the parsed form of the certificate is not retained. 
-func LoadX509KeyPair(certFile, keyFile string) (Certificate, error) { - certPEMBlock, err := ioutil.ReadFile(certFile) - if err != nil { - return Certificate{}, err - } - keyPEMBlock, err := ioutil.ReadFile(keyFile) - if err != nil { - return Certificate{}, err - } - return X509KeyPair(certPEMBlock, keyPEMBlock) -} - -// X509KeyPair parses a public/private key pair from a pair of -// PEM encoded data. On successful return, Certificate.Leaf will be nil because -// the parsed form of the certificate is not retained. -func X509KeyPair(certPEMBlock, keyPEMBlock []byte) (Certificate, error) { - fail := func(err error) (Certificate, error) { return Certificate{}, err } - - var cert Certificate - var skippedBlockTypes []string - for { - var certDERBlock *pem.Block - certDERBlock, certPEMBlock = pem.Decode(certPEMBlock) - if certDERBlock == nil { - break - } - if certDERBlock.Type == "CERTIFICATE" { - cert.Certificate = append(cert.Certificate, certDERBlock.Bytes) - } else { - skippedBlockTypes = append(skippedBlockTypes, certDERBlock.Type) - } - } - - if len(cert.Certificate) == 0 { - if len(skippedBlockTypes) == 0 { - return fail(errors.New("tls: failed to find any PEM data in certificate input")) - } - if len(skippedBlockTypes) == 1 && strings.HasSuffix(skippedBlockTypes[0], "PRIVATE KEY") { - return fail(errors.New("tls: failed to find certificate PEM data in certificate input, but did find a private key; PEM inputs may have been switched")) - } - return fail(fmt.Errorf("tls: failed to find \"CERTIFICATE\" PEM block in certificate input after skipping PEM blocks of the following types: %v", skippedBlockTypes)) - } - - skippedBlockTypes = skippedBlockTypes[:0] - var keyDERBlock *pem.Block - for { - keyDERBlock, keyPEMBlock = pem.Decode(keyPEMBlock) - if keyDERBlock == nil { - if len(skippedBlockTypes) == 0 { - return fail(errors.New("tls: failed to find any PEM data in key input")) - } - if len(skippedBlockTypes) == 1 && skippedBlockTypes[0] == "CERTIFICATE" { - return 
fail(errors.New("tls: found a certificate rather than a key in the PEM for the private key")) - } - return fail(fmt.Errorf("tls: failed to find PEM block with type ending in \"PRIVATE KEY\" in key input after skipping PEM blocks of the following types: %v", skippedBlockTypes)) - } - if keyDERBlock.Type == "PRIVATE KEY" || strings.HasSuffix(keyDERBlock.Type, " PRIVATE KEY") { - break - } - skippedBlockTypes = append(skippedBlockTypes, keyDERBlock.Type) - } - - // We don't need to parse the public key for TLS, but we so do anyway - // to check that it looks sane and matches the private key. - x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return fail(err) - } - - cert.PrivateKey, err = parsePrivateKey(keyDERBlock.Bytes) - if err != nil { - return fail(err) - } - - switch pub := x509Cert.PublicKey.(type) { - case *rsa.PublicKey: - priv, ok := cert.PrivateKey.(*rsa.PrivateKey) - if !ok { - return fail(errors.New("tls: private key type does not match public key type")) - } - if pub.N.Cmp(priv.N) != 0 { - return fail(errors.New("tls: private key does not match public key")) - } - case *ecdsa.PublicKey: - priv, ok := cert.PrivateKey.(*ecdsa.PrivateKey) - if !ok { - return fail(errors.New("tls: private key type does not match public key type")) - } - if pub.X.Cmp(priv.X) != 0 || pub.Y.Cmp(priv.Y) != 0 { - return fail(errors.New("tls: private key does not match public key")) - } - default: - return fail(errors.New("tls: unknown public key algorithm")) - } - - return cert, nil -} - -// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates -// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys. -// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three. 
-func parsePrivateKey(der []byte) (crypto.PrivateKey, error) { - if key, err := x509.ParsePKCS1PrivateKey(der); err == nil { - return key, nil - } - if key, err := x509.ParsePKCS8PrivateKey(der); err == nil { - switch key := key.(type) { - case *rsa.PrivateKey, *ecdsa.PrivateKey: - return key, nil - default: - return nil, errors.New("tls: found unknown private key type in PKCS#8 wrapping") - } - } - if key, err := x509.ParseECPrivateKey(der); err == nil { - return key, nil - } - - return nil, errors.New("tls: failed to parse private key") -} diff --git a/vendor/github.com/mholt/caddy/.gitattributes b/vendor/github.com/mholt/caddy/.gitattributes deleted file mode 100644 index 2e5077856..000000000 --- a/vendor/github.com/mholt/caddy/.gitattributes +++ /dev/null @@ -1,19 +0,0 @@ -# shell scripts should not use tabs to indent! -*.bash text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.sh text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 - -# files for systemd (shell-similar) -*.path text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.service text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.timer text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 - -# go fmt will enforce this, but in case a user has not called "go fmt" allow GIT to catch this: -*.go text eol=lf core.whitespace whitespace=indent-with-non-tab,trailing-space,tabwidth=4 - -*.txt text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.tpl text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.htm text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.html text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.md text eol=lf core.whitespace whitespace=tab-in-indent,trailing-space,tabwidth=2 -*.yml text eol=lf core.whitespace 
whitespace=tab-in-indent,trailing-space,tabwidth=2 -.git* text eol=auto core.whitespace whitespace=trailing-space diff --git a/vendor/github.com/mholt/caddy/.gitignore b/vendor/github.com/mholt/caddy/.gitignore deleted file mode 100644 index d996add1e..000000000 --- a/vendor/github.com/mholt/caddy/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -.DS_Store -Thumbs.db -_gitignore/ -Vagrantfile -.vagrant/ -/.idea - -dist/builds/ -dist/release/ - -error.log -access.log - -/*.conf -Caddyfile -!caddyfile/ - -og_static/ - -.vscode/ - -*.bat diff --git a/vendor/github.com/mholt/caddy/LICENSE.txt b/vendor/github.com/mholt/caddy/LICENSE.txt deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/github.com/mholt/caddy/LICENSE.txt +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/mholt/caddy/README.md b/vendor/github.com/mholt/caddy/README.md deleted file mode 100644 index 0acd13657..000000000 --- a/vendor/github.com/mholt/caddy/README.md +++ /dev/null @@ -1,225 +0,0 @@ -

- Caddy -

-

Every Site on HTTPS

-

Caddy is a general-purpose HTTP/2 web server that serves HTTPS by default.

-

- - - -
- @caddyserver on Twitter - Caddy Forum - Caddy on Sourcegraph -

-

- Download · - Documentation · - Community -

- ---- - -Caddy is a **production-ready** open-source web server that is fast, easy to use, and makes you more productive. - -Available for Windows, Mac, Linux, BSD, Solaris, and [Android](https://github.com/mholt/caddy/wiki/Running-Caddy-on-Android). - -

- Thanks to our special sponsor: -

- Relica - Cross-platform file backup to the cloud, local disks, or other computers -

- -## Menu - -- [Features](#features) -- [Install](#install) -- [Quick Start](#quick-start) -- [Running in Production](#running-in-production) -- [Contributing](#contributing) -- [Donors](#donors) -- [About the Project](#about-the-project) - -## Features - -- **Easy configuration** with the Caddyfile -- **Automatic HTTPS** on by default (via [Let's Encrypt](https://letsencrypt.org)) -- **HTTP/2** by default -- **Virtual hosting** so multiple sites just work -- Experimental **QUIC support** for cutting-edge transmissions -- TLS session ticket **key rotation** for more secure connections -- **Extensible with plugins** because a convenient web server is a helpful one -- **Runs anywhere** with **no external dependencies** (not even libc) - -[See a more complete list of features built into Caddy.](https://caddyserver.com/features) On top of all those, Caddy does even more with plugins: choose which plugins you want at [download](https://caddyserver.com/download). - -Altogether, Caddy can do things other web servers simply cannot do. Its features and plugins save you time and mistakes, and will cheer you up. Your Caddy instance takes care of the details for you! - - -

- Powered by -
- CertMagic -

- - -## Install - -Caddy binaries have no dependencies and are available for every platform. Get Caddy any of these ways: - -- **[Download page](https://caddyserver.com/download)** (RECOMMENDED) allows you to customize your build in the browser -- **[Latest release](https://github.com/mholt/caddy/releases/latest)** for pre-built, vanilla binaries -- **[AWS Marketplace](https://aws.amazon.com/marketplace/pp/B07J1WNK75?qid=1539015041932&sr=0-1&ref_=srh_res_product_title&cl_spe=C)** makes it easy to deploy directly to your cloud environment. -Get Caddy on the AWS Marketplace - - -## Build - -To build from source you need **[Git](https://git-scm.com/downloads)** and **[Go](https://golang.org/doc/install)** (1.12 or newer). - -**To build Caddy without plugins:** - - -1. Set the transitional environment variable for Go modules: `export GO111MODULE=on` -2. Run `go get github.com/mholt/caddy/caddy` - -Caddy will be installed to your `$GOPATH/bin` folder. - -With these instructions, the binary will not have embedded version information (see [golang/go#29228](https://github.com/golang/go/issues/29228)), but it is fine for a quick start. - -**To build Caddy with plugins (and with version information):** - -There is no need to modify the Caddy code to build it with plugins. We will create a simple Go module with our own `main()` that you can use to make custom Caddy builds. - - -1. Set the transitional environment variable for Go modules: `export GO111MODULE=on` -2. Create a new folder anywhere, and put this Go file into it, then import the plugins you want to include: -```go -package main - -import ( - "github.com/mholt/caddy/caddy/caddymain" - - // plug in plugins here, for example: - // _ "import/path/here" -) - -func main() { - // optional: disable telemetry - // caddymain.EnableTelemetry = false - caddymain.Run() -} -``` -3. `go mod init caddy` -4. Run `go get github.com/mholt/caddy` -5. 
`go install` will then create your binary at `$GOPATH/bin`, or `go build` will put it in the current directory. - -**To install Caddy's source code for development:** - - -1. Set the transitional environment variable for Go modules: `export GO111MODULE=on` -2. Run `git clone https://github.com/mholt/caddy.git` in any folder (doesn't have to be in GOPATH). - -You can make changes to the source code from that clone and checkout any commit or tag you wish to develop on. - -When building from source, telemetry is enabled by default. You can disable it by changing `caddymain.EnableTelemetry = false` in run.go, or use the `-disabled-metrics` flag at runtime to disable only certain metrics. - - -## Quick Start - -To serve static files from the current working directory, run: - -``` -caddy -``` - -Caddy's default port is 2015, so open your browser to [http://localhost:2015](http://localhost:2015). - -### Go from 0 to HTTPS in 5 seconds - -If the `caddy` binary has permission to bind to low ports and your domain name's DNS records point to the machine you're on: - -``` -caddy -host example.com -``` - -This command serves static files from the current directory over HTTPS. Certificates are automatically obtained and renewed for you! Caddy is also automatically configuring ports 80 and 443 for you, and redirecting HTTP to HTTPS. Cool, huh? - -### Customizing your site - -To customize how your site is served, create a file named Caddyfile by your site and paste this into it: - -```plain -localhost - -push -browse -websocket /echo cat -ext .html -log /var/log/access.log -proxy /api 127.0.0.1:7005 -header /api Access-Control-Allow-Origin * -``` - -When you run `caddy` in that directory, it will automatically find and use that Caddyfile. 
- -This simple file enables server push (via Link headers), allows directory browsing (for folders without an index file), hosts a WebSocket echo server at /echo, serves clean URLs, logs requests to an access log, proxies all API requests to a backend on port 7005, and adds the coveted `Access-Control-Allow-Origin: *` header for all responses from the API. - -Wow! Caddy can do a lot with just a few lines. - -### Doing more with Caddy - -To host multiple sites and do more with the Caddyfile, please see the [Caddyfile tutorial](https://caddyserver.com/tutorial/caddyfile). - -Sites with qualifying hostnames are served over [HTTPS by default](https://caddyserver.com/docs/automatic-https). - -Caddy has a nice little command line interface. Run `caddy -h` to view basic help or see the [CLI documentation](https://caddyserver.com/docs/cli) for details. - - -## Running in Production - -Caddy is production-ready if you find it to be a good fit for your site and workflow. - -**Running as root:** We advise against this. You can still listen on ports < 1024 on Linux using setcap like so: `sudo setcap cap_net_bind_service=+ep ./caddy` - -The Caddy project does not officially maintain any system-specific integrations nor suggest how to administer your own system. But your download file includes [unofficial resources](https://github.com/mholt/caddy/tree/master/dist/init) contributed by the community that you may find helpful for running Caddy in production. - -How you choose to run Caddy is up to you. Many users are satisfied with `nohup caddy &`. Others use `screen`. Users who need Caddy to come back up after reboots either do so in the script that caused the reboot, add a command to an init script, or configure a service with their OS. 
- -If you have questions or concerns about Caddy' underlying crypto implementations, consult Go's [crypto packages](https://golang.org/pkg/crypto), starting with their documentation, then issues, then the code itself; as Caddy uses mainly those libraries. - - -## Contributing - -**[Join our forum](https://caddy.community) where you can chat with other Caddy users and developers!** To get familiar with the code base, try [Caddy code search on Sourcegraph](https://sourcegraph.com/github.com/mholt/caddy/)! - -Please see our [contributing guidelines](https://github.com/mholt/caddy/blob/master/.github/CONTRIBUTING.md) for instructions. If you want to write a plugin, check out the [developer wiki](https://github.com/mholt/caddy/wiki). - -We use GitHub issues and pull requests only for discussing bug reports and the development of specific changes. We welcome all other topics on the [forum](https://caddy.community)! - -If you want to contribute to the documentation, please [submit an issue](https://github.com/mholt/caddy/issues/new) describing the change that should be made. - -### Good First Issue - -If you are looking for somewhere to start and would like to help out by working on an existing issue, take a look at our [`Good First Issue`](https://github.com/mholt/caddy/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) tag - -Thanks for making Caddy -- and the Web -- better! - - -## Donors - -- [DigitalOcean](https://m.do.co/c/6d7bdafccf96) is hosting the Caddy project. -- [DNSimple](https://dnsimple.link/resolving-caddy) provides DNS services for Caddy's sites. -- [DNS Spy](https://dnsspy.io) keeps an eye on Caddy's DNS properties. - -We thank them for their services. **If you want to help keep Caddy free, please [become a sponsor](https://caddyserver.com/pricing)!** - - -## About the Project - -Caddy was born out of the need for a "batteries-included" web server that runs anywhere and doesn't have to take its configuration with it. 
Caddy took inspiration from [spark](https://github.com/rif/spark), [nginx](https://github.com/nginx/nginx), lighttpd, -[Websocketd](https://github.com/joewalnes/websocketd) and [Vagrant](https://www.vagrantup.com/), which provides a pleasant mixture of features from each of them. - -**The name "Caddy" is trademarked:** The name of the software is "Caddy", not "Caddy Server" or "CaddyServer". Please call it "Caddy" or, if you wish to clarify, "the Caddy web server". See [brand guidelines](https://caddyserver.com/brand). Caddy is a registered trademark of Light Code Labs, LLC. - -*Author on Twitter: [@mholt6](https://twitter.com/mholt6)* diff --git a/vendor/github.com/mholt/caddy/assets.go b/vendor/github.com/mholt/caddy/assets.go deleted file mode 100644 index 893653fef..000000000 --- a/vendor/github.com/mholt/caddy/assets.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "os" - "path/filepath" - "runtime" -) - -// AssetsPath returns the path to the folder -// where the application may store data. If -// CADDYPATH env variable is set, that value -// is used. Otherwise, the path is the result -// of evaluating "$HOME/.caddy". 
-func AssetsPath() string { - if caddyPath := os.Getenv("CADDYPATH"); caddyPath != "" { - return caddyPath - } - return filepath.Join(userHomeDir(), ".caddy") -} - -// userHomeDir returns the user's home directory according to -// environment variables. -// -// Credit: http://stackoverflow.com/a/7922977/1048862 -func userHomeDir() string { - if runtime.GOOS == "windows" { - home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") - if home == "" { - home = os.Getenv("USERPROFILE") - } - return home - } - return os.Getenv("HOME") -} diff --git a/vendor/github.com/mholt/caddy/azure-pipelines.yml b/vendor/github.com/mholt/caddy/azure-pipelines.yml deleted file mode 100644 index 72796a51b..000000000 --- a/vendor/github.com/mholt/caddy/azure-pipelines.yml +++ /dev/null @@ -1,88 +0,0 @@ -# Mutilated beyond recognition from the example at: -# https://docs.microsoft.com/azure/devops/pipelines/languages/go - -trigger: -- master - -strategy: - matrix: - linux: - imageName: ubuntu-16.04 - gorootDir: /usr/local - mac: - imageName: macos-10.13 - gorootDir: /usr/local - windows: - imageName: windows-2019 - gorootDir: C:\ - -pool: - vmImage: $(imageName) - -variables: - GOROOT: $(gorootDir)/go - GOPATH: $(system.defaultWorkingDirectory)/gopath - GOBIN: $(GOPATH)/bin - modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' - # TODO: modules should be the default in Go 1.13, so this won't be needed - GO111MODULE: on - -steps: -- bash: | - latestGo=$(curl "https://golang.org/VERSION?m=text") - echo "##vso[task.setvariable variable=LATEST_GO]$latestGo" - echo "Latest Go version: $latestGo" - displayName: "Get latest Go version" - -- bash: | - sudo rm -f $(which go) - echo '##vso[task.prependpath]$(GOBIN)' - echo '##vso[task.prependpath]$(GOROOT)/bin' - mkdir -p '$(modulePath)' - shopt -s extglob - shopt -s dotglob - mv !(gopath) '$(modulePath)' - displayName: Remove old Go, set GOBIN/GOROOT, and move project into GOPATH - -# Install Go (this varies by platform) - -- bash: | - 
wget "https://dl.google.com/go/$(LATEST_GO).linux-amd64.tar.gz" - sudo tar -C $(gorootDir) -xzf "$(LATEST_GO).linux-amd64.tar.gz" - condition: eq( variables['Agent.OS'], 'Linux' ) - displayName: Install Go on Linux - -- bash: | - wget "https://dl.google.com/go/$(LATEST_GO).darwin-amd64.tar.gz" - sudo tar -C $(gorootDir) -xzf "$(LATEST_GO).darwin-amd64.tar.gz" - condition: eq( variables['Agent.OS'], 'Darwin' ) - displayName: Install Go on macOS - -- powershell: | - Write-Host "Downloading Go... (please be patient, I am very slow)" - (New-Object System.Net.WebClient).DownloadFile("https://dl.google.com/go/$(LATEST_GO).windows-amd64.zip", "$(LATEST_GO).windows-amd64.zip") - Write-Host "Extracting Go... (I'm slow too)" - Expand-Archive "$(LATEST_GO).windows-amd64.zip" -DestinationPath "$(gorootDir)" - condition: eq( variables['Agent.OS'], 'Windows_NT' ) - displayName: Install Go on Windows - -# TODO: When this issue is fixed, replace with installer script: -# https://github.com/golangci/golangci-lint/issues/472 -- script: go get -v github.com/golangci/golangci-lint/cmd/golangci-lint - displayName: Install golangci-lint - -- bash: | - printf "Using go at: $(which go)\n" - printf "Go version: $(go version)\n" - printf "\n\nGo environment:\n\n" - go env - printf "\n\nSystem environment:\n\n" - env - displayName: Print Go version and environment - -- script: | - go get -v -t -d ./... - golangci-lint run -E gofmt -E goimports -E misspell - go test -race ./... - workingDirectory: '$(modulePath)' - displayName: Run tests diff --git a/vendor/github.com/mholt/caddy/caddy.go b/vendor/github.com/mholt/caddy/caddy.go deleted file mode 100644 index 2aafd3758..000000000 --- a/vendor/github.com/mholt/caddy/caddy.go +++ /dev/null @@ -1,1026 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package caddy implements the Caddy server manager. -// -// To use this package: -// -// 1. Set the AppName and AppVersion variables. -// 2. Call LoadCaddyfile() to get the Caddyfile. -// Pass in the name of the server type (like "http"). -// Make sure the server type's package is imported -// (import _ "github.com/mholt/caddy/caddyhttp"). -// 3. Call caddy.Start() to start Caddy. You get back -// an Instance, on which you can call Restart() to -// restart it or Stop() to stop it. -// -// You should call Wait() on your instance to wait for -// all servers to quit before your process exits. -package caddy - -import ( - "bytes" - "encoding/gob" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/mholt/caddy/caddyfile" - "github.com/mholt/caddy/telemetry" -) - -// Configurable application parameters -var ( - // AppName is the name of the application. - AppName string - - // AppVersion is the version of the application. - AppVersion string - - // Quiet mode will not show any informative output on initialization. - Quiet bool - - // PidFile is the path to the pidfile to create. - PidFile string - - // GracefulTimeout is the maximum duration of a graceful shutdown. - GracefulTimeout time.Duration - - // isUpgrade will be set to true if this process - // was started as part of an upgrade, where a parent - // Caddy process started this one. 
- isUpgrade = os.Getenv("CADDY__UPGRADE") == "1" - - // started will be set to true when the first - // instance is started; it never gets set to - // false after that. - started bool - - // mu protects the variables 'isUpgrade' and 'started'. - mu sync.Mutex -) - -func init() { - OnProcessExit = append(OnProcessExit, func() { - if PidFile != "" { - os.Remove(PidFile) - } - }) -} - -// Instance contains the state of servers created as a result of -// calling Start and can be used to access or control those servers. -// It is literally an instance of a server type. Instance values -// should NOT be copied. Use *Instance for safety. -type Instance struct { - // serverType is the name of the instance's server type - serverType string - - // caddyfileInput is the input configuration text used for this process - caddyfileInput Input - - // wg is used to wait for all servers to shut down - wg *sync.WaitGroup - - // context is the context created for this instance, - // used to coordinate the setting up of the server type - context Context - - // servers is the list of servers with their listeners - servers []ServerListener - - // these callbacks execute when certain events occur - OnFirstStartup []func() error // starting, not as part of a restart - OnStartup []func() error // starting, even as part of a restart - OnRestart []func() error // before restart commences - OnRestartFailed []func() error // if restart failed - OnShutdown []func() error // stopping, even as part of a restart - OnFinalShutdown []func() error // stopping, not as part of a restart - - // storing values on an instance is preferable to - // global state because these will get garbage- - // collected after in-process reloads when the - // old instances are destroyed; use StorageMu - // to access this value safely - Storage map[interface{}]interface{} - StorageMu sync.RWMutex -} - -// Instances returns the list of instances. 
-func Instances() []*Instance { - return instances -} - -// Servers returns the ServerListeners in i. -func (i *Instance) Servers() []ServerListener { return i.servers } - -// Stop stops all servers contained in i. It does NOT -// execute shutdown callbacks. -func (i *Instance) Stop() error { - // stop the servers - for _, s := range i.servers { - if gs, ok := s.server.(GracefulServer); ok { - if err := gs.Stop(); err != nil { - log.Printf("[ERROR] Stopping %s: %v", gs.Address(), err) - } - } - } - - // splice i out of instance list, causing it to be garbage-collected - instancesMu.Lock() - for j, other := range instances { - if other == i { - instances = append(instances[:j], instances[j+1:]...) - break - } - } - instancesMu.Unlock() - - return nil -} - -// ShutdownCallbacks executes all the shutdown callbacks of i, -// including ones that are scheduled only for the final shutdown -// of i. An error returned from one does not stop execution of -// the rest. All the non-nil errors will be returned. -func (i *Instance) ShutdownCallbacks() []error { - var errs []error - for _, shutdownFunc := range i.OnShutdown { - err := shutdownFunc() - if err != nil { - errs = append(errs, err) - } - } - for _, finalShutdownFunc := range i.OnFinalShutdown { - err := finalShutdownFunc() - if err != nil { - errs = append(errs, err) - } - } - return errs -} - -// Restart replaces the servers in i with new servers created from -// executing the newCaddyfile. Upon success, it returns the new -// instance to replace i. Upon failure, i will not be replaced. 
-func (i *Instance) Restart(newCaddyfile Input) (*Instance, error) { - log.Println("[INFO] Reloading") - - i.wg.Add(1) - defer i.wg.Done() - - var err error - // if something went wrong on restart then run onRestartFailed callbacks - defer func() { - r := recover() - if err != nil || r != nil { - for _, fn := range i.OnRestartFailed { - if err := fn(); err != nil { - log.Printf("[ERROR] Restart failed callback returned error: %v", err) - } - } - if err != nil { - log.Printf("[ERROR] Restart failed: %v", err) - } - if r != nil { - log.Printf("[PANIC] Restart: %v", r) - } - } - }() - - // run restart callbacks - for _, fn := range i.OnRestart { - err = fn() - if err != nil { - return i, err - } - } - - if newCaddyfile == nil { - newCaddyfile = i.caddyfileInput - } - - // Add file descriptors of all the sockets that are capable of it - restartFds := make(map[string]restartTriple) - for _, s := range i.servers { - gs, srvOk := s.server.(GracefulServer) - ln, lnOk := s.listener.(Listener) - pc, pcOk := s.packet.(PacketConn) - if srvOk { - if lnOk && pcOk { - restartFds[gs.Address()] = restartTriple{server: gs, listener: ln, packet: pc} - continue - } - if lnOk { - restartFds[gs.Address()] = restartTriple{server: gs, listener: ln} - continue - } - if pcOk { - restartFds[gs.Address()] = restartTriple{server: gs, packet: pc} - continue - } - } - } - - // create new instance; if the restart fails, it is simply discarded - newInst := &Instance{serverType: newCaddyfile.ServerType(), wg: i.wg, Storage: make(map[interface{}]interface{})} - - // attempt to start new instance - err = startWithListenerFds(newCaddyfile, newInst, restartFds) - if err != nil { - return i, fmt.Errorf("starting with listener file descriptors: %v", err) - } - - // success! 
stop the old instance - err = i.Stop() - if err != nil { - return i, err - } - for _, shutdownFunc := range i.OnShutdown { - err = shutdownFunc() - if err != nil { - return i, err - } - } - - // Execute instantiation events - EmitEvent(InstanceStartupEvent, newInst) - - log.Println("[INFO] Reloading complete") - - return newInst, nil -} - -// SaveServer adds s and its associated listener ln to the -// internally-kept list of servers that is running. For -// saved servers, graceful restarts will be provided. -func (i *Instance) SaveServer(s Server, ln net.Listener) { - i.servers = append(i.servers, ServerListener{server: s, listener: ln}) -} - -// TCPServer is a type that can listen and serve connections. -// A TCPServer must associate with exactly zero or one net.Listeners. -type TCPServer interface { - // Listen starts listening by creating a new listener - // and returning it. It does not start accepting - // connections. For UDP-only servers, this method - // can be a no-op that returns (nil, nil). - Listen() (net.Listener, error) - - // Serve starts serving using the provided listener. - // Serve must start the server loop nearly immediately, - // or at least not return any errors before the server - // loop begins. Serve blocks indefinitely, or in other - // words, until the server is stopped. For UDP-only - // servers, this method can be a no-op that returns nil. - Serve(net.Listener) error -} - -// UDPServer is a type that can listen and serve packets. -// A UDPServer must associate with exactly zero or one net.PacketConns. -type UDPServer interface { - // ListenPacket starts listening by creating a new packetconn - // and returning it. It does not start accepting connections. - // TCP-only servers may leave this method blank and return - // (nil, nil). - ListenPacket() (net.PacketConn, error) - - // ServePacket starts serving using the provided packetconn. 
- // ServePacket must start the server loop nearly immediately, - // or at least not return any errors before the server - // loop begins. ServePacket blocks indefinitely, or in other - // words, until the server is stopped. For TCP-only servers, - // this method can be a no-op that returns nil. - ServePacket(net.PacketConn) error -} - -// Server is a type that can listen and serve. It supports both -// TCP and UDP, although the UDPServer interface can be used -// for more than just UDP. -// -// If the server uses TCP, it should implement TCPServer completely. -// If it uses UDP or some other protocol, it should implement -// UDPServer completely. If it uses both, both interfaces should be -// fully implemented. Any unimplemented methods should be made as -// no-ops that simply return nil values. -type Server interface { - TCPServer - UDPServer -} - -// Stopper is a type that can stop serving. The stop -// does not necessarily have to be graceful. -type Stopper interface { - // Stop stops the server. It blocks until the - // server is completely stopped. - Stop() error -} - -// GracefulServer is a Server and Stopper, the stopping -// of which is graceful (whatever that means for the kind -// of server being implemented). It must be able to return -// the address it is configured to listen on so that its -// listener can be paired with it upon graceful restarts. -// The net.Listener that a GracefulServer creates must -// implement the Listener interface for restarts to be -// graceful (assuming the listener is for TCP). -type GracefulServer interface { - Server - Stopper - - // Address returns the address the server should - // listen on; it is used to pair the server to - // its listener during a graceful/zero-downtime - // restart. Thus when implementing this method, - // you must not access a listener to get the - // address; you must store the address the - // server is to serve on some other way. 
- Address() string - - // WrapListener wraps a listener with the - // listener middlewares configured for this - // server, if any. - WrapListener(net.Listener) net.Listener -} - -// Listener is a net.Listener with an underlying file descriptor. -// A server's listener should implement this interface if it is -// to support zero-downtime reloads. -type Listener interface { - net.Listener - File() (*os.File, error) -} - -// PacketConn is a net.PacketConn with an underlying file descriptor. -// A server's packetconn should implement this interface if it is -// to support zero-downtime reloads (in sofar this holds true for datagram -// connections). -type PacketConn interface { - net.PacketConn - File() (*os.File, error) -} - -// AfterStartup is an interface that can be implemented -// by a server type that wants to run some code after all -// servers for the same Instance have started. -type AfterStartup interface { - OnStartupComplete() -} - -// LoadCaddyfile loads a Caddyfile by calling the plugged in -// Caddyfile loader methods. An error is returned if more than -// one loader returns a non-nil Caddyfile input. If no loaders -// load a Caddyfile, the default loader is used. If no default -// loader is registered or it returns nil, the server type's -// default Caddyfile is loaded. If the server type does not -// specify any default Caddyfile value, then an empty Caddyfile -// is returned. Consequently, this function never returns a nil -// value as long as there are no errors. -func LoadCaddyfile(serverType string) (Input, error) { - // If we are finishing an upgrade, we must obtain the Caddyfile - // from our parent process, regardless of configured loaders. 
- if IsUpgrade() { - err := gob.NewDecoder(os.Stdin).Decode(&loadedGob) - if err != nil { - return nil, err - } - return loadedGob.Caddyfile, nil - } - - // Ask plugged-in loaders for a Caddyfile - cdyfile, err := loadCaddyfileInput(serverType) - if err != nil { - return nil, err - } - - // Otherwise revert to default - if cdyfile == nil { - cdyfile = DefaultInput(serverType) - } - - // Still nil? Geez. - if cdyfile == nil { - cdyfile = CaddyfileInput{ServerTypeName: serverType} - } - - return cdyfile, nil -} - -// Wait blocks until all of i's servers have stopped. -func (i *Instance) Wait() { - i.wg.Wait() -} - -// CaddyfileFromPipe loads the Caddyfile input from f if f is -// not interactive input. f is assumed to be a pipe or stream, -// such as os.Stdin. If f is not a pipe, no error is returned -// but the Input value will be nil. An error is only returned -// if there was an error reading the pipe, even if the length -// of what was read is 0. -func CaddyfileFromPipe(f *os.File, serverType string) (Input, error) { - fi, err := f.Stat() - if err == nil && fi.Mode()&os.ModeCharDevice == 0 { - // Note that a non-nil error is not a problem. Windows - // will not create a stdin if there is no pipe, which - // produces an error when calling Stat(). But Unix will - // make one either way, which is why we also check that - // bitmask. - // NOTE: Reading from stdin after this fails (e.g. for the let's encrypt email address) (OS X) - confBody, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - return CaddyfileInput{ - Contents: confBody, - Filepath: f.Name(), - ServerTypeName: serverType, - }, nil - } - - // not having input from the pipe is not itself an error, - // just means no input to return. - return nil, nil -} - -// Caddyfile returns the Caddyfile used to create i. -func (i *Instance) Caddyfile() Input { - return i.caddyfileInput -} - -// Start starts Caddy with the given Caddyfile. 
-// -// This function blocks until all the servers are listening. -func Start(cdyfile Input) (*Instance, error) { - inst := &Instance{serverType: cdyfile.ServerType(), wg: new(sync.WaitGroup), Storage: make(map[interface{}]interface{})} - err := startWithListenerFds(cdyfile, inst, nil) - if err != nil { - return inst, err - } - signalSuccessToParent() - if pidErr := writePidFile(); pidErr != nil { - log.Printf("[ERROR] Could not write pidfile: %v", pidErr) - } - - // Execute instantiation events - EmitEvent(InstanceStartupEvent, inst) - - return inst, nil -} - -func startWithListenerFds(cdyfile Input, inst *Instance, restartFds map[string]restartTriple) error { - // save this instance in the list now so that - // plugins can access it if need be, for example - // the caddytls package, so it can perform cert - // renewals while starting up; we just have to - // remove the instance from the list later if - // it fails - instancesMu.Lock() - instances = append(instances, inst) - instancesMu.Unlock() - var err error - defer func() { - if err != nil { - instancesMu.Lock() - for i, otherInst := range instances { - if otherInst == inst { - instances = append(instances[:i], instances[i+1:]...) 
- break - } - } - instancesMu.Unlock() - } - }() - - if cdyfile == nil { - cdyfile = CaddyfileInput{} - } - - err = ValidateAndExecuteDirectives(cdyfile, inst, false) - if err != nil { - return err - } - - slist, err := inst.context.MakeServers() - if err != nil { - return err - } - - // run startup callbacks - if !IsUpgrade() && restartFds == nil { - // first startup means not a restart or upgrade - for _, firstStartupFunc := range inst.OnFirstStartup { - err = firstStartupFunc() - if err != nil { - return err - } - } - } - for _, startupFunc := range inst.OnStartup { - err = startupFunc() - if err != nil { - return err - } - } - - err = startServers(slist, inst, restartFds) - if err != nil { - return err - } - - // run any AfterStartup callbacks if this is not - // part of a restart; then show file descriptor notice - if restartFds == nil { - for _, srvln := range inst.servers { - if srv, ok := srvln.server.(AfterStartup); ok { - srv.OnStartupComplete() - } - } - if !Quiet { - for _, srvln := range inst.servers { - // only show FD notice if the listener is not nil. - // This can happen when only serving UDP or TCP - if srvln.listener == nil { - continue - } - if !IsLoopback(srvln.listener.Addr().String()) { - checkFdlimit() - break - } - } - } - } - - mu.Lock() - started = true - mu.Unlock() - - return nil -} - -// ValidateAndExecuteDirectives will load the server blocks from cdyfile -// by parsing it, then execute the directives configured by it and store -// the resulting server blocks into inst. If justValidate is true, parse -// callbacks will not be executed between directives, since the purpose -// is only to check the input for valid syntax. -func ValidateAndExecuteDirectives(cdyfile Input, inst *Instance, justValidate bool) error { - // If parsing only inst will be nil, create an instance for this function call only. 
- if justValidate { - inst = &Instance{serverType: cdyfile.ServerType(), wg: new(sync.WaitGroup), Storage: make(map[interface{}]interface{})} - } - - stypeName := cdyfile.ServerType() - - stype, err := getServerType(stypeName) - if err != nil { - return err - } - - inst.caddyfileInput = cdyfile - - sblocks, err := loadServerBlocks(stypeName, cdyfile.Path(), bytes.NewReader(cdyfile.Body())) - if err != nil { - return err - } - - inst.context = stype.NewContext(inst) - if inst.context == nil { - return fmt.Errorf("server type %s produced a nil Context", stypeName) - } - - sblocks, err = inst.context.InspectServerBlocks(cdyfile.Path(), sblocks) - if err != nil { - return fmt.Errorf("error inspecting server blocks: %v", err) - } - - telemetry.Set("num_server_blocks", len(sblocks)) - - return executeDirectives(inst, cdyfile.Path(), stype.Directives(), sblocks, justValidate) -} - -func executeDirectives(inst *Instance, filename string, - directives []string, sblocks []caddyfile.ServerBlock, justValidate bool) error { - // map of server block ID to map of directive name to whatever. - storages := make(map[int]map[string]interface{}) - - // It is crucial that directives are executed in the proper order. - // We loop with the directives on the outer loop so we execute - // a directive for all server blocks before going to the next directive. - // This is important mainly due to the parsing callbacks (below). 
- for _, dir := range directives { - for i, sb := range sblocks { - var once sync.Once - if _, ok := storages[i]; !ok { - storages[i] = make(map[string]interface{}) - } - - for j, key := range sb.Keys { - // Execute directive if it is in the server block - if tokens, ok := sb.Tokens[dir]; ok { - controller := &Controller{ - instance: inst, - Key: key, - Dispenser: caddyfile.NewDispenserTokens(filename, tokens), - OncePerServerBlock: func(f func() error) error { - var err error - once.Do(func() { - err = f() - }) - return err - }, - ServerBlockIndex: i, - ServerBlockKeyIndex: j, - ServerBlockKeys: sb.Keys, - ServerBlockStorage: storages[i][dir], - } - - setup, err := DirectiveAction(inst.serverType, dir) - if err != nil { - return err - } - - err = setup(controller) - if err != nil { - return err - } - - storages[i][dir] = controller.ServerBlockStorage // persist for this server block - } - } - } - - if !justValidate { - // See if there are any callbacks to execute after this directive - if allCallbacks, ok := parsingCallbacks[inst.serverType]; ok { - callbacks := allCallbacks[dir] - for _, callback := range callbacks { - if err := callback(inst.context); err != nil { - return err - } - } - } - } - } - - return nil -} - -func startServers(serverList []Server, inst *Instance, restartFds map[string]restartTriple) error { - errChan := make(chan error, len(serverList)) - - // used for signaling to error logging goroutine to terminate - stopChan := make(chan struct{}) - // used to track termination of servers - stopWg := &sync.WaitGroup{} - - for _, s := range serverList { - var ( - ln net.Listener - pc net.PacketConn - err error - ) - - // if performing an upgrade, obtain listener file descriptors - // from parent process - if IsUpgrade() { - if gs, ok := s.(GracefulServer); ok { - addr := gs.Address() - if fdIndex, ok := loadedGob.ListenerFds["tcp"+addr]; ok { - file := os.NewFile(fdIndex, "") - ln, err = net.FileListener(file) - if err != nil { - return 
fmt.Errorf("making listener from file: %v", err) - } - err = file.Close() - if err != nil { - return fmt.Errorf("closing copy of listener file: %v", err) - } - } - if fdIndex, ok := loadedGob.ListenerFds["udp"+addr]; ok { - file := os.NewFile(fdIndex, "") - pc, err = net.FilePacketConn(file) - if err != nil { - return fmt.Errorf("making packet connection from file: %v", err) - } - err = file.Close() - if err != nil { - return fmt.Errorf("closing copy of packet connection file: %v", err) - } - } - ln = gs.WrapListener(ln) - } - } - - // If this is a reload and s is a GracefulServer, - // reuse the listener for a graceful restart. - if gs, ok := s.(GracefulServer); ok && restartFds != nil { - addr := gs.Address() - if old, ok := restartFds[addr]; ok { - // listener - if old.listener != nil { - file, err := old.listener.File() - if err != nil { - return fmt.Errorf("getting old listener file: %v", err) - } - ln, err = net.FileListener(file) - if err != nil { - return fmt.Errorf("getting file listener: %v", err) - } - err = file.Close() - if err != nil { - return fmt.Errorf("closing copy of listener file: %v", err) - } - } - // packetconn - if old.packet != nil { - file, err := old.packet.File() - if err != nil { - return fmt.Errorf("getting old packet file: %v", err) - } - pc, err = net.FilePacketConn(file) - if err != nil { - return fmt.Errorf("getting file packet connection: %v", err) - } - err = file.Close() - if err != nil { - return fmt.Errorf("close copy of packet file: %v", err) - } - } - ln = gs.WrapListener(ln) - } - } - - if ln == nil { - ln, err = s.Listen() - if err != nil { - return fmt.Errorf("Listen: %v", err) - } - } - if pc == nil { - pc, err = s.ListenPacket() - if err != nil { - return fmt.Errorf("ListenPacket: %v", err) - } - } - - inst.servers = append(inst.servers, ServerListener{server: s, listener: ln, packet: pc}) - } - - for _, s := range inst.servers { - inst.wg.Add(2) - stopWg.Add(2) - func(s Server, ln net.Listener, pc net.PacketConn, inst 
*Instance) { - go func() { - defer func() { - inst.wg.Done() - stopWg.Done() - }() - errChan <- s.Serve(ln) - }() - - go func() { - defer func() { - inst.wg.Done() - stopWg.Done() - }() - errChan <- s.ServePacket(pc) - }() - }(s.server, s.listener, s.packet, inst) - } - - // Log errors that may be returned from Serve() calls, - // these errors should only be occurring in the server loop. - go func() { - for { - select { - case err := <-errChan: - if err != nil { - if !strings.Contains(err.Error(), "use of closed network connection") { - // this error is normal when closing the listener; see https://github.com/golang/go/issues/4373 - log.Println(err) - } - } - case <-stopChan: - return - } - } - }() - - go func() { - stopWg.Wait() - stopChan <- struct{}{} - }() - - return nil -} - -func getServerType(serverType string) (ServerType, error) { - stype, ok := serverTypes[serverType] - if ok { - return stype, nil - } - if len(serverTypes) == 0 { - return ServerType{}, fmt.Errorf("no server types plugged in") - } - if serverType == "" { - if len(serverTypes) == 1 { - for _, stype := range serverTypes { - return stype, nil - } - } - return ServerType{}, fmt.Errorf("multiple server types available; must choose one") - } - return ServerType{}, fmt.Errorf("unknown server type '%s'", serverType) -} - -func loadServerBlocks(serverType, filename string, input io.Reader) ([]caddyfile.ServerBlock, error) { - validDirectives := ValidDirectives(serverType) - serverBlocks, err := caddyfile.Parse(filename, input, validDirectives) - if err != nil { - return nil, err - } - if len(serverBlocks) == 0 && serverTypes[serverType].DefaultInput != nil { - newInput := serverTypes[serverType].DefaultInput() - serverBlocks, err = caddyfile.Parse(newInput.Path(), - bytes.NewReader(newInput.Body()), validDirectives) - if err != nil { - return nil, err - } - } - return serverBlocks, nil -} - -// Stop stops ALL servers. It blocks until they are all stopped. 
-// It does NOT execute shutdown callbacks, and it deletes all -// instances after stopping is completed. Do not re-use any -// references to old instances after calling Stop. -func Stop() error { - // This awkward for loop is to avoid a deadlock since - // inst.Stop() also acquires the instancesMu lock. - for { - instancesMu.Lock() - if len(instances) == 0 { - instancesMu.Unlock() - break - } - inst := instances[0] - instancesMu.Unlock() - if err := inst.Stop(); err != nil { - log.Printf("[ERROR] Stopping %s: %v", inst.serverType, err) - } - } - return nil -} - -// IsLoopback returns true if the hostname of addr looks -// explicitly like a common local hostname. addr must only -// be a host or a host:port combination. -func IsLoopback(addr string) bool { - host, _, err := net.SplitHostPort(strings.ToLower(addr)) - if err != nil { - host = addr // happens if the addr is just a hostname - } - return host == "localhost" || - strings.Trim(host, "[]") == "::1" || - strings.HasPrefix(host, "127.") -} - -// IsInternal returns true if the IP of addr -// belongs to a private network IP range. addr must only -// be an IP or an IP:port combination. -// Loopback addresses are considered false. -func IsInternal(addr string) bool { - privateNetworks := []string{ - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - "fc00::/7", - } - - host, _, err := net.SplitHostPort(addr) - if err != nil { - host = addr // happens if the addr is just a hostname, missing port - // if we encounter an error, the brackets need to be stripped - // because SplitHostPort didn't do it for us - host = strings.Trim(host, "[]") - } - ip := net.ParseIP(host) - if ip == nil { - return false - } - for _, privateNetwork := range privateNetworks { - _, ipnet, _ := net.ParseCIDR(privateNetwork) - if ipnet.Contains(ip) { - return true - } - } - return false -} - -// Started returns true if at least one instance has been -// started by this package. It never gets reset to false -// once it is set to true. 
-func Started() bool { - mu.Lock() - defer mu.Unlock() - return started -} - -// CaddyfileInput represents a Caddyfile as input -// and is simply a convenient way to implement -// the Input interface. -type CaddyfileInput struct { - Filepath string - Contents []byte - ServerTypeName string -} - -// Body returns c.Contents. -func (c CaddyfileInput) Body() []byte { return c.Contents } - -// Path returns c.Filepath. -func (c CaddyfileInput) Path() string { return c.Filepath } - -// ServerType returns c.ServerType. -func (c CaddyfileInput) ServerType() string { return c.ServerTypeName } - -// Input represents a Caddyfile; its contents and file path -// (which should include the file name at the end of the path). -// If path does not apply (e.g. piped input) you may use -// any understandable value. The path is mainly used for logging, -// error messages, and debugging. -type Input interface { - // Gets the Caddyfile contents - Body() []byte - - // Gets the path to the origin file - Path() string - - // The type of server this input is intended for - ServerType() string -} - -// DefaultInput returns the default Caddyfile input -// to use when it is otherwise empty or missing. -// It uses the default host and port (depends on -// host, e.g. localhost is 2015, otherwise 443) and -// root. -func DefaultInput(serverType string) Input { - if _, ok := serverTypes[serverType]; !ok { - return nil - } - if serverTypes[serverType].DefaultInput == nil { - return nil - } - return serverTypes[serverType].DefaultInput() -} - -// writePidFile writes the process ID to the file at PidFile. -// It does nothing if PidFile is not set. -func writePidFile() error { - if PidFile == "" { - return nil - } - pid := []byte(strconv.Itoa(os.Getpid()) + "\n") - return ioutil.WriteFile(PidFile, pid, 0644) -} - -type restartTriple struct { - server GracefulServer - listener Listener - packet PacketConn -} - -var ( - // instances is the list of running Instances. 
- instances []*Instance - - // instancesMu protects instances. - instancesMu sync.Mutex -) - -var ( - // DefaultConfigFile is the name of the configuration file that is loaded - // by default if no other file is specified. - DefaultConfigFile = "Caddyfile" -) - -// CtxKey is a value type for use with context.WithValue. -type CtxKey string diff --git a/vendor/github.com/mholt/caddy/caddy/caddymain/run.go b/vendor/github.com/mholt/caddy/caddy/caddymain/run.go deleted file mode 100644 index e9ce689bb..000000000 --- a/vendor/github.com/mholt/caddy/caddy/caddymain/run.go +++ /dev/null @@ -1,601 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package caddymain - -import ( - "bufio" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "runtime" - "runtime/debug" - "strconv" - "strings" - - "github.com/google/uuid" - "github.com/klauspost/cpuid" - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyfile" - "github.com/mholt/caddy/caddytls" - "github.com/mholt/caddy/telemetry" - "github.com/mholt/certmagic" - lumberjack "gopkg.in/natefinch/lumberjack.v2" - - _ "github.com/mholt/caddy/caddyhttp" // plug in the HTTP server type - // This is where other plugins get plugged in (imported) -) - -func init() { - caddy.TrapSignals() - - flag.BoolVar(&certmagic.Default.Agreed, "agree", false, "Agree to the CA's Subscriber Agreement") - flag.StringVar(&certmagic.Default.CA, "ca", certmagic.Default.CA, "URL to certificate authority's ACME server directory") - flag.StringVar(&certmagic.Default.DefaultServerName, "default-sni", certmagic.Default.DefaultServerName, "If a ClientHello ServerName is empty, use this ServerName to choose a TLS certificate") - flag.BoolVar(&certmagic.Default.DisableHTTPChallenge, "disable-http-challenge", certmagic.Default.DisableHTTPChallenge, "Disable the ACME HTTP challenge") - flag.BoolVar(&certmagic.Default.DisableTLSALPNChallenge, "disable-tls-alpn-challenge", certmagic.Default.DisableTLSALPNChallenge, "Disable the ACME TLS-ALPN challenge") - flag.StringVar(&disabledMetrics, "disabled-metrics", "", "Comma-separated list of telemetry metrics to disable") - flag.StringVar(&conf, "conf", "", "Caddyfile to load (default \""+caddy.DefaultConfigFile+"\")") - flag.StringVar(&cpu, "cpu", "100%", "CPU cap") - flag.BoolVar(&printEnv, "env", false, "Enable to print environment variables") - flag.StringVar(&envFile, "envfile", "", "Path to file with environment variables to load in KEY=VALUE format") - flag.BoolVar(&fromJSON, "json-to-caddyfile", false, "From JSON stdin to Caddyfile stdout") - flag.BoolVar(&plugins, "plugins", false, "List installed plugins") - 
flag.StringVar(&certmagic.Default.Email, "email", "", "Default ACME CA account email address") - flag.DurationVar(&certmagic.HTTPTimeout, "catimeout", certmagic.HTTPTimeout, "Default ACME CA HTTP timeout") - flag.StringVar(&logfile, "log", "", "Process log file") - flag.IntVar(&logRollMB, "log-roll-mb", 100, "Roll process log when it reaches this many megabytes (0 to disable rolling)") - flag.BoolVar(&logRollCompress, "log-roll-compress", true, "Gzip-compress rolled process log files") - flag.StringVar(&caddy.PidFile, "pidfile", "", "Path to write pid file") - flag.BoolVar(&caddy.Quiet, "quiet", false, "Quiet mode (no initialization output)") - flag.StringVar(&revoke, "revoke", "", "Hostname for which to revoke the certificate") - flag.StringVar(&serverType, "type", "http", "Type of server to run") - flag.BoolVar(&toJSON, "caddyfile-to-json", false, "From Caddyfile stdin to JSON stdout") - flag.BoolVar(&version, "version", false, "Show version") - flag.BoolVar(&validate, "validate", false, "Parse the Caddyfile but do not start the server") - - caddy.RegisterCaddyfileLoader("flag", caddy.LoaderFunc(confLoader)) - caddy.SetDefaultCaddyfileLoader("default", caddy.LoaderFunc(defaultLoader)) -} - -// Run is Caddy's main() function. 
-func Run() { - flag.Parse() - - module := getBuildModule() - cleanModVersion := strings.TrimPrefix(module.Version, "v") - - caddy.AppName = appName - caddy.AppVersion = module.Version - certmagic.UserAgent = appName + "/" + cleanModVersion - - // Set up process log before anything bad happens - switch logfile { - case "stdout": - log.SetOutput(os.Stdout) - case "stderr": - log.SetOutput(os.Stderr) - case "": - log.SetOutput(ioutil.Discard) - default: - if logRollMB > 0 { - log.SetOutput(&lumberjack.Logger{ - Filename: logfile, - MaxSize: logRollMB, - MaxAge: 14, - MaxBackups: 10, - Compress: logRollCompress, - }) - } else { - err := os.MkdirAll(filepath.Dir(logfile), 0755) - if err != nil { - mustLogFatalf("%v", err) - } - f, err := os.OpenFile(logfile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - mustLogFatalf("%v", err) - } - // don't close file; log should be writeable for duration of process - log.SetOutput(f) - } - } - - // load all additional envs as soon as possible - if err := LoadEnvFromFile(envFile); err != nil { - mustLogFatalf("%v", err) - } - - if printEnv { - for _, v := range os.Environ() { - fmt.Println(v) - } - } - - // initialize telemetry client - if EnableTelemetry { - err := initTelemetry() - if err != nil { - mustLogFatalf("[ERROR] Initializing telemetry: %v", err) - } - } else if disabledMetrics != "" { - mustLogFatalf("[ERROR] Cannot disable specific metrics because telemetry is disabled") - } - - // Check for one-time actions - if revoke != "" { - err := caddytls.Revoke(revoke) - if err != nil { - mustLogFatalf("%v", err) - } - fmt.Printf("Revoked certificate for %s\n", revoke) - os.Exit(0) - } - if version { - if module.Sum != "" { - // a build with a known version will also have a checksum - fmt.Printf("Caddy %s (%s)\n", module.Version, module.Sum) - } else { - fmt.Println(module.Version) - } - os.Exit(0) - } - if plugins { - fmt.Println(caddy.DescribePlugins()) - os.Exit(0) - } - - // Check if we just need to do a 
Caddyfile Convert and exit - checkJSONCaddyfile() - - // Set CPU cap - err := setCPU(cpu) - if err != nil { - mustLogFatalf("%v", err) - } - - // Executes Startup events - caddy.EmitEvent(caddy.StartupEvent, nil) - - // Get Caddyfile input - caddyfileinput, err := caddy.LoadCaddyfile(serverType) - if err != nil { - mustLogFatalf("%v", err) - } - - if validate { - err := caddy.ValidateAndExecuteDirectives(caddyfileinput, nil, true) - if err != nil { - mustLogFatalf("%v", err) - } - msg := "Caddyfile is valid" - fmt.Println(msg) - log.Printf("[INFO] %s", msg) - os.Exit(0) - } - - // Start your engines - instance, err := caddy.Start(caddyfileinput) - if err != nil { - mustLogFatalf("%v", err) - } - - // Begin telemetry (these are no-ops if telemetry disabled) - telemetry.Set("caddy_version", module.Version) - telemetry.Set("num_listeners", len(instance.Servers())) - telemetry.Set("server_type", serverType) - telemetry.Set("os", runtime.GOOS) - telemetry.Set("arch", runtime.GOARCH) - telemetry.Set("cpu", struct { - BrandName string `json:"brand_name,omitempty"` - NumLogical int `json:"num_logical,omitempty"` - AESNI bool `json:"aes_ni,omitempty"` - }{ - BrandName: cpuid.CPU.BrandName, - NumLogical: runtime.NumCPU(), - AESNI: cpuid.CPU.AesNi(), - }) - if containerized := detectContainer(); containerized { - telemetry.Set("container", containerized) - } - telemetry.StartEmitting() - - // Twiddle your thumbs - instance.Wait() -} - -// mustLogFatalf wraps log.Fatalf() in a way that ensures the -// output is always printed to stderr so the user can see it -// if the user is still there, even if the process log was not -// enabled. If this process is an upgrade, however, and the user -// might not be there anymore, this just logs to the process -// log and exits. -func mustLogFatalf(format string, args ...interface{}) { - if !caddy.IsUpgrade() { - log.SetOutput(os.Stderr) - } - log.Fatalf(format, args...) -} - -// confLoader loads the Caddyfile using the -conf flag. 
-func confLoader(serverType string) (caddy.Input, error) { - if conf == "" { - return nil, nil - } - - if conf == "stdin" { - return caddy.CaddyfileFromPipe(os.Stdin, serverType) - } - - var contents []byte - if strings.Contains(conf, "*") { - // Let caddyfile.doImport logic handle the globbed path - contents = []byte("import " + conf) - } else { - var err error - contents, err = ioutil.ReadFile(conf) - if err != nil { - return nil, err - } - } - - return caddy.CaddyfileInput{ - Contents: contents, - Filepath: conf, - ServerTypeName: serverType, - }, nil -} - -// defaultLoader loads the Caddyfile from the current working directory. -func defaultLoader(serverType string) (caddy.Input, error) { - contents, err := ioutil.ReadFile(caddy.DefaultConfigFile) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, err - } - return caddy.CaddyfileInput{ - Contents: contents, - Filepath: caddy.DefaultConfigFile, - ServerTypeName: serverType, - }, nil -} - -// getBuildModule returns the build info of Caddy -// from debug.BuildInfo (requires Go modules). If -// no version information is available, a non-nil -// value will still be returned, but with an -// unknown version. 
-func getBuildModule() *debug.Module { - bi, ok := debug.ReadBuildInfo() - if ok { - // The recommended way to build Caddy involves - // creating a separate main module, which - // preserves caddy a read-only dependency - // TODO: track related Go issue: https://github.com/golang/go/issues/29228 - for _, mod := range bi.Deps { - if mod.Path == "github.com/mholt/caddy" { - return mod - } - } - } - return &debug.Module{Version: "unknown"} -} - -func checkJSONCaddyfile() { - if fromJSON { - jsonBytes, err := ioutil.ReadAll(os.Stdin) - if err != nil { - fmt.Fprintf(os.Stderr, "Read stdin failed: %v", err) - os.Exit(1) - } - caddyfileBytes, err := caddyfile.FromJSON(jsonBytes) - if err != nil { - fmt.Fprintf(os.Stderr, "Converting from JSON failed: %v", err) - os.Exit(2) - } - fmt.Println(string(caddyfileBytes)) - os.Exit(0) - } - if toJSON { - caddyfileBytes, err := ioutil.ReadAll(os.Stdin) - if err != nil { - fmt.Fprintf(os.Stderr, "Read stdin failed: %v", err) - os.Exit(1) - } - jsonBytes, err := caddyfile.ToJSON(caddyfileBytes) - if err != nil { - fmt.Fprintf(os.Stderr, "Converting to JSON failed: %v", err) - os.Exit(2) - } - fmt.Println(string(jsonBytes)) - os.Exit(0) - } -} - -// setCPU parses string cpu and sets GOMAXPROCS -// according to its value. It accepts either -// a number (e.g. 3) or a percent (e.g. 50%). -// If the percent resolves to less than a single -// GOMAXPROCS, it rounds it up to GOMAXPROCS=1. 
-func setCPU(cpu string) error { - var numCPU int - - availCPU := runtime.NumCPU() - - if strings.HasSuffix(cpu, "%") { - // Percent - var percent float32 - pctStr := cpu[:len(cpu)-1] - pctInt, err := strconv.Atoi(pctStr) - if err != nil || pctInt < 1 || pctInt > 100 { - return errors.New("invalid CPU value: percentage must be between 1-100") - } - percent = float32(pctInt) / 100 - numCPU = int(float32(availCPU) * percent) - if numCPU < 1 { - numCPU = 1 - } - } else { - // Number - num, err := strconv.Atoi(cpu) - if err != nil || num < 1 { - return errors.New("invalid CPU value: provide a number or percent greater than 0") - } - numCPU = num - } - - if numCPU > availCPU { - numCPU = availCPU - } - - runtime.GOMAXPROCS(numCPU) - return nil -} - -// detectContainer attempts to determine whether the process is -// being run inside a container. References: -// https://tuhrig.de/how-to-know-you-are-inside-a-docker-container/ -// https://stackoverflow.com/a/20012536/1048862 -// https://gist.github.com/anantkamath/623ce7f5432680749e087cf8cfba9b69 -func detectContainer() bool { - if runtime.GOOS != "linux" { - return false - } - - file, err := os.Open("/proc/1/cgroup") - if err != nil { - return false - } - defer file.Close() - - i := 0 - scanner := bufio.NewScanner(file) - for scanner.Scan() { - i++ - if i > 1000 { - return false - } - - line := scanner.Text() - parts := strings.SplitN(line, ":", 3) - if len(parts) < 3 { - continue - } - - if strings.Contains(parts[2], "docker") || - strings.Contains(parts[2], "lxc") || - strings.Contains(parts[2], "moby") { - return true - } - } - - return false -} - -// initTelemetry initializes the telemetry engine. 
-func initTelemetry() error { - uuidFilename := filepath.Join(caddy.AssetsPath(), "uuid") - if customUUIDFile := os.Getenv("CADDY_UUID_FILE"); customUUIDFile != "" { - uuidFilename = customUUIDFile - } - - newUUID := func() uuid.UUID { - id := uuid.New() - err := os.MkdirAll(caddy.AssetsPath(), 0700) - if err != nil { - log.Printf("[ERROR] Persisting instance UUID: %v", err) - return id - } - err = ioutil.WriteFile(uuidFilename, []byte(id.String()), 0600) // human-readable as a string - if err != nil { - log.Printf("[ERROR] Persisting instance UUID: %v", err) - } - return id - } - - var id uuid.UUID - - // load UUID from storage, or create one if we don't have one - if uuidFile, err := os.Open(uuidFilename); os.IsNotExist(err) { - // no UUID exists yet; create a new one and persist it - id = newUUID() - } else if err != nil { - log.Printf("[ERROR] Loading persistent UUID: %v", err) - id = newUUID() - } else { - defer uuidFile.Close() - uuidBytes, err := ioutil.ReadAll(uuidFile) - if err != nil { - log.Printf("[ERROR] Reading persistent UUID: %v", err) - id = newUUID() - } else { - id, err = uuid.ParseBytes(uuidBytes) - if err != nil { - log.Printf("[ERROR] Parsing UUID: %v", err) - id = newUUID() - } - } - } - - // parse and check the list of disabled metrics - var disabledMetricsSlice []string - if len(disabledMetrics) > 0 { - if len(disabledMetrics) > 1024 { - // mitigate disk space exhaustion at the collection endpoint - return fmt.Errorf("too many metrics to disable") - } - disabledMetricsSlice = splitTrim(disabledMetrics, ",") - for _, metric := range disabledMetricsSlice { - if metric == "instance_id" || metric == "timestamp" || metric == "disabled_metrics" { - return fmt.Errorf("instance_id, timestamp, and disabled_metrics cannot be disabled") - } - } - } - - // initialize telemetry - telemetry.Init(id, disabledMetricsSlice) - - // if any metrics were disabled, report which ones (so we know how representative the data is) - if len(disabledMetricsSlice) > 0 { 
- telemetry.Set("disabled_metrics", disabledMetricsSlice) - log.Printf("[NOTICE] The following telemetry metrics are disabled: %s", disabledMetrics) - } - - return nil -} - -// Split string s into all substrings separated by sep and returns a slice of -// the substrings between those separators. -// -// If s does not contain sep and sep is not empty, Split returns a -// slice of length 1 whose only element is s. -// -// If sep is empty, Split splits after each UTF-8 sequence. If both s -// and sep are empty, Split returns an empty slice. -// -// Each item that in result is trim space and not empty string -func splitTrim(s string, sep string) []string { - splitItems := strings.Split(s, sep) - trimItems := make([]string, 0, len(splitItems)) - for _, item := range splitItems { - if item = strings.TrimSpace(item); item != "" { - trimItems = append(trimItems, item) - } - } - return trimItems -} - -// LoadEnvFromFile loads additional envs if file provided and exists -// Envs in file should be in KEY=VALUE format -func LoadEnvFromFile(envFile string) error { - if envFile == "" { - return nil - } - - file, err := os.Open(envFile) - if err != nil { - return err - } - defer file.Close() - - envMap, err := ParseEnvFile(file) - if err != nil { - return err - } - - for k, v := range envMap { - if err := os.Setenv(k, v); err != nil { - return err - } - } - - return nil -} - -// ParseEnvFile implements parse logic for environment files -func ParseEnvFile(envInput io.Reader) (map[string]string, error) { - envMap := make(map[string]string) - - scanner := bufio.NewScanner(envInput) - var line string - lineNumber := 0 - - for scanner.Scan() { - line = strings.TrimSpace(scanner.Text()) - lineNumber++ - - // skip lines starting with comment - if strings.HasPrefix(line, "#") { - continue - } - - // skip empty line - if len(line) == 0 { - continue - } - - fields := strings.SplitN(line, "=", 2) - if len(fields) != 2 { - return nil, fmt.Errorf("Can't parse line %d; line should be in 
KEY=VALUE format", lineNumber) - } - - if strings.Contains(fields[0], " ") { - return nil, fmt.Errorf("Can't parse line %d; KEY contains whitespace", lineNumber) - } - - key := fields[0] - val := fields[1] - - if key == "" { - return nil, fmt.Errorf("Can't parse line %d; KEY can't be empty string", lineNumber) - } - envMap[key] = val - } - - if err := scanner.Err(); err != nil { - return nil, err - } - - return envMap, nil -} - -const appName = "Caddy" - -// Flags that control program flow or startup -var ( - serverType string - conf string - cpu string - envFile string - fromJSON bool - logfile string - logRollMB int - logRollCompress bool - revoke string - toJSON bool - version bool - plugins bool - printEnv bool - validate bool - disabledMetrics string -) - -// EnableTelemetry defines whether telemetry is enabled in Run. -var EnableTelemetry = true diff --git a/vendor/github.com/mholt/caddy/caddyfile/dispenser.go b/vendor/github.com/mholt/caddy/caddyfile/dispenser.go deleted file mode 100644 index c7b3f4c12..000000000 --- a/vendor/github.com/mholt/caddy/caddyfile/dispenser.go +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyfile - -import ( - "errors" - "fmt" - "io" - "strings" -) - -// Dispenser is a type that dispenses tokens, similarly to a lexer, -// except that it can do so with some notion of structure and has -// some really convenient methods. 
-type Dispenser struct { - filename string - tokens []Token - cursor int - nesting int -} - -// NewDispenser returns a Dispenser, ready to use for parsing the given input. -func NewDispenser(filename string, input io.Reader) Dispenser { - tokens, _ := allTokens(input) // ignoring error because nothing to do with it - return Dispenser{ - filename: filename, - tokens: tokens, - cursor: -1, - } -} - -// NewDispenserTokens returns a Dispenser filled with the given tokens. -func NewDispenserTokens(filename string, tokens []Token) Dispenser { - return Dispenser{ - filename: filename, - tokens: tokens, - cursor: -1, - } -} - -// Next loads the next token. Returns true if a token -// was loaded; false otherwise. If false, all tokens -// have been consumed. -func (d *Dispenser) Next() bool { - if d.cursor < len(d.tokens)-1 { - d.cursor++ - return true - } - return false -} - -// NextArg loads the next token if it is on the same -// line. Returns true if a token was loaded; false -// otherwise. If false, all tokens on the line have -// been consumed. It handles imported tokens correctly. -func (d *Dispenser) NextArg() bool { - if d.cursor < 0 { - d.cursor++ - return true - } - if d.cursor >= len(d.tokens) { - return false - } - if d.cursor < len(d.tokens)-1 && - d.tokens[d.cursor].File == d.tokens[d.cursor+1].File && - d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) == d.tokens[d.cursor+1].Line { - d.cursor++ - return true - } - return false -} - -// NextLine loads the next token only if it is not on the same -// line as the current token, and returns true if a token was -// loaded; false otherwise. If false, there is not another token -// or it is on the same line. It handles imported tokens correctly. 
-func (d *Dispenser) NextLine() bool { - if d.cursor < 0 { - d.cursor++ - return true - } - if d.cursor >= len(d.tokens) { - return false - } - if d.cursor < len(d.tokens)-1 && - (d.tokens[d.cursor].File != d.tokens[d.cursor+1].File || - d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) < d.tokens[d.cursor+1].Line) { - d.cursor++ - return true - } - return false -} - -// NextBlock can be used as the condition of a for loop -// to load the next token as long as it opens a block or -// is already in a block. It returns true if a token was -// loaded, or false when the block's closing curly brace -// was loaded and thus the block ended. Nested blocks are -// not supported. -func (d *Dispenser) NextBlock() bool { - if d.nesting > 0 { - d.Next() - if d.Val() == "}" { - d.nesting-- - return false - } - return true - } - if !d.NextArg() { // block must open on same line - return false - } - if d.Val() != "{" { - d.cursor-- // roll back if not opening brace - return false - } - d.Next() - if d.Val() == "}" { - // Open and then closed right away - return false - } - d.nesting++ - return true -} - -// Val gets the text of the current token. If there is no token -// loaded, it returns empty string. -func (d *Dispenser) Val() string { - if d.cursor < 0 || d.cursor >= len(d.tokens) { - return "" - } - return d.tokens[d.cursor].Text -} - -// Line gets the line number of the current token. If there is no token -// loaded, it returns 0. -func (d *Dispenser) Line() int { - if d.cursor < 0 || d.cursor >= len(d.tokens) { - return 0 - } - return d.tokens[d.cursor].Line -} - -// File gets the filename of the current token. If there is no token loaded, -// it returns the filename originally given when parsing started. 
-func (d *Dispenser) File() string { - if d.cursor < 0 || d.cursor >= len(d.tokens) { - return d.filename - } - if tokenFilename := d.tokens[d.cursor].File; tokenFilename != "" { - return tokenFilename - } - return d.filename -} - -// Args is a convenience function that loads the next arguments -// (tokens on the same line) into an arbitrary number of strings -// pointed to in targets. If there are fewer tokens available -// than string pointers, the remaining strings will not be changed -// and false will be returned. If there were enough tokens available -// to fill the arguments, then true will be returned. -func (d *Dispenser) Args(targets ...*string) bool { - enough := true - for i := 0; i < len(targets); i++ { - if !d.NextArg() { - enough = false - break - } - *targets[i] = d.Val() - } - return enough -} - -// RemainingArgs loads any more arguments (tokens on the same line) -// into a slice and returns them. Open curly brace tokens also indicate -// the end of arguments, and the curly brace is not included in -// the return value nor is it loaded. -func (d *Dispenser) RemainingArgs() []string { - var args []string - - for d.NextArg() { - if d.Val() == "{" { - d.cursor-- - break - } - args = append(args, d.Val()) - } - - return args -} - -// ArgErr returns an argument error, meaning that another -// argument was expected but not found. In other words, -// a line break or open curly brace was encountered instead of -// an argument. -func (d *Dispenser) ArgErr() error { - if d.Val() == "{" { - return d.Err("Unexpected token '{', expecting argument") - } - return d.Errf("Wrong argument count or unexpected line ending after '%s'", d.Val()) -} - -// SyntaxErr creates a generic syntax error which explains what was -// found and what was expected. 
-func (d *Dispenser) SyntaxErr(expected string) error { - msg := fmt.Sprintf("%s:%d - Syntax error: Unexpected token '%s', expecting '%s'", d.File(), d.Line(), d.Val(), expected) - return errors.New(msg) -} - -// EOFErr returns an error indicating that the dispenser reached -// the end of the input when searching for the next token. -func (d *Dispenser) EOFErr() error { - return d.Errf("Unexpected EOF") -} - -// Err generates a custom parse-time error with a message of msg. -func (d *Dispenser) Err(msg string) error { - msg = fmt.Sprintf("%s:%d - Error during parsing: %s", d.File(), d.Line(), msg) - return errors.New(msg) -} - -// Errf is like Err, but for formatted error messages -func (d *Dispenser) Errf(format string, args ...interface{}) error { - return d.Err(fmt.Sprintf(format, args...)) -} - -// numLineBreaks counts how many line breaks are in the token -// value given by the token index tknIdx. It returns 0 if the -// token does not exist or there are no line breaks. -func (d *Dispenser) numLineBreaks(tknIdx int) int { - if tknIdx < 0 || tknIdx >= len(d.tokens) { - return 0 - } - return strings.Count(d.tokens[tknIdx].Text, "\n") -} - -// isNewLine determines whether the current token is on a different -// line (higher line number) than the previous token. It handles imported -// tokens correctly. If there isn't a previous token, it returns true. 
-func (d *Dispenser) isNewLine() bool { - if d.cursor < 1 { - return true - } - if d.cursor > len(d.tokens)-1 { - return false - } - return d.tokens[d.cursor-1].File != d.tokens[d.cursor].File || - d.tokens[d.cursor-1].Line+d.numLineBreaks(d.cursor-1) < d.tokens[d.cursor].Line -} diff --git a/vendor/github.com/mholt/caddy/caddyfile/json.go b/vendor/github.com/mholt/caddy/caddyfile/json.go deleted file mode 100644 index 0d37e8e98..000000000 --- a/vendor/github.com/mholt/caddy/caddyfile/json.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyfile - -import ( - "bytes" - "encoding/json" - "fmt" - "sort" - "strconv" - "strings" -) - -const filename = "Caddyfile" - -// ToJSON converts caddyfile to its JSON representation. 
-func ToJSON(caddyfile []byte) ([]byte, error) { - var j EncodedCaddyfile - - serverBlocks, err := Parse(filename, bytes.NewReader(caddyfile), nil) - if err != nil { - return nil, err - } - - for _, sb := range serverBlocks { - block := EncodedServerBlock{ - Keys: sb.Keys, - Body: [][]interface{}{}, - } - - // Extract directives deterministically by sorting them - var directives = make([]string, len(sb.Tokens)) - for dir := range sb.Tokens { - directives = append(directives, dir) - } - sort.Strings(directives) - - // Convert each directive's tokens into our JSON structure - for _, dir := range directives { - disp := NewDispenserTokens(filename, sb.Tokens[dir]) - for disp.Next() { - block.Body = append(block.Body, constructLine(&disp)) - } - } - - // tack this block onto the end of the list - j = append(j, block) - } - - result, err := json.Marshal(j) - if err != nil { - return nil, err - } - - return result, nil -} - -// constructLine transforms tokens into a JSON-encodable structure; -// but only one line at a time, to be used at the top-level of -// a server block only (where the first token on each line is a -// directive) - not to be used at any other nesting level. -func constructLine(d *Dispenser) []interface{} { - var args []interface{} - - args = append(args, d.Val()) - - for d.NextArg() { - if d.Val() == "{" { - args = append(args, constructBlock(d)) - continue - } - args = append(args, d.Val()) - } - - return args -} - -// constructBlock recursively processes tokens into a -// JSON-encodable structure. To be used in a directive's -// block. Goes to end of block. 
-func constructBlock(d *Dispenser) [][]interface{} { - block := [][]interface{}{} - - for d.Next() { - if d.Val() == "}" { - break - } - block = append(block, constructLine(d)) - } - - return block -} - -// FromJSON converts JSON-encoded jsonBytes to Caddyfile text -func FromJSON(jsonBytes []byte) ([]byte, error) { - var j EncodedCaddyfile - var result string - - err := json.Unmarshal(jsonBytes, &j) - if err != nil { - return nil, err - } - - for sbPos, sb := range j { - if sbPos > 0 { - result += "\n\n" - } - for i, key := range sb.Keys { - if i > 0 { - result += ", " - } - //result += standardizeScheme(key) - result += key - } - result += jsonToText(sb.Body, 1) - } - - return []byte(result), nil -} - -// jsonToText recursively transforms a scope of JSON into plain -// Caddyfile text. -func jsonToText(scope interface{}, depth int) string { - var result string - - switch val := scope.(type) { - case string: - if strings.ContainsAny(val, "\" \n\t\r") { - result += `"` + strings.Replace(val, "\"", "\\\"", -1) + `"` - } else { - result += val - } - case int: - result += strconv.Itoa(val) - case float64: - result += fmt.Sprintf("%v", val) - case bool: - result += fmt.Sprintf("%t", val) - case [][]interface{}: - result += " {\n" - for _, arg := range val { - result += strings.Repeat("\t", depth) + jsonToText(arg, depth+1) + "\n" - } - result += strings.Repeat("\t", depth-1) + "}" - case []interface{}: - for i, v := range val { - if block, ok := v.([]interface{}); ok { - result += "{\n" - for _, arg := range block { - result += strings.Repeat("\t", depth) + jsonToText(arg, depth+1) + "\n" - } - result += strings.Repeat("\t", depth-1) + "}" - continue - } - result += jsonToText(v, depth) - if i < len(val)-1 { - result += " " - } - } - } - - return result -} - -// TODO: Will this function come in handy somewhere else? -/* -// standardizeScheme turns an address like host:https into https://host, -// or "host:" into "host". 
-func standardizeScheme(addr string) string { - if hostname, port, err := net.SplitHostPort(addr); err == nil { - if port == "http" || port == "https" { - addr = port + "://" + hostname - } - } - return strings.TrimSuffix(addr, ":") -} -*/ - -// EncodedCaddyfile encapsulates a slice of EncodedServerBlocks. -type EncodedCaddyfile []EncodedServerBlock - -// EncodedServerBlock represents a server block ripe for encoding. -type EncodedServerBlock struct { - Keys []string `json:"keys"` - Body [][]interface{} `json:"body"` -} diff --git a/vendor/github.com/mholt/caddy/caddyfile/lexer.go b/vendor/github.com/mholt/caddy/caddyfile/lexer.go deleted file mode 100644 index 2b38627be..000000000 --- a/vendor/github.com/mholt/caddy/caddyfile/lexer.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyfile - -import ( - "bufio" - "io" - "unicode" -) - -type ( - // lexer is a utility which can get values, token by - // token, from a Reader. A token is a word, and tokens - // are separated by whitespace. A word can be enclosed - // in quotes if it contains whitespace. - lexer struct { - reader *bufio.Reader - token Token - line int - } - - // Token represents a single parsable unit. - Token struct { - File string - Line int - Text string - } -) - -// load prepares the lexer to scan an input for tokens. -// It discards any leading byte order mark. 
-func (l *lexer) load(input io.Reader) error { - l.reader = bufio.NewReader(input) - l.line = 1 - - // discard byte order mark, if present - firstCh, _, err := l.reader.ReadRune() - if err != nil { - return err - } - if firstCh != 0xFEFF { - err := l.reader.UnreadRune() - if err != nil { - return err - } - } - - return nil -} - -// next loads the next token into the lexer. -// A token is delimited by whitespace, unless -// the token starts with a quotes character (") -// in which case the token goes until the closing -// quotes (the enclosing quotes are not included). -// Inside quoted strings, quotes may be escaped -// with a preceding \ character. No other chars -// may be escaped. The rest of the line is skipped -// if a "#" character is read in. Returns true if -// a token was loaded; false otherwise. -func (l *lexer) next() bool { - var val []rune - var comment, quoted, escaped bool - - makeToken := func() bool { - l.token.Text = string(val) - return true - } - - for { - ch, _, err := l.reader.ReadRune() - if err != nil { - if len(val) > 0 { - return makeToken() - } - if err == io.EOF { - return false - } - panic(err) - } - - if quoted { - if !escaped { - if ch == '\\' { - escaped = true - continue - } else if ch == '"' { - quoted = false - return makeToken() - } - } - if ch == '\n' { - l.line++ - } - if escaped { - // only escape quotes - if ch != '"' { - val = append(val, '\\') - } - } - val = append(val, ch) - escaped = false - continue - } - - if unicode.IsSpace(ch) { - if ch == '\r' { - continue - } - if ch == '\n' { - l.line++ - comment = false - } - if len(val) > 0 { - return makeToken() - } - continue - } - - if ch == '#' { - comment = true - } - - if comment { - continue - } - - if len(val) == 0 { - l.token = Token{Line: l.line} - if ch == '"' { - quoted = true - continue - } - } - - val = append(val, ch) - } -} diff --git a/vendor/github.com/mholt/caddy/caddyfile/parse.go b/vendor/github.com/mholt/caddy/caddyfile/parse.go deleted file mode 100644 
index eb5512cbb..000000000 --- a/vendor/github.com/mholt/caddy/caddyfile/parse.go +++ /dev/null @@ -1,493 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyfile - -import ( - "io" - "log" - "os" - "path/filepath" - "strings" - - "github.com/mholt/caddy/telemetry" -) - -// Parse parses the input just enough to group tokens, in -// order, by server block. No further parsing is performed. -// Server blocks are returned in the order in which they appear. -// Directives that do not appear in validDirectives will cause -// an error. If you do not want to check for valid directives, -// pass in nil instead. -func Parse(filename string, input io.Reader, validDirectives []string) ([]ServerBlock, error) { - p := parser{Dispenser: NewDispenser(filename, input), validDirectives: validDirectives} - return p.parseAll() -} - -// allTokens lexes the entire input, but does not parse it. -// It returns all the tokens from the input, unstructured -// and in order. 
-func allTokens(input io.Reader) ([]Token, error) { - l := new(lexer) - err := l.load(input) - if err != nil { - return nil, err - } - var tokens []Token - for l.next() { - tokens = append(tokens, l.token) - } - return tokens, nil -} - -type parser struct { - Dispenser - block ServerBlock // current server block being parsed - validDirectives []string // a directive must be valid or it's an error - eof bool // if we encounter a valid EOF in a hard place - definedSnippets map[string][]Token -} - -func (p *parser) parseAll() ([]ServerBlock, error) { - var blocks []ServerBlock - - for p.Next() { - err := p.parseOne() - if err != nil { - return blocks, err - } - if len(p.block.Keys) > 0 { - blocks = append(blocks, p.block) - } - } - - return blocks, nil -} - -func (p *parser) parseOne() error { - p.block = ServerBlock{Tokens: make(map[string][]Token)} - - return p.begin() -} - -func (p *parser) begin() error { - if len(p.tokens) == 0 { - return nil - } - - err := p.addresses() - - if err != nil { - return err - } - - if p.eof { - // this happens if the Caddyfile consists of only - // a line of addresses and nothing else - return nil - } - - if ok, name := p.isSnippet(); ok { - if p.definedSnippets == nil { - p.definedSnippets = map[string][]Token{} - } - if _, found := p.definedSnippets[name]; found { - return p.Errf("redeclaration of previously declared snippet %s", name) - } - // consume all tokens til matched close brace - tokens, err := p.snippetTokens() - if err != nil { - return err - } - p.definedSnippets[name] = tokens - // empty block keys so we don't save this block as a real server. 
- p.block.Keys = nil - return nil - } - - return p.blockContents() -} - -func (p *parser) addresses() error { - var expectingAnother bool - - for { - tkn := replaceEnvVars(p.Val()) - - // special case: import directive replaces tokens during parse-time - if tkn == "import" && p.isNewLine() { - err := p.doImport() - if err != nil { - return err - } - continue - } - - // Open brace definitely indicates end of addresses - if tkn == "{" { - if expectingAnother { - return p.Errf("Expected another address but had '%s' - check for extra comma", tkn) - } - break - } - - if tkn != "" { // empty token possible if user typed "" - // Trailing comma indicates another address will follow, which - // may possibly be on the next line - if tkn[len(tkn)-1] == ',' { - tkn = tkn[:len(tkn)-1] - expectingAnother = true - } else { - expectingAnother = false // but we may still see another one on this line - } - - p.block.Keys = append(p.block.Keys, tkn) - } - - // Advance token and possibly break out of loop or return error - hasNext := p.Next() - if expectingAnother && !hasNext { - return p.EOFErr() - } - if !hasNext { - p.eof = true - break // EOF - } - if !expectingAnother && p.isNewLine() { - break - } - } - - return nil -} - -func (p *parser) blockContents() error { - errOpenCurlyBrace := p.openCurlyBrace() - if errOpenCurlyBrace != nil { - // single-server configs don't need curly braces - p.cursor-- - } - - err := p.directives() - if err != nil { - return err - } - - // Only look for close curly brace if there was an opening - if errOpenCurlyBrace == nil { - err = p.closeCurlyBrace() - if err != nil { - return err - } - } - - return nil -} - -// directives parses through all the lines for directives -// and it expects the next token to be the first -// directive. It goes until EOF or closing curly brace -// which ends the server block. 
-func (p *parser) directives() error { - for p.Next() { - // end of server block - if p.Val() == "}" { - break - } - - // special case: import directive replaces tokens during parse-time - if p.Val() == "import" { - err := p.doImport() - if err != nil { - return err - } - p.cursor-- // cursor is advanced when we continue, so roll back one more - continue - } - - // normal case: parse a directive on this line - if err := p.directive(); err != nil { - return err - } - } - return nil -} - -// doImport swaps out the import directive and its argument -// (a total of 2 tokens) with the tokens in the specified file -// or globbing pattern. When the function returns, the cursor -// is on the token before where the import directive was. In -// other words, call Next() to access the first token that was -// imported. -func (p *parser) doImport() error { - // syntax checks - if !p.NextArg() { - return p.ArgErr() - } - importPattern := replaceEnvVars(p.Val()) - if importPattern == "" { - return p.Err("Import requires a non-empty filepath") - } - if p.NextArg() { - return p.Err("Import takes only one argument (glob pattern or file)") - } - // splice out the import directive and its argument (2 tokens total) - tokensBefore := p.tokens[:p.cursor-1] - tokensAfter := p.tokens[p.cursor+1:] - var importedTokens []Token - - // first check snippets. 
That is a simple, non-recursive replacement - if p.definedSnippets != nil && p.definedSnippets[importPattern] != nil { - importedTokens = p.definedSnippets[importPattern] - } else { - // make path relative to the file of the _token_ being processed rather - // than current working directory (issue #867) and then use glob to get - // list of matching filenames - absFile, err := filepath.Abs(p.Dispenser.File()) - if err != nil { - return p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.filename, err) - } - - var matches []string - var globPattern string - if !filepath.IsAbs(importPattern) { - globPattern = filepath.Join(filepath.Dir(absFile), importPattern) - } else { - globPattern = importPattern - } - if strings.Count(globPattern, "*") > 1 || strings.Count(globPattern, "?") > 1 || - (strings.Contains(globPattern, "[") && strings.Contains(globPattern, "]")) { - // See issue #2096 - a pattern with many glob expansions can hang for too long - return p.Errf("Glob pattern may only contain one wildcard (*), but has others: %s", globPattern) - } - matches, err = filepath.Glob(globPattern) - - if err != nil { - return p.Errf("Failed to use import pattern %s: %v", importPattern, err) - } - if len(matches) == 0 { - if strings.ContainsAny(globPattern, "*?[]") { - log.Printf("[WARNING] No files matching import glob pattern: %s", importPattern) - } else { - return p.Errf("File to import not found: %s", importPattern) - } - } - - // collect all the imported tokens - - for _, importFile := range matches { - newTokens, err := p.doSingleImport(importFile) - if err != nil { - return err - } - importedTokens = append(importedTokens, newTokens...) - } - } - - // splice the imported tokens in the place of the import statement - // and rewind cursor so Next() will land on first imported token - p.tokens = append(tokensBefore, append(importedTokens, tokensAfter...)...) 
- p.cursor-- - - return nil -} - -// doSingleImport lexes the individual file at importFile and returns -// its tokens or an error, if any. -func (p *parser) doSingleImport(importFile string) ([]Token, error) { - file, err := os.Open(importFile) - if err != nil { - return nil, p.Errf("Could not import %s: %v", importFile, err) - } - defer file.Close() - - if info, err := file.Stat(); err != nil { - return nil, p.Errf("Could not import %s: %v", importFile, err) - } else if info.IsDir() { - return nil, p.Errf("Could not import %s: is a directory", importFile) - } - - importedTokens, err := allTokens(file) - if err != nil { - return nil, p.Errf("Could not read tokens while importing %s: %v", importFile, err) - } - - // Tack the file path onto these tokens so errors show the imported file's name - // (we use full, absolute path to avoid bugs: issue #1892) - filename, err := filepath.Abs(importFile) - if err != nil { - return nil, p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.filename, err) - } - for i := 0; i < len(importedTokens); i++ { - importedTokens[i].File = filename - } - - return importedTokens, nil -} - -// directive collects tokens until the directive's scope -// closes (either end of line or end of curly brace block). -// It expects the currently-loaded token to be a directive -// (or } that ends a server block). The collected tokens -// are loaded into the current server block for later use -// by directive setup functions. -func (p *parser) directive() error { - dir := replaceEnvVars(p.Val()) - nesting := 0 - - // TODO: More helpful error message ("did you mean..." 
or "maybe you need to install its server type") - if !p.validDirective(dir) { - return p.Errf("Unknown directive '%s'", dir) - } - - // The directive itself is appended as a relevant token - p.block.Tokens[dir] = append(p.block.Tokens[dir], p.tokens[p.cursor]) - telemetry.AppendUnique("directives", dir) - - for p.Next() { - if p.Val() == "{" { - nesting++ - } else if p.isNewLine() && nesting == 0 { - p.cursor-- // read too far - break - } else if p.Val() == "}" && nesting > 0 { - nesting-- - } else if p.Val() == "}" && nesting == 0 { - return p.Err("Unexpected '}' because no matching opening brace") - } else if p.Val() == "import" && p.isNewLine() { - if err := p.doImport(); err != nil { - return err - } - p.cursor-- // cursor is advanced when we continue, so roll back one more - continue - } - p.tokens[p.cursor].Text = replaceEnvVars(p.tokens[p.cursor].Text) - p.block.Tokens[dir] = append(p.block.Tokens[dir], p.tokens[p.cursor]) - } - - if nesting > 0 { - return p.EOFErr() - } - return nil -} - -// openCurlyBrace expects the current token to be an -// opening curly brace. This acts like an assertion -// because it returns an error if the token is not -// a opening curly brace. It does NOT advance the token. -func (p *parser) openCurlyBrace() error { - if p.Val() != "{" { - return p.SyntaxErr("{") - } - return nil -} - -// closeCurlyBrace expects the current token to be -// a closing curly brace. This acts like an assertion -// because it returns an error if the token is not -// a closing curly brace. It does NOT advance the token. -func (p *parser) closeCurlyBrace() error { - if p.Val() != "}" { - return p.SyntaxErr("}") - } - return nil -} - -// validDirective returns true if dir is in p.validDirectives. 
-func (p *parser) validDirective(dir string) bool { - if p.validDirectives == nil { - return true - } - for _, d := range p.validDirectives { - if d == dir { - return true - } - } - return false -} - -// replaceEnvVars replaces environment variables that appear in the token -// and understands both the $UNIX and %WINDOWS% syntaxes. -func replaceEnvVars(s string) string { - s = replaceEnvReferences(s, "{%", "%}") - s = replaceEnvReferences(s, "{$", "}") - return s -} - -// replaceEnvReferences performs the actual replacement of env variables -// in s, given the placeholder start and placeholder end strings. -func replaceEnvReferences(s, refStart, refEnd string) string { - index := strings.Index(s, refStart) - for index != -1 { - endIndex := strings.Index(s[index:], refEnd) - if endIndex == -1 { - break - } - - endIndex += index - if endIndex > index+len(refStart) { - ref := s[index : endIndex+len(refEnd)] - s = strings.Replace(s, ref, os.Getenv(ref[len(refStart):len(ref)-len(refEnd)]), -1) - } else { - return s - } - index = strings.Index(s, refStart) - } - return s -} - -// ServerBlock associates any number of keys (usually addresses -// of some sort) with tokens (grouped by directive name). -type ServerBlock struct { - Keys []string - Tokens map[string][]Token -} - -func (p *parser) isSnippet() (bool, string) { - keys := p.block.Keys - // A snippet block is a single key with parens. Nothing else qualifies. - if len(keys) == 1 && strings.HasPrefix(keys[0], "(") && strings.HasSuffix(keys[0], ")") { - return true, strings.TrimSuffix(keys[0][1:], ")") - } - return false, "" -} - -// read and store everything in a block for later replay. -func (p *parser) snippetTokens() ([]Token, error) { - // TODO: disallow imports in snippets for simplicity at import time - // snippet must have curlies. 
- err := p.openCurlyBrace() - if err != nil { - return nil, err - } - count := 1 - tokens := []Token{} - for p.Next() { - if p.Val() == "}" { - count-- - if count == 0 { - break - } - } - if p.Val() == "{" { - count++ - } - tokens = append(tokens, p.tokens[p.cursor]) - } - // make sure we're matched up - if count != 0 { - return nil, p.SyntaxErr("}") - } - return tokens, nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/basicauth/basicauth.go b/vendor/github.com/mholt/caddy/caddyhttp/basicauth/basicauth.go deleted file mode 100644 index 93e79b97b..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/basicauth/basicauth.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package basicauth implements HTTP Basic Authentication for Caddy. -// -// This is useful for simple protections on a website, like requiring -// a password to access an admin interface. This package assumes a -// fairly small threat model. -package basicauth - -import ( - "bufio" - "context" - "crypto/sha1" - "crypto/subtle" - "fmt" - "io" - "log" - "net/http" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/jimstudt/http-authentication/basic" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// BasicAuth is middleware to protect resources with a username and password. 
-// Note that HTTP Basic Authentication is not secure by itself and should -// not be used to protect important assets without HTTPS. Even then, the -// security of HTTP Basic Auth is disputed. Use discretion when deciding -// what to protect with BasicAuth. -type BasicAuth struct { - Next httpserver.Handler - SiteRoot string - Rules []Rule -} - -// ServeHTTP implements the httpserver.Handler interface. -func (a BasicAuth) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - var protected, isAuthenticated bool - var realm string - var username string - var password string - var ok bool - - // do not check for basic auth on OPTIONS call - if r.Method == http.MethodOptions { - // Pass-through when no paths match - return a.Next.ServeHTTP(w, r) - } - - for _, rule := range a.Rules { - for _, res := range rule.Resources { - if !httpserver.Path(r.URL.Path).Matches(res) { - continue - } - - // path matches; this endpoint is protected - protected = true - realm = rule.Realm - - // parse auth header - username, password, ok = r.BasicAuth() - - // check credentials - if !ok || - username != rule.Username || - !rule.Password(password) { - continue - } - - // by this point, authentication was successful - isAuthenticated = true - - // let upstream middleware (e.g. fastcgi and cgi) know about authenticated - // user; this replaces the request with a wrapped instance - r = r.WithContext(context.WithValue(r.Context(), - httpserver.RemoteUserCtxKey, username)) - - // Provide username to be used in log by replacer - repl := httpserver.NewReplacer(r, nil, "-") - repl.Set("user", username) - } - } - - if protected && !isAuthenticated { - // browsers show a message that says something like: - // "The website says: " - // which is kinda dumb, but whatever. - if realm == "" { - realm = "Restricted" - } - w.Header().Set("WWW-Authenticate", "Basic realm=\""+realm+"\"") - - // Get a replacer so we can provide basic info for the authentication error. 
- repl := httpserver.NewReplacer(r, nil, "-") - repl.Set("user", username) - errstr := repl.Replace("BasicAuth: user \"{user}\" was not found or password was incorrect. {remote} {host} {uri} {proto}") - err := fmt.Errorf("%s", errstr) - return http.StatusUnauthorized, err - } - - // Pass-through when no paths match - return a.Next.ServeHTTP(w, r) -} - -// Rule represents a BasicAuth rule. A username and password -// combination protect the associated resources, which are -// file or directory paths. -type Rule struct { - Username string - Password func(string) bool - Resources []string - Realm string // See RFC 1945 and RFC 2617, default: "Restricted" -} - -// PasswordMatcher determines whether a password matches a rule. -type PasswordMatcher func(pw string) bool - -var ( - htpasswords map[string]map[string]PasswordMatcher - htpasswordsMu sync.Mutex -) - -// GetHtpasswdMatcher matches password rules. -func GetHtpasswdMatcher(filename, username, siteRoot string) (PasswordMatcher, error) { - filename = filepath.Join(siteRoot, filename) - htpasswordsMu.Lock() - if htpasswords == nil { - htpasswords = make(map[string]map[string]PasswordMatcher) - } - pm := htpasswords[filename] - if pm == nil { - fh, err := os.Open(filename) - if err != nil { - return nil, fmt.Errorf("open %q: %v", filename, err) - } - defer fh.Close() - pm = make(map[string]PasswordMatcher) - if err = parseHtpasswd(pm, fh); err != nil { - return nil, fmt.Errorf("parsing htpasswd %q: %v", fh.Name(), err) - } - htpasswords[filename] = pm - } - htpasswordsMu.Unlock() - if pm[username] == nil { - return nil, fmt.Errorf("username %q not found in %q", username, filename) - } - return pm[username], nil -} - -func parseHtpasswd(pm map[string]PasswordMatcher, r io.Reader) error { - scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if line == "" || strings.IndexByte(line, '#') == 0 { - continue - } - i := strings.IndexByte(line, ':') - if i <= 0 { - return 
fmt.Errorf("malformed line, no color: %q", line) - } - user, encoded := line[:i], line[i+1:] - for _, p := range basic.DefaultSystems { - matcher, err := p(encoded) - if err != nil { - return err - } - if matcher != nil { - pm[user] = matcher.MatchesPassword - break - } - } - } - return scanner.Err() -} - -// PlainMatcher returns a PasswordMatcher that does a constant-time -// byte comparison against the password passw. -func PlainMatcher(passw string) PasswordMatcher { - // compare hashes of equal length instead of actual password - // to avoid leaking password length - passwHash := sha1.New() - if _, err := passwHash.Write([]byte(passw)); err != nil { - log.Printf("[ERROR] unable to write password hash: %v", err) - } - passwSum := passwHash.Sum(nil) - return func(pw string) bool { - pwHash := sha1.New() - if _, err := pwHash.Write([]byte(pw)); err != nil { - log.Printf("[ERROR] unable to write password hash: %v", err) - } - pwSum := pwHash.Sum(nil) - return subtle.ConstantTimeCompare([]byte(pwSum), []byte(passwSum)) == 1 - } -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/basicauth/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/basicauth/setup.go deleted file mode 100644 index 87627f179..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/basicauth/setup.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package basicauth - -import ( - "strings" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("basicauth", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new BasicAuth middleware instance. -func setup(c *caddy.Controller) error { - cfg := httpserver.GetConfig(c) - root := cfg.Root - - rules, err := basicAuthParse(c) - if err != nil { - return err - } - - basic := BasicAuth{Rules: rules} - - cfg.AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - basic.Next = next - basic.SiteRoot = root - return basic - }) - - return nil -} - -func basicAuthParse(c *caddy.Controller) ([]Rule, error) { - var rules []Rule - cfg := httpserver.GetConfig(c) - - var err error - for c.Next() { - var rule Rule - - args := c.RemainingArgs() - - switch len(args) { - case 2: - rule.Username = args[0] - if rule.Password, err = passwordMatcher(rule.Username, args[1], cfg.Root); err != nil { - return rules, c.Errf("Get password matcher from %s: %v", c.Val(), err) - } - case 3: - rule.Resources = append(rule.Resources, args[0]) - rule.Username = args[1] - if rule.Password, err = passwordMatcher(rule.Username, args[2], cfg.Root); err != nil { - return rules, c.Errf("Get password matcher from %s: %v", c.Val(), err) - } - default: - return rules, c.ArgErr() - } - - // If nested block is present, process it here - for c.NextBlock() { - val := c.Val() - args = c.RemainingArgs() - switch len(args) { - case 0: - // Assume single argument is path resource - rule.Resources = append(rule.Resources, val) - case 1: - if val == "realm" { - if rule.Realm == "" { - rule.Realm = strings.Replace(args[0], `"`, `\"`, -1) - } else { - return rules, c.Errf("\"realm\" subdirective can only be specified once") - } - } else { - return rules, c.Errf("expecting \"realm\", got \"%s\"", val) - } - default: - return rules, c.ArgErr() - } - } - - rules = append(rules, rule) - } - - return rules, nil -} 
- -func passwordMatcher(username, passw, siteRoot string) (PasswordMatcher, error) { - htpasswdPrefix := "htpasswd=" - if !strings.HasPrefix(passw, htpasswdPrefix) { - return PlainMatcher(passw), nil - } - return GetHtpasswdMatcher(passw[len(htpasswdPrefix):], username, siteRoot) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/bind/bind.go b/vendor/github.com/mholt/caddy/caddyhttp/bind/bind.go deleted file mode 100644 index 79489fe4a..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/bind/bind.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package bind - -import ( - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("bind", caddy.Plugin{ - ServerType: "http", - Action: setupBind, - }) -} - -func setupBind(c *caddy.Controller) error { - config := httpserver.GetConfig(c) - for c.Next() { - if !c.Args(&config.ListenHost) { - return c.ArgErr() - } - config.TLS.Manager.ListenHost = config.ListenHost // necessary for ACME challenges, see issue #309 - } - return nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/browse/browse.go b/vendor/github.com/mholt/caddy/caddyhttp/browse/browse.go deleted file mode 100644 index fd09ad659..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/browse/browse.go +++ /dev/null @@ -1,527 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package browse provides middleware for listing files in a directory -// when directory path is requested instead of a specific file. 
-package browse - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - "os" - "path" - "sort" - "strconv" - "strings" - "text/template" - "time" - - "github.com/dustin/go-humanize" - "github.com/mholt/caddy/caddyhttp/httpserver" - "github.com/mholt/caddy/caddyhttp/staticfiles" -) - -const ( - sortByName = "name" - sortByNameDirFirst = "namedirfirst" - sortBySize = "size" - sortByTime = "time" -) - -// Browse is an http.Handler that can show a file listing when -// directories in the given paths are specified. -type Browse struct { - Next httpserver.Handler - Configs []Config - IgnoreIndexes bool -} - -// Config is a configuration for browsing in a particular path. -type Config struct { - PathScope string // the base path the URL must match to enable browsing - Fs staticfiles.FileServer - Variables interface{} - Template *template.Template -} - -// A Listing is the context used to fill out a template. -type Listing struct { - // The name of the directory (the last element of the path). - Name string - - // The full path of the request. - Path string - - // Whether the parent directory is browse-able. - CanGoUp bool - - // The items (files and folders) in the path. - Items []FileInfo - - // The number of directories in the listing. - NumDirs int - - // The number of files (items that aren't directories) in the listing. - NumFiles int - - // Which sorting order is used. - Sort string - - // And which order. - Order string - - // If ≠0 then Items have been limited to that many elements. - ItemsLimitedTo int - - // Optional custom variables for use in browse templates. - User interface{} - - httpserver.Context -} - -// Crumb represents part of a breadcrumb menu. -type Crumb struct { - Link, Text string -} - -// Breadcrumbs returns l.Path where every element maps -// the link to the text to display. 
-func (l Listing) Breadcrumbs() []Crumb { - var result []Crumb - - if len(l.Path) == 0 { - return result - } - - // skip trailing slash - lpath := l.Path - if lpath[len(lpath)-1] == '/' { - lpath = lpath[:len(lpath)-1] - } - - parts := strings.Split(lpath, "/") - for i := range parts { - txt := parts[i] - if i == 0 && parts[i] == "" { - txt = "/" - } - result = append(result, Crumb{Link: strings.Repeat("../", len(parts)-i-1), Text: txt}) - } - - return result -} - -// FileInfo is the info about a particular file or directory -type FileInfo struct { - Name string - Size int64 - URL string - ModTime time.Time - Mode os.FileMode - IsDir bool - IsSymlink bool -} - -// HumanSize returns the size of the file as a human-readable string -// in IEC format (i.e. power of 2 or base 1024). -func (fi FileInfo) HumanSize() string { - return humanize.IBytes(uint64(fi.Size)) -} - -// HumanModTime returns the modified time of the file as a human-readable string. -func (fi FileInfo) HumanModTime(format string) string { - return fi.ModTime.Format(format) -} - -// Implement sorting for Listing -type byName Listing -type byNameDirFirst Listing -type bySize Listing -type byTime Listing - -// By Name -func (l byName) Len() int { return len(l.Items) } -func (l byName) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] } - -// Treat upper and lower case equally -func (l byName) Less(i, j int) bool { - return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name) -} - -// By Name Dir First -func (l byNameDirFirst) Len() int { return len(l.Items) } -func (l byNameDirFirst) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] } - -// Treat upper and lower case equally -func (l byNameDirFirst) Less(i, j int) bool { - - // if both are dir or file sort normally - if l.Items[i].IsDir == l.Items[j].IsDir { - return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name) - } - - // always sort dir ahead of file - return l.Items[i].IsDir -} - -// By 
Size -func (l bySize) Len() int { return len(l.Items) } -func (l bySize) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] } - -const directoryOffset = -1 << 31 // = math.MinInt32 -func (l bySize) Less(i, j int) bool { - iSize, jSize := l.Items[i].Size, l.Items[j].Size - - // Directory sizes depend on the filesystem implementation, - // which is opaque to a visitor, and should indeed does not change if the operator chooses to change the fs. - // For a consistent user experience directories are pulled to the front… - if l.Items[i].IsDir { - iSize = directoryOffset - } - if l.Items[j].IsDir { - jSize = directoryOffset - } - // … and sorted by name. - if l.Items[i].IsDir && l.Items[j].IsDir { - return strings.ToLower(l.Items[i].Name) < strings.ToLower(l.Items[j].Name) - } - - return iSize < jSize -} - -// By Time -func (l byTime) Len() int { return len(l.Items) } -func (l byTime) Swap(i, j int) { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] } -func (l byTime) Less(i, j int) bool { return l.Items[i].ModTime.Before(l.Items[j].ModTime) } - -// Add sorting method to "Listing" -// it will apply what's in ".Sort" and ".Order" -func (l Listing) applySort() { - // Check '.Order' to know how to sort - if l.Order == "desc" { - switch l.Sort { - case sortByName: - sort.Sort(sort.Reverse(byName(l))) - case sortByNameDirFirst: - sort.Sort(sort.Reverse(byNameDirFirst(l))) - case sortBySize: - sort.Sort(sort.Reverse(bySize(l))) - case sortByTime: - sort.Sort(sort.Reverse(byTime(l))) - default: - // If not one of the above, do nothing - return - } - } else { // If we had more Orderings we could add them here - switch l.Sort { - case sortByName: - sort.Sort(byName(l)) - case sortByNameDirFirst: - sort.Sort(byNameDirFirst(l)) - case sortBySize: - sort.Sort(bySize(l)) - case sortByTime: - sort.Sort(byTime(l)) - default: - // If not one of the above, do nothing - return - } - } -} - -func directoryListing(files []os.FileInfo, canGoUp bool, urlPath string, config 
*Config) (Listing, bool) { - var ( - fileInfos []FileInfo - dirCount, fileCount int - hasIndexFile bool - ) - - for _, f := range files { - name := f.Name() - - for _, indexName := range config.Fs.IndexPages { - if name == indexName { - hasIndexFile = true - break - } - } - - isDir := f.IsDir() || isSymlinkTargetDir(f, urlPath, config) - - if isDir { - name += "/" - dirCount++ - } else { - fileCount++ - } - - if config.Fs.IsHidden(f) { - continue - } - - u := url.URL{Path: "./" + name} // prepend with "./" to fix paths with ':' in the name - - fileInfos = append(fileInfos, FileInfo{ - IsDir: isDir, - IsSymlink: isSymlink(f), - Name: f.Name(), - Size: f.Size(), - URL: u.String(), - ModTime: f.ModTime().UTC(), - Mode: f.Mode(), - }) - } - - return Listing{ - Name: path.Base(urlPath), - Path: urlPath, - CanGoUp: canGoUp, - Items: fileInfos, - NumDirs: dirCount, - NumFiles: fileCount, - }, hasIndexFile -} - -// isSymlink return true if f is a symbolic link -func isSymlink(f os.FileInfo) bool { - return f.Mode()&os.ModeSymlink != 0 -} - -// isSymlinkTargetDir return true if f's symbolic link target -// is a directory. Return false if not a symbolic link. -func isSymlinkTargetDir(f os.FileInfo, urlPath string, config *Config) bool { - if !isSymlink(f) { - return false - } - - // a bit strange, but we want Stat thru the jailed filesystem to be safe - target, err := config.Fs.Root.Open(path.Join(urlPath, f.Name())) - if err != nil { - return false - } - defer target.Close() - targetInfo, err := target.Stat() - if err != nil { - return false - } - - return targetInfo.IsDir() -} - -// ServeHTTP determines if the request is for this plugin, and if all prerequisites are met. -// If so, control is handed over to ServeListing. 
-func (b Browse) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - // See if there's a browse configuration to match the path - var bc *Config - for i := range b.Configs { - if httpserver.Path(r.URL.Path).Matches(b.Configs[i].PathScope) { - bc = &b.Configs[i] - break - } - } - if bc == nil { - return b.Next.ServeHTTP(w, r) - } - - // Browse works on existing directories; delegate everything else - requestedFilepath, err := bc.Fs.Root.Open(r.URL.Path) - if err != nil { - switch { - case os.IsPermission(err): - return http.StatusForbidden, err - case os.IsExist(err): - return http.StatusNotFound, err - default: - return b.Next.ServeHTTP(w, r) - } - } - defer requestedFilepath.Close() - - info, err := requestedFilepath.Stat() - if err != nil { - switch { - case os.IsPermission(err): - return http.StatusForbidden, err - case os.IsExist(err): - return http.StatusGone, err - default: - return b.Next.ServeHTTP(w, r) - } - } - if !info.IsDir() { - return b.Next.ServeHTTP(w, r) - } - - // Do not reply to anything else because it might be nonsensical - switch r.Method { - case http.MethodGet, http.MethodHead: - // proceed, noop - case "PROPFIND", http.MethodOptions: - return http.StatusNotImplemented, nil - default: - return b.Next.ServeHTTP(w, r) - } - - // Browsing navigation gets messed up if browsing a directory - // that doesn't end in "/" (which it should, anyway) - u := *r.URL - if u.Path == "" { - u.Path = "/" - } - if u.Path[len(u.Path)-1] != '/' { - u.Path += "/" - http.Redirect(w, r, u.String(), http.StatusMovedPermanently) - return http.StatusMovedPermanently, nil - } - - return b.ServeListing(w, r, requestedFilepath, bc) -} - -func (b Browse) loadDirectoryContents(requestedFilepath http.File, urlPath string, config *Config) (*Listing, bool, error) { - files, err := requestedFilepath.Readdir(-1) - if err != nil { - return nil, false, err - } - - // Determine if user can browse up another folder - var canGoUp bool - curPathDir := 
path.Dir(strings.TrimSuffix(urlPath, "/")) - for _, other := range b.Configs { - if strings.HasPrefix(curPathDir, other.PathScope) { - canGoUp = true - break - } - } - - // Assemble listing of directory contents - listing, hasIndex := directoryListing(files, canGoUp, urlPath, config) - - return &listing, hasIndex, nil -} - -// handleSortOrder gets and stores for a Listing the 'sort' and 'order', -// and reads 'limit' if given. The latter is 0 if not given. -// -// This sets Cookies. -func (b Browse) handleSortOrder(w http.ResponseWriter, r *http.Request, scope string) (sort string, order string, limit int, err error) { - sort, order, limitQuery := r.URL.Query().Get("sort"), r.URL.Query().Get("order"), r.URL.Query().Get("limit") - - // If the query 'sort' or 'order' is empty, use defaults or any values previously saved in Cookies - switch sort { - case "": - sort = sortByNameDirFirst - if sortCookie, sortErr := r.Cookie("sort"); sortErr == nil { - sort = sortCookie.Value - } - case sortByName, sortByNameDirFirst, sortBySize, sortByTime: - http.SetCookie(w, &http.Cookie{Name: "sort", Value: sort, Path: scope, Secure: r.TLS != nil}) - } - - switch order { - case "": - order = "asc" - if orderCookie, orderErr := r.Cookie("order"); orderErr == nil { - order = orderCookie.Value - } - case "asc", "desc": - http.SetCookie(w, &http.Cookie{Name: "order", Value: order, Path: scope, Secure: r.TLS != nil}) - } - - if limitQuery != "" { - limit, err = strconv.Atoi(limitQuery) - if err != nil { // if the 'limit' query can't be interpreted as a number, return err - return - } - } - - return -} - -// ServeListing returns a formatted view of 'requestedFilepath' contents'. 
-func (b Browse) ServeListing(w http.ResponseWriter, r *http.Request, requestedFilepath http.File, bc *Config) (int, error) { - listing, containsIndex, err := b.loadDirectoryContents(requestedFilepath, r.URL.Path, bc) - if err != nil { - switch { - case os.IsPermission(err): - return http.StatusForbidden, err - case os.IsExist(err): - return http.StatusGone, err - default: - return http.StatusInternalServerError, err - } - } - if containsIndex && !b.IgnoreIndexes { // directory isn't browsable - return b.Next.ServeHTTP(w, r) - } - listing.Context = httpserver.Context{ - Root: bc.Fs.Root, - Req: r, - URL: r.URL, - } - listing.User = bc.Variables - - // Copy the query values into the Listing struct - var limit int - listing.Sort, listing.Order, limit, err = b.handleSortOrder(w, r, bc.PathScope) - if err != nil { - return http.StatusBadRequest, err - } - - listing.applySort() - - if limit > 0 && limit <= len(listing.Items) { - listing.Items = listing.Items[:limit] - listing.ItemsLimitedTo = limit - } - - var buf *bytes.Buffer - acceptHeader := strings.ToLower(strings.Join(r.Header["Accept"], ",")) - switch { - case strings.Contains(acceptHeader, "application/json"): - if buf, err = b.formatAsJSON(listing, bc); err != nil { - return http.StatusInternalServerError, err - } - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - default: // There's no 'application/json' in the 'Accept' header; browse normally - if buf, err = b.formatAsHTML(listing, bc); err != nil { - return http.StatusInternalServerError, err - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - - } - - _, _ = buf.WriteTo(w) - - return http.StatusOK, nil -} - -func (b Browse) formatAsJSON(listing *Listing, bc *Config) (*bytes.Buffer, error) { - marsh, err := json.Marshal(listing.Items) - if err != nil { - return nil, err - } - - buf := new(bytes.Buffer) - _, err = buf.Write(marsh) - return buf, err -} - -func (b Browse) formatAsHTML(listing *Listing, bc *Config) 
(*bytes.Buffer, error) { - buf := new(bytes.Buffer) - err := bc.Template.Execute(buf, listing) - return buf, err -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/browse/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/browse/setup.go deleted file mode 100644 index 90b1646d1..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/browse/setup.go +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package browse - -import ( - "fmt" - "io/ioutil" - "net/http" - "text/template" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" - "github.com/mholt/caddy/caddyhttp/staticfiles" -) - -func init() { - caddy.RegisterPlugin("browse", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new Browse middleware instance. 
-func setup(c *caddy.Controller) error { - configs, err := browseParse(c) - if err != nil { - return err - } - - b := Browse{ - Configs: configs, - IgnoreIndexes: false, - } - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - b.Next = next - return b - }) - - return nil -} - -func browseParse(c *caddy.Controller) ([]Config, error) { - var configs []Config - - cfg := httpserver.GetConfig(c) - - appendCfg := func(bc Config) error { - for _, c := range configs { - if c.PathScope == bc.PathScope { - return fmt.Errorf("duplicate browsing config for %s", c.PathScope) - } - } - configs = append(configs, bc) - return nil - } - - for c.Next() { - var bc Config - - // First argument is directory to allow browsing; default is site root - if c.NextArg() { - bc.PathScope = c.Val() - } else { - bc.PathScope = "/" - } - - bc.Fs = staticfiles.FileServer{ - Root: http.Dir(cfg.Root), - Hide: cfg.HiddenFiles, - IndexPages: cfg.IndexPages, - } - - // Second argument would be the template file to use - var tplText string - if c.NextArg() { - tplBytes, err := ioutil.ReadFile(c.Val()) - if err != nil { - return configs, err - } - tplText = string(tplBytes) - } else { - tplText = defaultTemplate - } - - // Build the template - tpl, err := template.New("listing").Parse(tplText) - if err != nil { - return configs, err - } - bc.Template = tpl - - // Save configuration - err = appendCfg(bc) - if err != nil { - return configs, err - } - } - - return configs, nil -} - -// The default template to use when serving up directory listings -const defaultTemplate = ` - - - {{html .Name}} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-

- {{range $i, $crumb := .Breadcrumbs}}{{html $crumb.Text}}{{if ne $i 0}}/{{end}}{{end}} -

-
-
-
-
- {{.NumDirs}} director{{if eq 1 .NumDirs}}y{{else}}ies{{end}} - {{.NumFiles}} file{{if ne 1 .NumFiles}}s{{end}} - {{- if ne 0 .ItemsLimitedTo}} - (of which only {{.ItemsLimitedTo}} are displayed) - {{- end}} - -
-
-
- - - - - - - - - - - - {{- if .CanGoUp}} - - - - - - - - {{- end}} - {{- range .Items}} - - - - {{- if .IsDir}} - - {{- else}} - - {{- end}} - - - - {{- end}} - -
- {{- if and (eq .Sort "namedirfirst") (ne .Order "desc")}} - - {{- else if and (eq .Sort "namedirfirst") (ne .Order "asc")}} - - {{- else}} - - {{- end}} - - {{- if and (eq .Sort "name") (ne .Order "desc")}} - Name - {{- else if and (eq .Sort "name") (ne .Order "asc")}} - Name - {{- else}} - Name - {{- end}} - - {{- if and (eq .Sort "size") (ne .Order "desc")}} - Size - {{- else if and (eq .Sort "size") (ne .Order "asc")}} - Size - {{- else}} - Size - {{- end}} - - {{- if and (eq .Sort "time") (ne .Order "desc")}} - Modified - {{- else if and (eq .Sort "time") (ne .Order "asc")}} - Modified - {{- else}} - Modified - {{- end}} -
- - Go up - -
- - {{- if .IsDir}} - - {{- else}} - - {{- end}} - {{html .Name}} - - {{.HumanSize}}
-
-
- - - -` diff --git a/vendor/github.com/mholt/caddy/caddyhttp/caddyhttp.go b/vendor/github.com/mholt/caddy/caddyhttp/caddyhttp.go deleted file mode 100644 index d9d97e9d0..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/caddyhttp.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddyhttp - -import ( - // plug in the server - _ "github.com/mholt/caddy/caddyhttp/httpserver" - - // plug in the standard directives - _ "github.com/mholt/caddy/caddyhttp/basicauth" - _ "github.com/mholt/caddy/caddyhttp/bind" - _ "github.com/mholt/caddy/caddyhttp/browse" - _ "github.com/mholt/caddy/caddyhttp/errors" - _ "github.com/mholt/caddy/caddyhttp/expvar" - _ "github.com/mholt/caddy/caddyhttp/extensions" - _ "github.com/mholt/caddy/caddyhttp/fastcgi" - _ "github.com/mholt/caddy/caddyhttp/gzip" - _ "github.com/mholt/caddy/caddyhttp/header" - _ "github.com/mholt/caddy/caddyhttp/index" - _ "github.com/mholt/caddy/caddyhttp/internalsrv" - _ "github.com/mholt/caddy/caddyhttp/limits" - _ "github.com/mholt/caddy/caddyhttp/log" - _ "github.com/mholt/caddy/caddyhttp/markdown" - _ "github.com/mholt/caddy/caddyhttp/mime" - _ "github.com/mholt/caddy/caddyhttp/pprof" - _ "github.com/mholt/caddy/caddyhttp/proxy" - _ "github.com/mholt/caddy/caddyhttp/push" - _ "github.com/mholt/caddy/caddyhttp/redirect" - _ "github.com/mholt/caddy/caddyhttp/requestid" - _ "github.com/mholt/caddy/caddyhttp/rewrite" - 
_ "github.com/mholt/caddy/caddyhttp/root" - _ "github.com/mholt/caddy/caddyhttp/status" - _ "github.com/mholt/caddy/caddyhttp/templates" - _ "github.com/mholt/caddy/caddyhttp/timeouts" - _ "github.com/mholt/caddy/caddyhttp/websocket" - _ "github.com/mholt/caddy/onevent" -) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/errors/errors.go b/vendor/github.com/mholt/caddy/caddyhttp/errors/errors.go deleted file mode 100644 index fd09f8fb1..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/errors/errors.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package errors implements an HTTP error handling middleware. -package errors - -import ( - "fmt" - "io" - "net/http" - "os" - "runtime" - "strings" - "time" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("errors", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// ErrorHandler handles HTTP errors (and errors from other middleware). 
-type ErrorHandler struct { - Next httpserver.Handler - GenericErrorPage string // default error page filename - ErrorPages map[int]string // map of status code to filename - Log *httpserver.Logger - Debug bool // if true, errors are written out to client rather than to a log -} - -func (h ErrorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - defer h.recovery(w, r) - - status, err := h.Next.ServeHTTP(w, r) - - if err != nil { - errMsg := fmt.Sprintf("%s [ERROR %d %s] %v", time.Now().Format(timeFormat), status, r.URL.Path, err) - if h.Debug { - // Write error to response instead of to log - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.WriteHeader(status) - fmt.Fprintln(w, errMsg) - return 0, err // returning 0 signals that a response has been written - } - h.Log.Println(errMsg) - } - - if status >= 400 { - h.errorPage(w, r, status) - return 0, err - } - - return status, err -} - -// errorPage serves a static error page to w according to the status -// code. If there is an error serving the error page, a plaintext error -// message is written instead, and the extra error is logged. -func (h ErrorHandler) errorPage(w http.ResponseWriter, r *http.Request, code int) { - // See if an error page for this status code was specified - if pagePath, ok := h.findErrorPage(code); ok { - // Try to open it - errorPage, err := os.Open(pagePath) - if err != nil { - // An additional error handling an error... - h.Log.Printf("%s [NOTICE %d %s] could not load error page: %v", - time.Now().Format(timeFormat), code, r.URL.String(), err) - httpserver.DefaultErrorFunc(w, r, code) - return - } - defer errorPage.Close() - - // Copy the page body into the response - w.Header().Set("Content-Type", "text/html; charset=utf-8") - w.WriteHeader(code) - _, err = io.Copy(w, errorPage) - - if err != nil { - // Epic fail... sigh. 
- h.Log.Printf("%s [NOTICE %d %s] could not respond with %s: %v", - time.Now().Format(timeFormat), code, r.URL.String(), pagePath, err) - httpserver.DefaultErrorFunc(w, r, code) - } - - return - } - - // Default error response - httpserver.DefaultErrorFunc(w, r, code) -} - -func (h ErrorHandler) findErrorPage(code int) (string, bool) { - if pagePath, ok := h.ErrorPages[code]; ok { - return pagePath, true - } - - if h.GenericErrorPage != "" { - return h.GenericErrorPage, true - } - - return "", false -} - -func (h ErrorHandler) recovery(w http.ResponseWriter, r *http.Request) { - rec := recover() - if rec == nil { - return - } - - // Obtain source of panic - // From: https://gist.github.com/swdunlop/9629168 - var name, file string // function name, file name - var line int - var pc [16]uintptr - n := runtime.Callers(3, pc[:]) - for _, pc := range pc[:n] { - fn := runtime.FuncForPC(pc) - if fn == nil { - continue - } - file, line = fn.FileLine(pc) - name = fn.Name() - if !strings.HasPrefix(name, "runtime.") { - break - } - } - - // Trim file path - delim := "/github.com/mholt/caddy/" - pkgPathPos := strings.Index(file, delim) - if pkgPathPos > -1 && len(file) > pkgPathPos+len(delim) { - file = file[pkgPathPos+len(delim):] - } - - panicMsg := fmt.Sprintf("%s [PANIC %s] %s:%d - %v", time.Now().Format(timeFormat), r.URL.String(), file, line, rec) - if h.Debug { - // Write error and stack trace to the response rather than to a log - var stackBuf [4096]byte - stack := stackBuf[:runtime.Stack(stackBuf[:], false)] - httpserver.WriteTextResponse(w, http.StatusInternalServerError, fmt.Sprintf("%s\n\n%s", panicMsg, stack)) - } else { - // Currently we don't use the function name, since file:line is more conventional - h.Log.Printf(panicMsg) - h.errorPage(w, r, http.StatusInternalServerError) - } -} - -const timeFormat = "02/Jan/2006:15:04:05 -0700" diff --git a/vendor/github.com/mholt/caddy/caddyhttp/errors/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/errors/setup.go 
deleted file mode 100644 index bcda1c8b3..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/errors/setup.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "log" - "os" - "path/filepath" - "strconv" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// setup configures a new errors middleware instance. -func setup(c *caddy.Controller) error { - handler, err := errorsParse(c) - - if err != nil { - return err - } - - handler.Log.Attach(c) - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - handler.Next = next - return handler - }) - - return nil -} - -func errorsParse(c *caddy.Controller) (*ErrorHandler, error) { - - // Very important that we make a pointer because the startup - // function that opens the log file must have access to the - // same instance of the handler, not a copy. - handler := &ErrorHandler{ - ErrorPages: make(map[int]string), - Log: &httpserver.Logger{}, - } - - cfg := httpserver.GetConfig(c) - - optionalBlock := func() error { - for c.NextBlock() { - - what := c.Val() - where := c.RemainingArgs() - - if httpserver.IsLogRollerSubdirective(what) { - var err error - err = httpserver.ParseRoller(handler.Log.Roller, what, where...) 
- if err != nil { - return err - } - } else { - if len(where) != 1 { - return c.ArgErr() - } - where := where[0] - - // Error page; ensure it exists - if !filepath.IsAbs(where) { - where = filepath.Join(cfg.Root, where) - } - - f, err := os.Open(where) - if err != nil { - log.Printf("[WARNING] Unable to open error page '%s': %v", where, err) - } - f.Close() - - if what == "*" { - if handler.GenericErrorPage != "" { - return c.Errf("Duplicate status code entry: %s", what) - } - handler.GenericErrorPage = where - } else { - whatInt, err := strconv.Atoi(what) - if err != nil { - return c.Err("Expecting a numeric status code or '*', got '" + what + "'") - } - - if _, exists := handler.ErrorPages[whatInt]; exists { - return c.Errf("Duplicate status code entry: %s", what) - } - - handler.ErrorPages[whatInt] = where - } - } - } - return nil - } - - for c.Next() { - // weird hack to avoid having the handler values overwritten. - if c.Val() == "}" { - continue - } - - args := c.RemainingArgs() - - if len(args) == 1 { - switch args[0] { - case "visible": - handler.Debug = true - default: - handler.Log.Output = args[0] - handler.Log.Roller = httpserver.DefaultLogRoller() - } - } - - if len(args) > 1 { - return handler, c.Errf("Only 1 Argument expected for errors directive") - } - - // Configuration may be in a block - err := optionalBlock() - if err != nil { - return handler, err - } - } - - return handler, nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/expvar/expvar.go b/vendor/github.com/mholt/caddy/caddyhttp/expvar/expvar.go deleted file mode 100644 index efbfa32df..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/expvar/expvar.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expvar - -import ( - "expvar" - "fmt" - "net/http" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// ExpVar is a simple struct to hold expvar's configuration -type ExpVar struct { - Next httpserver.Handler - Resource Resource -} - -// ServeHTTP handles requests to expvar's configured entry point with -// expvar, or passes all other requests up the chain. -func (e ExpVar) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - if httpserver.Path(r.URL.Path).Matches(string(e.Resource)) { - expvarHandler(w, r) - return 0, nil - } - return e.Next.ServeHTTP(w, r) -} - -// expvarHandler returns a JSON object will all the published variables. -// -// This is lifted straight from the expvar package. 
-func expvarHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} - -// Resource contains the path to the expvar entry point -type Resource string diff --git a/vendor/github.com/mholt/caddy/caddyhttp/expvar/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/expvar/setup.go deleted file mode 100644 index 79f2763b5..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/expvar/setup.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expvar - -import ( - "expvar" - "runtime" - "sync" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("expvar", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new ExpVar middleware instance. 
-func setup(c *caddy.Controller) error { - resource, err := expVarParse(c) - if err != nil { - return err - } - - // publish any extra information/metrics we may want to capture - publishExtraVars() - - ev := ExpVar{Resource: resource} - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - ev.Next = next - return ev - }) - - return nil -} - -func expVarParse(c *caddy.Controller) (Resource, error) { - var resource Resource - var err error - - for c.Next() { - args := c.RemainingArgs() - switch len(args) { - case 0: - resource = Resource(defaultExpvarPath) - case 1: - resource = Resource(args[0]) - default: - return resource, c.ArgErr() - } - } - - return resource, err -} - -func publishExtraVars() { - // By using sync.Once instead of an init() function, we don't clutter - // the app's expvar export unnecessarily, or risk colliding with it. - publishOnce.Do(func() { - expvar.Publish("Goroutines", expvar.Func(func() interface{} { - return runtime.NumGoroutine() - })) - }) -} - -var publishOnce sync.Once // publishing variables should only be done once -var defaultExpvarPath = "/debug/vars" diff --git a/vendor/github.com/mholt/caddy/caddyhttp/extensions/ext.go b/vendor/github.com/mholt/caddy/caddyhttp/extensions/ext.go deleted file mode 100644 index b78f5792c..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/extensions/ext.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package extensions contains middleware for clean URLs. -// -// The root path of the site is passed in as well as possible extensions -// to try internally for paths requested that don't match an existing -// resource. The first path+ext combination that matches a valid file -// will be used. -package extensions - -import ( - "net/http" - "os" - "path" - "strings" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// Ext can assume an extension from clean URLs. -// It tries extensions in the order listed in Extensions. -type Ext struct { - // Next handler in the chain - Next httpserver.Handler - - // Path to site root - Root string - - // List of extensions to try - Extensions []string -} - -// ServeHTTP implements the httpserver.Handler interface. -func (e Ext) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - urlpath := strings.TrimSuffix(r.URL.Path, "/") - if len(r.URL.Path) > 0 && path.Ext(urlpath) == "" && r.URL.Path[len(r.URL.Path)-1] != '/' { - for _, ext := range e.Extensions { - _, err := os.Stat(httpserver.SafePath(e.Root, urlpath) + ext) - if err == nil { - r.URL.Path = urlpath + ext - break - } - } - } - return e.Next.ServeHTTP(w, r) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/extensions/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/extensions/setup.go deleted file mode 100644 index 1261c49ee..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/extensions/setup.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package extensions - -import ( - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("ext", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new instance of 'extensions' middleware for clean URLs. -func setup(c *caddy.Controller) error { - cfg := httpserver.GetConfig(c) - root := cfg.Root - - exts, err := extParse(c) - if err != nil { - return err - } - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Ext{ - Next: next, - Extensions: exts, - Root: root, - } - }) - - return nil -} - -// extParse sets up an instance of extension middleware -// from a middleware controller and returns a list of extensions. -func extParse(c *caddy.Controller) ([]string, error) { - var exts []string - - for c.Next() { - // At least one extension is required - if !c.NextArg() { - return exts, c.ArgErr() - } - exts = append(exts, c.Val()) - - // Tack on any other extensions that may have been listed - exts = append(exts, c.RemainingArgs()...) 
- } - - return exts, nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fastcgi.go b/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fastcgi.go deleted file mode 100644 index f5c80dc56..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fastcgi.go +++ /dev/null @@ -1,496 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package fastcgi has middleware that acts as a FastCGI client. Requests -// that get forwarded to FastCGI stop the middleware execution chain. -// The most common use for this package is to serve PHP websites via php-fpm. -package fastcgi - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "sync/atomic" - "time" - - "crypto/tls" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" - "github.com/mholt/caddy/caddytls" -) - -// Handler is a middleware type that can handle requests as a FastCGI client. -type Handler struct { - Next httpserver.Handler - Rules []Rule - Root string - FileSys http.FileSystem - - // These are sent to CGI scripts in env variables - SoftwareName string - SoftwareVersion string - ServerName string - ServerPort string -} - -// ServeHTTP satisfies the httpserver.Handler interface. 
-func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - for _, rule := range h.Rules { - // First requirement: Base path must match request path. If it doesn't, - // we check to make sure the leading slash is not missing, and if so, - // we check again with it prepended. This is in case people forget - // a leading slash when performing rewrites, and we don't want to expose - // the contents of the (likely PHP) script. See issue #1645. - hpath := httpserver.Path(r.URL.Path) - if !hpath.Matches(rule.Path) { - if strings.HasPrefix(string(hpath), "/") { - // this is a normal-looking path, and it doesn't match; try next rule - continue - } - hpath = httpserver.Path("/" + string(hpath)) // prepend leading slash - if !hpath.Matches(rule.Path) { - // even after fixing the request path, it still doesn't match; try next rule - continue - } - } - // The path must also be allowed (not ignored). - if !rule.AllowedPath(r.URL.Path) { - continue - } - - // In addition to matching the path, a request must meet some - // other criteria before being proxied as FastCGI. For example, - // we probably want to exclude static assets (CSS, JS, images...) - // but we also want to be flexible for the script we proxy to. - - fpath := r.URL.Path - - if idx, ok := httpserver.IndexFile(h.FileSys, fpath, rule.IndexFiles); ok { - fpath = idx - // Index file present. - // If request path cannot be split, return error. - if !rule.canSplit(fpath) { - return http.StatusInternalServerError, ErrIndexMissingSplit - } - } else { - // No index file present. - // If request path cannot be split, ignore request. 
- if !rule.canSplit(fpath) { - continue - } - } - - // These criteria work well in this order for PHP sites - if !h.exists(fpath) || fpath[len(fpath)-1] == '/' || strings.HasSuffix(fpath, rule.Ext) { - - // Create environment for CGI script - env, err := h.buildEnv(r, rule, fpath) - if err != nil { - return http.StatusInternalServerError, err - } - - // Connect to FastCGI gateway - address, err := rule.Address() - if err != nil { - return http.StatusBadGateway, err - } - network, address := parseAddress(address) - - ctx := context.Background() - if rule.ConnectTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, rule.ConnectTimeout) - defer cancel() - } - - fcgiBackend, err := DialContext(ctx, network, address) - if err != nil { - return http.StatusBadGateway, err - } - defer fcgiBackend.Close() - - // read/write timeouts - if err := fcgiBackend.SetReadTimeout(rule.ReadTimeout); err != nil { - return http.StatusInternalServerError, err - } - if err := fcgiBackend.SetSendTimeout(rule.SendTimeout); err != nil { - return http.StatusInternalServerError, err - } - - var resp *http.Response - - var contentLength int64 - // if ContentLength is already set - if r.ContentLength > 0 { - contentLength = r.ContentLength - } else { - contentLength, _ = strconv.ParseInt(r.Header.Get("Content-Length"), 10, 64) - } - switch r.Method { - case "HEAD": - resp, err = fcgiBackend.Head(env) - case "GET": - resp, err = fcgiBackend.Get(env, r.Body, contentLength) - case "OPTIONS": - resp, err = fcgiBackend.Options(env) - default: - resp, err = fcgiBackend.Post(env, r.Method, r.Header.Get("Content-Type"), r.Body, contentLength) - } - - if resp != nil && resp.Body != nil { - defer resp.Body.Close() - } - - if err != nil { - if err, ok := err.(net.Error); ok && err.Timeout() { - return http.StatusGatewayTimeout, err - } else if err != io.EOF { - return http.StatusBadGateway, err - } - } - - // Write response header - writeHeader(w, resp) - - // Write the 
response body - _, err = io.Copy(w, resp.Body) - if err != nil { - return http.StatusBadGateway, err - } - - // Log any stderr output from upstream - if fcgiBackend.stderr.Len() != 0 { - // Remove trailing newline, error logger already does this. - err = LogError(strings.TrimSuffix(fcgiBackend.stderr.String(), "\n")) - } - - // Normally we would return the status code if it is an error status (>= 400), - // however, upstream FastCGI apps don't know about our contract and have - // probably already written an error page. So we just return 0, indicating - // that the response body is already written. However, we do return any - // error value so it can be logged. - // Note that the proxy middleware works the same way, returning status=0. - return 0, err - } - } - - return h.Next.ServeHTTP(w, r) -} - -// parseAddress returns the network and address of fcgiAddress. -// The first string is the network, "tcp" or "unix", implied from the scheme and address. -// The second string is fcgiAddress, with scheme prefixes removed. -// The two returned strings can be used as parameters to the Dial() function. 
-func parseAddress(fcgiAddress string) (string, string) { - // check if address has tcp scheme explicitly set - if strings.HasPrefix(fcgiAddress, "tcp://") { - return "tcp", fcgiAddress[len("tcp://"):] - } - // check if address has fastcgi scheme explicitly set - if strings.HasPrefix(fcgiAddress, "fastcgi://") { - return "tcp", fcgiAddress[len("fastcgi://"):] - } - // check if unix socket - if trim := strings.HasPrefix(fcgiAddress, "unix"); strings.HasPrefix(fcgiAddress, "/") || trim { - if trim { - return "unix", fcgiAddress[len("unix:"):] - } - return "unix", fcgiAddress - } - // default case, a plain tcp address with no scheme - return "tcp", fcgiAddress -} - -func writeHeader(w http.ResponseWriter, r *http.Response) { - for key, vals := range r.Header { - for _, val := range vals { - w.Header().Add(key, val) - } - } - w.WriteHeader(r.StatusCode) -} - -func (h Handler) exists(path string) bool { - if _, err := os.Stat(h.Root + path); err == nil { - return true - } - return false -} - -// buildEnv returns a set of CGI environment variables for the request. -func (h Handler) buildEnv(r *http.Request, rule Rule, fpath string) (map[string]string, error) { - var env map[string]string - - // Separate remote IP and port; more lenient than net.SplitHostPort - var ip, port string - if idx := strings.LastIndex(r.RemoteAddr, ":"); idx > -1 { - ip = r.RemoteAddr[:idx] - port = r.RemoteAddr[idx+1:] - } else { - ip = r.RemoteAddr - } - - // Remove [] from IPv6 addresses - ip = strings.Replace(ip, "[", "", 1) - ip = strings.Replace(ip, "]", "", 1) - - // Split path in preparation for env variables. - // Previous rule.canSplit checks ensure this can never be -1. 
- splitPos := rule.splitPos(fpath) - - // Request has the extension; path was split successfully - docURI := fpath[:splitPos+len(rule.SplitPath)] - pathInfo := fpath[splitPos+len(rule.SplitPath):] - scriptName := fpath - - // Strip PATH_INFO from SCRIPT_NAME - scriptName = strings.TrimSuffix(scriptName, pathInfo) - - // SCRIPT_FILENAME is the absolute path of SCRIPT_NAME - scriptFilename := filepath.Join(rule.Root, scriptName) - - // Add vhost path prefix to scriptName. Otherwise, some PHP software will - // have difficulty discovering its URL. - pathPrefix, _ := r.Context().Value(caddy.CtxKey("path_prefix")).(string) - scriptName = path.Join(pathPrefix, scriptName) - - // Get the request URI from context. The context stores the original URI in case - // it was changed by a middleware such as rewrite. By default, we pass the - // original URI in as the value of REQUEST_URI (the user can overwrite this - // if desired). Most PHP apps seem to want the original URI. Besides, this is - // how nginx defaults: http://stackoverflow.com/a/12485156/1048862 - reqURL, _ := r.Context().Value(httpserver.OriginalURLCtxKey).(url.URL) - - // Retrieve name of remote user that was set by some downstream middleware such as basicauth. - remoteUser, _ := r.Context().Value(httpserver.RemoteUserCtxKey).(string) - - requestScheme := "http" - if r.TLS != nil { - requestScheme = "https" - } - - // Some variables are unused but cleared explicitly to prevent - // the parent environment from interfering. 
- env = map[string]string{ - // Variables defined in CGI 1.1 spec - "AUTH_TYPE": "", // Not used - "CONTENT_LENGTH": r.Header.Get("Content-Length"), - "CONTENT_TYPE": r.Header.Get("Content-Type"), - "GATEWAY_INTERFACE": "CGI/1.1", - "PATH_INFO": pathInfo, - "QUERY_STRING": r.URL.RawQuery, - "REMOTE_ADDR": ip, - "REMOTE_HOST": ip, // For speed, remote host lookups disabled - "REMOTE_PORT": port, - "REMOTE_IDENT": "", // Not used - "REMOTE_USER": remoteUser, - "REQUEST_METHOD": r.Method, - "REQUEST_SCHEME": requestScheme, - "SERVER_NAME": h.ServerName, - "SERVER_PORT": h.ServerPort, - "SERVER_PROTOCOL": r.Proto, - "SERVER_SOFTWARE": h.SoftwareName + "/" + h.SoftwareVersion, - - // Other variables - "DOCUMENT_ROOT": rule.Root, - "DOCUMENT_URI": docURI, - "HTTP_HOST": r.Host, // added here, since not always part of headers - "REQUEST_URI": reqURL.RequestURI(), - "SCRIPT_FILENAME": scriptFilename, - "SCRIPT_NAME": scriptName, - } - - // compliance with the CGI specification requires that - // PATH_TRANSLATED should only exist if PATH_INFO is defined. - // Info: https://www.ietf.org/rfc/rfc3875 Page 14 - if env["PATH_INFO"] != "" { - env["PATH_TRANSLATED"] = filepath.Join(rule.Root, pathInfo) // Info: http://www.oreilly.com/openbook/cgi/ch02_04.html - } - - // Some web apps rely on knowing HTTPS or not - if r.TLS != nil { - env["HTTPS"] = "on" - // and pass the protocol details in a manner compatible with apache's mod_ssl - // (which is why they have a SSL_ prefix and not TLS_). 
- v, ok := tlsProtocolStringToMap[r.TLS.Version] - if ok { - env["SSL_PROTOCOL"] = v - } - // and pass the cipher suite in a manner compatible with apache's mod_ssl - for k, v := range caddytls.SupportedCiphersMap { - if v == r.TLS.CipherSuite { - env["SSL_CIPHER"] = k - break - } - } - } - - // Add env variables from config (with support for placeholders in values) - replacer := httpserver.NewReplacer(r, nil, "") - for _, envVar := range rule.EnvVars { - env[envVar[0]] = replacer.Replace(envVar[1]) - } - - // Add all HTTP headers to env variables - for field, val := range r.Header { - header := strings.ToUpper(field) - header = headerNameReplacer.Replace(header) - env["HTTP_"+header] = strings.Join(val, ", ") - } - return env, nil -} - -// Rule represents a FastCGI handling rule. -// It is parsed from the fastcgi directive in the Caddyfile, see setup.go. -type Rule struct { - // The base path to match. Required. - Path string - - // upstream load balancer - balancer - - // Always process files with this extension with fastcgi. - Ext string - - // Use this directory as the fastcgi root directory. Defaults to the root - // directory of the parent virtual host. - Root string - - // The path in the URL will be split into two, with the first piece ending - // with the value of SplitPath. The first piece will be assumed as the - // actual resource (CGI script) name, and the second piece will be set to - // PATH_INFO for the CGI script to use. - SplitPath string - - // If the URL ends with '/' (which indicates a directory), these index - // files will be tried instead. - IndexFiles []string - - // Environment Variables - EnvVars [][2]string - - // Ignored paths - IgnoredSubPaths []string - - // The duration used to set a deadline when connecting to an upstream. - ConnectTimeout time.Duration - - // The duration used to set a deadline when reading from the FastCGI server. 
- ReadTimeout time.Duration - - // The duration used to set a deadline when sending to the FastCGI server. - SendTimeout time.Duration -} - -// balancer is a fastcgi upstream load balancer. -type balancer interface { - // Address picks an upstream address from the - // underlying load balancer. - Address() (string, error) -} - -// roundRobin is a round robin balancer for fastcgi upstreams. -type roundRobin struct { - // Known Go bug: https://golang.org/pkg/sync/atomic/#pkg-note-BUG - // must be first field for 64 bit alignment - // on x86 and arm. - index int64 - addresses []string -} - -func (r *roundRobin) Address() (string, error) { - index := atomic.AddInt64(&r.index, 1) % int64(len(r.addresses)) - return r.addresses[index], nil -} - -// srvResolver is a private interface used to abstract -// the DNS resolver. It is mainly used to facilitate testing. -type srvResolver interface { - LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) -} - -// srv is a service locator for fastcgi upstreams -type srv struct { - resolver srvResolver - service string -} - -// Address looks up the service and returns the address:port -// from first result in resolved list. -// No explicit balancing is required because net.LookupSRV -// sorts the results by priority and randomizes within priority. -func (s *srv) Address() (string, error) { - _, addrs, err := s.resolver.LookupSRV(context.Background(), "", "", s.service) - if err != nil { - return "", err - } - - return fmt.Sprintf("%s:%d", strings.TrimRight(addrs[0].Target, "."), addrs[0].Port), nil -} - -// canSplit checks if path can split into two based on rule.SplitPath. -func (r Rule) canSplit(path string) bool { - return r.splitPos(path) >= 0 -} - -// splitPos returns the index where path should be split -// based on rule.SplitPath. 
-func (r Rule) splitPos(path string) int { - if httpserver.CaseSensitivePath { - return strings.Index(path, r.SplitPath) - } - return strings.Index(strings.ToLower(path), strings.ToLower(r.SplitPath)) -} - -// AllowedPath checks if requestPath is not an ignored path. -func (r Rule) AllowedPath(requestPath string) bool { - for _, ignoredSubPath := range r.IgnoredSubPaths { - if httpserver.Path(path.Clean(requestPath)).Matches(path.Join(r.Path, ignoredSubPath)) { - return false - } - } - return true -} - -var ( - headerNameReplacer = strings.NewReplacer(" ", "_", "-", "_") - // ErrIndexMissingSplit describes an index configuration error. - ErrIndexMissingSplit = errors.New("configured index file(s) must include split value") -) - -// LogError is a non fatal error that allows requests to go through. -type LogError string - -// Error satisfies error interface. -func (l LogError) Error() string { - return string(l) -} - -// Map of supported protocols to Apache ssl_mod format -// Note that these are slightly different from SupportedProtocols in caddytls/config.go's -var tlsProtocolStringToMap = map[uint16]string{ - tls.VersionTLS10: "TLSv1", - tls.VersionTLS11: "TLSv1.1", - tls.VersionTLS12: "TLSv1.2", -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fcgi_test.php b/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fcgi_test.php deleted file mode 100644 index 3f5e5f2db..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fcgi_test.php +++ /dev/null @@ -1,79 +0,0 @@ - $val) { - $md5 = md5($val); - - if ($key != $md5) { - $stat = "FAILED"; - echo "server:err ".$md5." 
!= ".$key."\n"; - } - - $length += strlen($key) + strlen($val); - - $ret .= $key."(".strlen($key).") "; - } - $ret .= "] ["; - foreach ($_FILES as $k0 => $val) { - - $error = $val["error"]; - if ($error == UPLOAD_ERR_OK) { - $tmp_name = $val["tmp_name"]; - $name = $val["name"]; - $datafile = "/tmp/test.go"; - move_uploaded_file($tmp_name, $datafile); - $md5 = md5_file($datafile); - - if ($k0 != $md5) { - $stat = "FAILED"; - echo "server:err ".$md5." != ".$key."\n"; - } - - $length += strlen($k0) + filesize($datafile); - - unlink($datafile); - $ret .= $k0."(".strlen($k0).") "; - } - else{ - $stat = "FAILED"; - echo "server:file err ".file_upload_error_message($error)."\n"; - } - } - $ret .= "]"; - echo "server:got data length " .$length."\n"; -} - - -echo "-{$stat}-POST(".count($_POST).") FILE(".count($_FILES).")\n"; - -function file_upload_error_message($error_code) { - switch ($error_code) { - case UPLOAD_ERR_INI_SIZE: - return 'The uploaded file exceeds the upload_max_filesize directive in php.ini'; - case UPLOAD_ERR_FORM_SIZE: - return 'The uploaded file exceeds the MAX_FILE_SIZE directive that was specified in the HTML form'; - case UPLOAD_ERR_PARTIAL: - return 'The uploaded file was only partially uploaded'; - case UPLOAD_ERR_NO_FILE: - return 'No file was uploaded'; - case UPLOAD_ERR_NO_TMP_DIR: - return 'Missing a temporary folder'; - case UPLOAD_ERR_CANT_WRITE: - return 'Failed to write file to disk'; - case UPLOAD_ERR_EXTENSION: - return 'File upload stopped by extension'; - default: - return 'Unknown upload error'; - } -} \ No newline at end of file diff --git a/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fcgiclient.go b/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fcgiclient.go deleted file mode 100644 index 45e82526e..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/fcgiclient.go +++ /dev/null @@ -1,578 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you 
may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Forked Jan. 2015 from http://bitbucket.org/PinIdea/fcgi_client -// (which is forked from https://code.google.com/p/go-fastcgi-client/) - -// This fork contains several fixes and improvements by Matt Holt and -// other contributors to this project. - -// Copyright 2012 Junqing Tan and The Go Authors -// Use of this source code is governed by a BSD-style -// Part of source code is from Go fcgi package - -package fastcgi - -import ( - "bufio" - "bytes" - "context" - "encoding/binary" - "errors" - "io" - "io/ioutil" - "mime/multipart" - "net" - "net/http" - "net/http/httputil" - "net/textproto" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" -) - -// FCGIListenSockFileno describes listen socket file number. -const FCGIListenSockFileno uint8 = 0 - -// FCGIHeaderLen describes header length. -const FCGIHeaderLen uint8 = 8 - -// Version1 describes the version. -const Version1 uint8 = 1 - -// FCGINullRequestID describes the null request ID. -const FCGINullRequestID uint8 = 0 - -// FCGIKeepConn describes keep connection mode. -const FCGIKeepConn uint8 = 1 - -const ( - // BeginRequest is the begin request flag. - BeginRequest uint8 = iota + 1 - // AbortRequest is the abort request flag. - AbortRequest - // EndRequest is the end request flag. - EndRequest - // Params is the parameters flag. - Params - // Stdin is the standard input flag. - Stdin - // Stdout is the standard output flag. - Stdout - // Stderr is the standard error flag. 
- Stderr - // Data is the data flag. - Data - // GetValues is the get values flag. - GetValues - // GetValuesResult is the get values result flag. - GetValuesResult - // UnknownType is the unknown type flag. - UnknownType - // MaxType is the maximum type flag. - MaxType = UnknownType -) - -const ( - // Responder is the responder flag. - Responder uint8 = iota + 1 - // Authorizer is the authorizer flag. - Authorizer - // Filter is the filter flag. - Filter -) - -const ( - // RequestComplete is the completed request flag. - RequestComplete uint8 = iota - // CantMultiplexConns is the multiplexed connections flag. - CantMultiplexConns - // Overloaded is the overloaded flag. - Overloaded - // UnknownRole is the unknown role flag. - UnknownRole -) - -const ( - // MaxConns is the maximum connections flag. - MaxConns string = "MAX_CONNS" - // MaxRequests is the maximum requests flag. - MaxRequests string = "MAX_REQS" - // MultiplexConns is the multiplex connections flag. - MultiplexConns string = "MPXS_CONNS" -) - -const ( - maxWrite = 65500 // 65530 may work, but for compatibility - maxPad = 255 -) - -type header struct { - Version uint8 - Type uint8 - ID uint16 - ContentLength uint16 - PaddingLength uint8 - Reserved uint8 -} - -// for padding so we don't have to allocate all the time -// not synchronized because we don't care what the contents are -var pad [maxPad]byte - -func (h *header) init(recType uint8, reqID uint16, contentLength int) { - h.Version = 1 - h.Type = recType - h.ID = reqID - h.ContentLength = uint16(contentLength) - h.PaddingLength = uint8(-contentLength & 7) -} - -type record struct { - h header - rbuf []byte -} - -func (rec *record) read(r io.Reader) (buf []byte, err error) { - if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil { - return - } - if rec.h.Version != 1 { - err = errors.New("fcgi: invalid header version") - return - } - if rec.h.Type == EndRequest { - err = io.EOF - return - } - n := int(rec.h.ContentLength) + 
int(rec.h.PaddingLength) - if len(rec.rbuf) < n { - rec.rbuf = make([]byte, n) - } - if _, err = io.ReadFull(r, rec.rbuf[:n]); err != nil { - return - } - buf = rec.rbuf[:int(rec.h.ContentLength)] - - return -} - -// FCGIClient implements a FastCGI client, which is a standard for -// interfacing external applications with Web servers. -type FCGIClient struct { - mutex sync.Mutex - rwc io.ReadWriteCloser - h header - buf bytes.Buffer - stderr bytes.Buffer - keepAlive bool - reqID uint16 -} - -// DialWithDialerContext connects to the fcgi responder at the specified network address, using custom net.Dialer -// and a context. -// See func net.Dial for a description of the network and address parameters. -func DialWithDialerContext(ctx context.Context, network, address string, dialer net.Dialer) (fcgi *FCGIClient, err error) { - var conn net.Conn - conn, err = dialer.DialContext(ctx, network, address) - if err != nil { - return - } - - fcgi = &FCGIClient{ - rwc: conn, - keepAlive: false, - reqID: 1, - } - - return -} - -// DialContext is like Dial but passes ctx to dialer.Dial. -func DialContext(ctx context.Context, network, address string) (fcgi *FCGIClient, err error) { - return DialWithDialerContext(ctx, network, address, net.Dialer{}) -} - -// Dial connects to the fcgi responder at the specified network address, using default net.Dialer. -// See func net.Dial for a description of the network and address parameters. 
-func Dial(network, address string) (fcgi *FCGIClient, err error) { - return DialContext(context.Background(), network, address) -} - -// Close closes fcgi connection -func (c *FCGIClient) Close() { - c.rwc.Close() -} - -func (c *FCGIClient) writeRecord(recType uint8, content []byte) (err error) { - c.mutex.Lock() - defer c.mutex.Unlock() - c.buf.Reset() - c.h.init(recType, c.reqID, len(content)) - if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil { - return err - } - if _, err := c.buf.Write(content); err != nil { - return err - } - if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil { - return err - } - _, err = c.rwc.Write(c.buf.Bytes()) - return err -} - -func (c *FCGIClient) writeBeginRequest(role uint16, flags uint8) error { - b := [8]byte{byte(role >> 8), byte(role), flags} - return c.writeRecord(BeginRequest, b[:]) -} - -func (c *FCGIClient) writeEndRequest(appStatus int, protocolStatus uint8) error { - b := make([]byte, 8) - binary.BigEndian.PutUint32(b, uint32(appStatus)) - b[4] = protocolStatus - return c.writeRecord(EndRequest, b) -} - -func (c *FCGIClient) writePairs(recType uint8, pairs map[string]string) error { - w := newWriter(c, recType) - b := make([]byte, 8) - nn := 0 - for k, v := range pairs { - m := 8 + len(k) + len(v) - if m > maxWrite { - // param data size exceed 65535 bytes" - vl := maxWrite - 8 - len(k) - v = v[:vl] - } - n := encodeSize(b, uint32(len(k))) - n += encodeSize(b[n:], uint32(len(v))) - m = n + len(k) + len(v) - if (nn + m) > maxWrite { - w.Flush() - nn = 0 - } - nn += m - if _, err := w.Write(b[:n]); err != nil { - return err - } - if _, err := w.WriteString(k); err != nil { - return err - } - if _, err := w.WriteString(v); err != nil { - return err - } - } - w.Close() - return nil -} - -func encodeSize(b []byte, size uint32) int { - if size > 127 { - size |= 1 << 31 - binary.BigEndian.PutUint32(b, size) - return 4 - } - b[0] = byte(size) - return 1 -} - -// bufWriter encapsulates bufio.Writer but 
also closes the underlying stream when -// Closed. -type bufWriter struct { - closer io.Closer - *bufio.Writer -} - -func (w *bufWriter) Close() error { - if err := w.Writer.Flush(); err != nil { - w.closer.Close() - return err - } - return w.closer.Close() -} - -func newWriter(c *FCGIClient, recType uint8) *bufWriter { - s := &streamWriter{c: c, recType: recType} - w := bufio.NewWriterSize(s, maxWrite) - return &bufWriter{s, w} -} - -// streamWriter abstracts out the separation of a stream into discrete records. -// It only writes maxWrite bytes at a time. -type streamWriter struct { - c *FCGIClient - recType uint8 -} - -func (w *streamWriter) Write(p []byte) (int, error) { - nn := 0 - for len(p) > 0 { - n := len(p) - if n > maxWrite { - n = maxWrite - } - if err := w.c.writeRecord(w.recType, p[:n]); err != nil { - return nn, err - } - nn += n - p = p[n:] - } - return nn, nil -} - -func (w *streamWriter) Close() error { - // send empty record to close the stream - return w.c.writeRecord(w.recType, nil) -} - -type streamReader struct { - c *FCGIClient - buf []byte -} - -func (w *streamReader) Read(p []byte) (n int, err error) { - - if len(p) > 0 { - if len(w.buf) == 0 { - - // filter outputs for error log - for { - rec := &record{} - var buf []byte - buf, err = rec.read(w.c.rwc) - if err != nil { - return - } - // standard error output - if rec.h.Type == Stderr { - w.c.stderr.Write(buf) - continue - } - w.buf = buf - break - } - } - - n = len(p) - if n > len(w.buf) { - n = len(w.buf) - } - copy(p, w.buf[:n]) - w.buf = w.buf[n:] - } - - return -} - -// Do made the request and returns a io.Reader that translates the data read -// from fcgi responder out of fcgi packet before returning it. 
-func (c *FCGIClient) Do(p map[string]string, req io.Reader) (r io.Reader, err error) { - err = c.writeBeginRequest(uint16(Responder), 0) - if err != nil { - return - } - - err = c.writePairs(Params, p) - if err != nil { - return - } - - body := newWriter(c, Stdin) - if req != nil { - _, _ = io.Copy(body, req) - } - body.Close() - - r = &streamReader{c: c} - return -} - -// clientCloser is a io.ReadCloser. It wraps a io.Reader with a Closer -// that closes FCGIClient connection. -type clientCloser struct { - *FCGIClient - io.Reader -} - -func (f clientCloser) Close() error { return f.rwc.Close() } - -// Request returns a HTTP Response with Header and Body -// from fcgi responder -func (c *FCGIClient) Request(p map[string]string, req io.Reader) (resp *http.Response, err error) { - r, err := c.Do(p, req) - if err != nil { - return - } - - rb := bufio.NewReader(r) - tp := textproto.NewReader(rb) - resp = new(http.Response) - - // Parse the response headers. - mimeHeader, err := tp.ReadMIMEHeader() - if err != nil && err != io.EOF { - return - } - resp.Header = http.Header(mimeHeader) - - if resp.Header.Get("Status") != "" { - statusParts := strings.SplitN(resp.Header.Get("Status"), " ", 2) - resp.StatusCode, err = strconv.Atoi(statusParts[0]) - if err != nil { - return - } - if len(statusParts) > 1 { - resp.Status = statusParts[1] - } - - } else { - resp.StatusCode = http.StatusOK - } - - // TODO: fixTransferEncoding ? - resp.TransferEncoding = resp.Header["Transfer-Encoding"] - resp.ContentLength, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - - if chunked(resp.TransferEncoding) { - resp.Body = clientCloser{c, httputil.NewChunkedReader(rb)} - } else { - resp.Body = clientCloser{c, ioutil.NopCloser(rb)} - } - return -} - -// Get issues a GET request to the fcgi responder. 
-func (c *FCGIClient) Get(p map[string]string, body io.Reader, l int64) (resp *http.Response, err error) { - - p["REQUEST_METHOD"] = "GET" - p["CONTENT_LENGTH"] = strconv.FormatInt(l, 10) - - return c.Request(p, body) -} - -// Head issues a HEAD request to the fcgi responder. -func (c *FCGIClient) Head(p map[string]string) (resp *http.Response, err error) { - - p["REQUEST_METHOD"] = "HEAD" - p["CONTENT_LENGTH"] = "0" - - return c.Request(p, nil) -} - -// Options issues an OPTIONS request to the fcgi responder. -func (c *FCGIClient) Options(p map[string]string) (resp *http.Response, err error) { - - p["REQUEST_METHOD"] = "OPTIONS" - p["CONTENT_LENGTH"] = "0" - - return c.Request(p, nil) -} - -// Post issues a POST request to the fcgi responder. with request body -// in the format that bodyType specified -func (c *FCGIClient) Post(p map[string]string, method string, bodyType string, body io.Reader, l int64) (resp *http.Response, err error) { - if p == nil { - p = make(map[string]string) - } - - p["REQUEST_METHOD"] = strings.ToUpper(method) - - if len(p["REQUEST_METHOD"]) == 0 || p["REQUEST_METHOD"] == "GET" { - p["REQUEST_METHOD"] = "POST" - } - - p["CONTENT_LENGTH"] = strconv.FormatInt(l, 10) - if len(bodyType) > 0 { - p["CONTENT_TYPE"] = bodyType - } else { - p["CONTENT_TYPE"] = "application/x-www-form-urlencoded" - } - - return c.Request(p, body) -} - -// PostForm issues a POST to the fcgi responder, with form -// as a string key to a list values (url.Values) -func (c *FCGIClient) PostForm(p map[string]string, data url.Values) (resp *http.Response, err error) { - body := bytes.NewReader([]byte(data.Encode())) - return c.Post(p, "POST", "application/x-www-form-urlencoded", body, int64(body.Len())) -} - -// PostFile issues a POST to the fcgi responder in multipart(RFC 2046) standard, -// with form as a string key to a list values (url.Values), -// and/or with file as a string key to a list file path. 
-func (c *FCGIClient) PostFile(p map[string]string, data url.Values, file map[string]string) (resp *http.Response, err error) { - buf := &bytes.Buffer{} - writer := multipart.NewWriter(buf) - bodyType := writer.FormDataContentType() - - for key, val := range data { - for _, v0 := range val { - err = writer.WriteField(key, v0) - if err != nil { - return - } - } - } - - for key, val := range file { - fd, e := os.Open(val) - if e != nil { - return nil, e - } - defer fd.Close() - - part, e := writer.CreateFormFile(key, filepath.Base(val)) - if e != nil { - return nil, e - } - _, err = io.Copy(part, fd) - if err != nil { - return - } - } - - err = writer.Close() - if err != nil { - return - } - - return c.Post(p, "POST", bodyType, buf, int64(buf.Len())) -} - -// SetReadTimeout sets the read timeout for future calls that read from the -// fcgi responder. A zero value for t means no timeout will be set. -func (c *FCGIClient) SetReadTimeout(t time.Duration) error { - if conn, ok := c.rwc.(net.Conn); ok && t != 0 { - return conn.SetReadDeadline(time.Now().Add(t)) - } - return nil -} - -// SetSendTimeout sets the read timeout for future calls that send data to -// the fcgi responder. A zero value for t means no timeout will be set. 
-func (c *FCGIClient) SetSendTimeout(t time.Duration) error { - if conn, ok := c.rwc.(net.Conn); ok && t != 0 { - return conn.SetWriteDeadline(time.Now().Add(t)) - } - return nil -} - -// Checks whether chunked is part of the encodings stack -func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" } diff --git a/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/setup.go deleted file mode 100644 index 8034c6410..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/fastcgi/setup.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fastcgi - -import ( - "errors" - "fmt" - "net" - "net/http" - "path/filepath" - "strings" - "time" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -var defaultTimeout = 60 * time.Second - -func init() { - caddy.RegisterPlugin("fastcgi", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new FastCGI middleware instance. 
-func setup(c *caddy.Controller) error { - cfg := httpserver.GetConfig(c) - - rules, err := fastcgiParse(c) - if err != nil { - return err - } - - cfg.AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Handler{ - Next: next, - Rules: rules, - Root: cfg.Root, - FileSys: http.Dir(cfg.Root), - SoftwareName: caddy.AppName, - SoftwareVersion: caddy.AppVersion, - ServerName: cfg.Addr.Host, - ServerPort: cfg.Addr.Port, - } - }) - - return nil -} - -func fastcgiParse(c *caddy.Controller) ([]Rule, error) { - var rules []Rule - - cfg := httpserver.GetConfig(c) - absRoot, err := filepath.Abs(cfg.Root) - if err != nil { - return nil, err - } - - for c.Next() { - args := c.RemainingArgs() - - if len(args) < 2 || len(args) > 3 { - return rules, c.ArgErr() - } - - rule := Rule{ - Root: absRoot, - Path: args[0], - ConnectTimeout: defaultTimeout, - ReadTimeout: defaultTimeout, - SendTimeout: defaultTimeout, - } - - upstreams := []string{args[1]} - - srvUpstream := false - if strings.HasPrefix(upstreams[0], "srv://") { - srvUpstream = true - } - - if len(args) == 3 { - if err := fastcgiPreset(args[2], &rule); err != nil { - return rules, err - } - } - - var err error - - for c.NextBlock() { - switch c.Val() { - case "root": - if !c.NextArg() { - return rules, c.ArgErr() - } - rule.Root = c.Val() - - case "ext": - if !c.NextArg() { - return rules, c.ArgErr() - } - rule.Ext = c.Val() - case "split": - if !c.NextArg() { - return rules, c.ArgErr() - } - rule.SplitPath = c.Val() - case "index": - args := c.RemainingArgs() - if len(args) == 0 { - return rules, c.ArgErr() - } - rule.IndexFiles = args - - case "upstream": - if srvUpstream { - return rules, c.Err("additional upstreams are not supported with SRV upstream") - } - - args := c.RemainingArgs() - - if len(args) != 1 { - return rules, c.ArgErr() - } - - upstreams = append(upstreams, args[0]) - case "env": - envArgs := c.RemainingArgs() - if len(envArgs) < 2 { - return rules, c.ArgErr() - } - rule.EnvVars = 
append(rule.EnvVars, [2]string{envArgs[0], envArgs[1]}) - case "except": - ignoredPaths := c.RemainingArgs() - if len(ignoredPaths) == 0 { - return rules, c.ArgErr() - } - rule.IgnoredSubPaths = ignoredPaths - - case "connect_timeout": - if !c.NextArg() { - return rules, c.ArgErr() - } - rule.ConnectTimeout, err = time.ParseDuration(c.Val()) - if err != nil { - return rules, err - } - case "read_timeout": - if !c.NextArg() { - return rules, c.ArgErr() - } - readTimeout, err := time.ParseDuration(c.Val()) - if err != nil { - return rules, err - } - rule.ReadTimeout = readTimeout - case "send_timeout": - if !c.NextArg() { - return rules, c.ArgErr() - } - sendTimeout, err := time.ParseDuration(c.Val()) - if err != nil { - return rules, err - } - rule.SendTimeout = sendTimeout - } - } - - if srvUpstream { - balancer, err := parseSRV(upstreams[0]) - if err != nil { - return rules, c.Err("malformed service locator string: " + err.Error()) - } - rule.balancer = balancer - } else { - rule.balancer = &roundRobin{addresses: upstreams, index: -1} - } - - rules = append(rules, rule) - } - return rules, nil -} - -func parseSRV(locator string) (*srv, error) { - if locator[6:] == "" { - return nil, fmt.Errorf("%s does not include the host", locator) - } - - return &srv{ - service: locator[6:], - resolver: &net.Resolver{}, - }, nil -} - -// fastcgiPreset configures rule according to name. It returns an error if -// name is not a recognized preset name. 
-func fastcgiPreset(name string, rule *Rule) error { - switch name { - case "php": - rule.Ext = ".php" - rule.SplitPath = ".php" - rule.IndexFiles = []string{"index.php"} - default: - return errors.New(name + " is not a valid preset name") - } - return nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/gzip/gzip.go b/vendor/github.com/mholt/caddy/caddyhttp/gzip/gzip.go deleted file mode 100644 index 619a1d579..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/gzip/gzip.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gzip provides a middleware layer that performs -// gzip compression on the response. -package gzip - -import ( - "compress/gzip" - "io" - "net/http" - "strings" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("gzip", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) - - initWriterPool() -} - -// Gzip is a middleware type which gzips HTTP responses. It is -// imperative that any handler which writes to a gzipped response -// specifies the Content-Type, otherwise some clients will assume -// application/x-gzip and try to download a file. 
-type Gzip struct { - Next httpserver.Handler - Configs []Config -} - -// Config holds the configuration for Gzip middleware -type Config struct { - RequestFilters []RequestFilter - ResponseFilters []ResponseFilter - Level int // Compression level -} - -// ServeHTTP serves a gzipped response if the client supports it. -func (g Gzip) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { - return g.Next.ServeHTTP(w, r) - } -outer: - for _, c := range g.Configs { - - // Check request filters to determine if gzipping is permitted for this request - for _, filter := range c.RequestFilters { - if !filter.ShouldCompress(r) { - continue outer - } - } - - // In order to avoid unused memory allocation, gzip.putWriter only be called when gzip compression happened. - // see https://github.com/mholt/caddy/issues/2395 - gz := &gzipResponseWriter{ - ResponseWriterWrapper: &httpserver.ResponseWriterWrapper{ResponseWriter: w}, - newWriter: func() io.Writer { - // gzipWriter modifies underlying writer at init, - // use a discard writer instead to leave ResponseWriter in - // original form. - return getWriter(c.Level) - }, - } - - defer func() { - if gzWriter, ok := gz.internalWriter.(*gzip.Writer); ok { - putWriter(c.Level, gzWriter) - } - }() - - var rw http.ResponseWriter - // if no response filter is used - if len(c.ResponseFilters) == 0 { - // replace discard writer with ResponseWriter - if gzWriter, ok := gz.Writer().(*gzip.Writer); ok { - gzWriter.Reset(w) - } - rw = gz - } else { - // wrap gzip writer with ResponseFilterWriter - rw = NewResponseFilterWriter(c.ResponseFilters, gz) - } - - // Any response in forward middleware will now be compressed - status, err := g.Next.ServeHTTP(rw, r) - - // If there was an error that remained unhandled, we need - // to send something back before gzipWriter gets closed at - // the return of this method! 
- if status >= 400 { - httpserver.DefaultErrorFunc(w, r, status) - return 0, err - } - return status, err - } - - // no matching filter - return g.Next.ServeHTTP(w, r) -} - -// gzipResponseWriter wraps the underlying Write method -// with a gzip.Writer to compress the output. -type gzipResponseWriter struct { - internalWriter io.Writer - *httpserver.ResponseWriterWrapper - statusCodeWritten bool - newWriter func() io.Writer -} - -// WriteHeader wraps the underlying WriteHeader method to prevent -// problems with conflicting headers from proxied backends. For -// example, a backend system that calculates Content-Length would -// be wrong because it doesn't know it's being gzipped. -func (w *gzipResponseWriter) WriteHeader(code int) { - w.Header().Del("Content-Length") - w.Header().Set("Content-Encoding", "gzip") - w.Header().Add("Vary", "Accept-Encoding") - originalEtag := w.Header().Get("ETag") - if originalEtag != "" && !strings.HasPrefix(originalEtag, "W/") { - w.Header().Set("ETag", "W/"+originalEtag) - } - w.ResponseWriterWrapper.WriteHeader(code) - w.statusCodeWritten = true -} - -// Write wraps the underlying Write method to do compression. 
-func (w *gzipResponseWriter) Write(b []byte) (int, error) { - if w.Header().Get("Content-Type") == "" { - w.Header().Set("Content-Type", http.DetectContentType(b)) - } - if !w.statusCodeWritten { - w.WriteHeader(http.StatusOK) - } - n, err := w.Writer().Write(b) - return n, err -} - -//Writer use a lazy way to initialize Writer -func (w *gzipResponseWriter) Writer() io.Writer { - if w.internalWriter == nil { - w.internalWriter = w.newWriter() - } - return w.internalWriter -} - -// Interface guards -var _ httpserver.HTTPInterfaces = (*gzipResponseWriter)(nil) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/gzip/requestfilter.go b/vendor/github.com/mholt/caddy/caddyhttp/gzip/requestfilter.go deleted file mode 100644 index 696c051d4..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/gzip/requestfilter.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gzip - -import ( - "net/http" - "path" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// RequestFilter determines if a request should be gzipped. -type RequestFilter interface { - // ShouldCompress tells if gzip compression - // should be done on the request. - ShouldCompress(*http.Request) bool -} - -// defaultExtensions is the list of default extensions for which to enable gzipping. 
-var defaultExtensions = []string{"", ".txt", ".htm", ".html", ".css", ".php", ".js", ".json", - ".md", ".mdown", ".xml", ".svg", ".go", ".cgi", ".py", ".pl", ".aspx", ".asp", ".m3u", ".m3u8"} - -// DefaultExtFilter creates an ExtFilter with default extensions. -func DefaultExtFilter() ExtFilter { - m := ExtFilter{Exts: make(Set)} - for _, extension := range defaultExtensions { - m.Exts.Add(extension) - } - return m -} - -// ExtFilter is RequestFilter for file name extensions. -type ExtFilter struct { - // Exts is the file name extensions to accept - Exts Set -} - -// ExtWildCard is the wildcard for extensions. -const ExtWildCard = "*" - -// ShouldCompress checks if the request file extension matches any -// of the registered extensions. It returns true if the extension is -// found and false otherwise. -func (e ExtFilter) ShouldCompress(r *http.Request) bool { - ext := path.Ext(r.URL.Path) - return e.Exts.Contains(ExtWildCard) || e.Exts.Contains(ext) -} - -// PathFilter is RequestFilter for request path. -type PathFilter struct { - // IgnoredPaths is the paths to ignore - IgnoredPaths Set -} - -// ShouldCompress checks if the request path matches any of the -// registered paths to ignore. It returns false if an ignored path -// is found and true otherwise. -func (p PathFilter) ShouldCompress(r *http.Request) bool { - return !p.IgnoredPaths.ContainsFunc(func(value string) bool { - return httpserver.Path(r.URL.Path).Matches(value) - }) -} - -// Set stores distinct strings. -type Set map[string]struct{} - -// Add adds an element to the set. -func (s Set) Add(value string) { - s[value] = struct{}{} -} - -// Remove removes an element from the set. -func (s Set) Remove(value string) { - delete(s, value) -} - -// Contains check if the set contains value. -func (s Set) Contains(value string) bool { - _, ok := s[value] - return ok -} - -// ContainsFunc is similar to Contains. It iterates all the -// elements in the set and passes each to f. 
It returns true -// on the first call to f that returns true and false otherwise. -func (s Set) ContainsFunc(f func(string) bool) bool { - for k := range s { - if f(k) { - return true - } - } - return false -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/gzip/responsefilter.go b/vendor/github.com/mholt/caddy/caddyhttp/gzip/responsefilter.go deleted file mode 100644 index 82e10e9c9..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/gzip/responsefilter.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gzip - -import ( - "compress/gzip" - "net/http" - "strconv" -) - -// ResponseFilter determines if the response should be gzipped. -type ResponseFilter interface { - ShouldCompress(http.ResponseWriter) bool -} - -// LengthFilter is ResponseFilter for minimum content length. -type LengthFilter int64 - -// ShouldCompress returns if content length is greater than or -// equals to minimum length. 
-func (l LengthFilter) ShouldCompress(w http.ResponseWriter) bool { - contentLength := w.Header().Get("Content-Length") - length, err := strconv.ParseInt(contentLength, 10, 64) - if err != nil || length == 0 { - return false - } - return l != 0 && int64(l) <= length -} - -// SkipCompressedFilter is ResponseFilter that will discard already compressed responses -type SkipCompressedFilter struct{} - -// ShouldCompress returns true if served file is not already compressed -// encodings via https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding -func (n SkipCompressedFilter) ShouldCompress(w http.ResponseWriter) bool { - switch w.Header().Get("Content-Encoding") { - case "gzip", "compress", "deflate", "br": - return false - default: - return true - } -} - -// ResponseFilterWriter validates ResponseFilters. It writes -// gzip compressed data if ResponseFilters are satisfied or -// uncompressed data otherwise. -type ResponseFilterWriter struct { - filters []ResponseFilter - shouldCompress bool - statusCodeWritten bool - *gzipResponseWriter -} - -// NewResponseFilterWriter creates and initializes a new ResponseFilterWriter. -func NewResponseFilterWriter(filters []ResponseFilter, gz *gzipResponseWriter) *ResponseFilterWriter { - return &ResponseFilterWriter{filters: filters, gzipResponseWriter: gz} -} - -// WriteHeader wraps underlying WriteHeader method and -// compresses if filters are satisfied. -func (r *ResponseFilterWriter) WriteHeader(code int) { - // Determine if compression should be used or not. 
- r.shouldCompress = true - for _, filter := range r.filters { - if !filter.ShouldCompress(r) { - r.shouldCompress = false - break - } - } - - if r.shouldCompress { - // replace discard writer with ResponseWriter - if gzWriter, ok := r.gzipResponseWriter.Writer().(*gzip.Writer); ok { - gzWriter.Reset(r.ResponseWriter) - } - // use gzip WriteHeader to include and delete - // necessary headers - r.gzipResponseWriter.WriteHeader(code) - } else { - r.ResponseWriter.WriteHeader(code) - } - r.statusCodeWritten = true -} - -// Write wraps underlying Write method and compresses if filters -// are satisfied -func (r *ResponseFilterWriter) Write(b []byte) (int, error) { - if !r.statusCodeWritten { - r.WriteHeader(http.StatusOK) - } - if r.shouldCompress { - return r.gzipResponseWriter.Write(b) - } - return r.ResponseWriter.Write(b) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/gzip/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/gzip/setup.go deleted file mode 100644 index 275955c1d..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/gzip/setup.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gzip - -import ( - "compress/gzip" - "fmt" - "io/ioutil" - "strconv" - "strings" - "sync" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// setup configures a new gzip middleware instance. 
-func setup(c *caddy.Controller) error { - configs, err := gzipParse(c) - if err != nil { - return err - } - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Gzip{Next: next, Configs: configs} - }) - - return nil -} - -func gzipParse(c *caddy.Controller) ([]Config, error) { - var configs []Config - - for c.Next() { - config := Config{} - - // Request Filters - pathFilter := PathFilter{IgnoredPaths: make(Set)} - extFilter := ExtFilter{Exts: make(Set)} - - // Response Filters - lengthFilter := LengthFilter(0) - - // No extra args expected - if len(c.RemainingArgs()) > 0 { - return configs, c.ArgErr() - } - - for c.NextBlock() { - switch c.Val() { - case "ext": - exts := c.RemainingArgs() - if len(exts) == 0 { - return configs, c.ArgErr() - } - for _, e := range exts { - if !strings.HasPrefix(e, ".") && e != ExtWildCard && e != "" { - return configs, fmt.Errorf(`gzip: invalid extension "%v" (must start with dot)`, e) - } - extFilter.Exts.Add(e) - } - case "not": - paths := c.RemainingArgs() - if len(paths) == 0 { - return configs, c.ArgErr() - } - for _, p := range paths { - if p == "/" { - return configs, fmt.Errorf(`gzip: cannot exclude path "/" - remove directive entirely instead`) - } - if !strings.HasPrefix(p, "/") { - return configs, fmt.Errorf(`gzip: invalid path "%v" (must start with /)`, p) - } - pathFilter.IgnoredPaths.Add(p) - } - case "level": - if !c.NextArg() { - return configs, c.ArgErr() - } - level, _ := strconv.Atoi(c.Val()) - config.Level = level - case "min_length": - if !c.NextArg() { - return configs, c.ArgErr() - } - length, err := strconv.ParseInt(c.Val(), 10, 64) - if err != nil { - return configs, err - } else if length == 0 { - return configs, fmt.Errorf(`gzip: min_length must be greater than 0`) - } - lengthFilter = LengthFilter(length) - default: - return configs, c.ArgErr() - } - } - - // Request Filters - config.RequestFilters = []RequestFilter{} - - // If ignored paths are specified, 
put in front to filter with path first - if len(pathFilter.IgnoredPaths) > 0 { - config.RequestFilters = []RequestFilter{pathFilter} - } - - // Then, if extensions are specified, use those to filter. - // Otherwise, use default extensions filter. - if len(extFilter.Exts) > 0 { - config.RequestFilters = append(config.RequestFilters, extFilter) - } else { - config.RequestFilters = append(config.RequestFilters, DefaultExtFilter()) - } - - config.ResponseFilters = append(config.ResponseFilters, SkipCompressedFilter{}) - - // Response Filters - // If min_length is specified, use it. - if int64(lengthFilter) != 0 { - config.ResponseFilters = append(config.ResponseFilters, lengthFilter) - } - - configs = append(configs, config) - } - - return configs, nil -} - -// pool gzip.Writer according to compress level -// so we can reuse allocations over time -var ( - writerPool = map[int]*sync.Pool{} - defaultWriterPoolIndex int -) - -func initWriterPool() { - var i int - newWriterPool := func(level int) *sync.Pool { - return &sync.Pool{ - New: func() interface{} { - w, _ := gzip.NewWriterLevel(ioutil.Discard, level) - return w - }, - } - } - for i = gzip.BestSpeed; i <= gzip.BestCompression; i++ { - writerPool[i] = newWriterPool(i) - } - - // add default writer pool - defaultWriterPoolIndex = i - writerPool[defaultWriterPoolIndex] = newWriterPool(gzip.DefaultCompression) -} - -func getWriter(level int) *gzip.Writer { - index := defaultWriterPoolIndex - if level >= gzip.BestSpeed && level <= gzip.BestCompression { - index = level - } - w := writerPool[index].Get().(*gzip.Writer) - w.Reset(ioutil.Discard) - return w -} - -func putWriter(level int, w *gzip.Writer) { - index := defaultWriterPoolIndex - if level >= gzip.BestSpeed && level <= gzip.BestCompression { - index = level - } - w.Close() - writerPool[index].Put(w) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/header/header.go b/vendor/github.com/mholt/caddy/caddyhttp/header/header.go deleted file mode 100644 index 
884c5e403..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/header/header.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package header provides middleware that appends headers to -// requests based on a set of configuration rules that define -// which routes receive which headers. -package header - -import ( - "net/http" - "strings" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// Headers is middleware that adds headers to the responses -// for requests matching a certain path. -type Headers struct { - Next httpserver.Handler - Rules []Rule -} - -// ServeHTTP implements the httpserver.Handler interface and serves requests, -// setting headers on the response according to the configured rules. -func (h Headers) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - replacer := httpserver.NewReplacer(r, nil, "") - rww := &responseWriterWrapper{ - ResponseWriterWrapper: &httpserver.ResponseWriterWrapper{ResponseWriter: w}, - } - for _, rule := range h.Rules { - if httpserver.Path(r.URL.Path).Matches(rule.Path) { - for name := range rule.Headers { - - // One can either delete a header, add multiple values to a header, or simply - // set a header. 
- - if strings.HasPrefix(name, "-") { - rww.delHeader(strings.TrimLeft(name, "-")) - } else if strings.HasPrefix(name, "+") { - for _, value := range rule.Headers[name] { - rww.Header().Add(strings.TrimLeft(name, "+"), replacer.Replace(value)) - } - } else { - for _, value := range rule.Headers[name] { - rww.Header().Set(name, replacer.Replace(value)) - } - } - } - } - } - return h.Next.ServeHTTP(rww, r) -} - -type ( - // Rule groups a slice of HTTP headers by a URL pattern. - Rule struct { - Path string - Headers http.Header - } -) - -// headerOperation represents an operation on the header -type headerOperation func(http.Header) - -// responseWriterWrapper wraps the real ResponseWriter. -// It defers header operations until writeHeader -type responseWriterWrapper struct { - *httpserver.ResponseWriterWrapper - ops []headerOperation - wroteHeader bool -} - -func (rww *responseWriterWrapper) Header() http.Header { - return rww.ResponseWriterWrapper.Header() -} - -func (rww *responseWriterWrapper) Write(d []byte) (int, error) { - if !rww.wroteHeader { - rww.WriteHeader(http.StatusOK) - } - return rww.ResponseWriterWrapper.Write(d) -} - -func (rww *responseWriterWrapper) WriteHeader(status int) { - if rww.wroteHeader { - return - } - rww.wroteHeader = true - // capture the original headers - h := rww.Header() - - // perform our revisions - for _, op := range rww.ops { - op(h) - } - - rww.ResponseWriterWrapper.WriteHeader(status) -} - -// delHeader deletes the existing header according to the key -// Also it will delete that header added later. 
-func (rww *responseWriterWrapper) delHeader(key string) { - // remove the existing one if any - rww.Header().Del(key) - - // register a future deletion - rww.ops = append(rww.ops, func(h http.Header) { - h.Del(key) - }) -} - -// Interface guards -var _ httpserver.HTTPInterfaces = (*responseWriterWrapper)(nil) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/header/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/header/setup.go deleted file mode 100644 index 99c173fb9..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/header/setup.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package header - -import ( - "net/http" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("header", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new Headers middleware instance. 
-func setup(c *caddy.Controller) error { - rules, err := headersParse(c) - if err != nil { - return err - } - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Headers{Next: next, Rules: rules} - }) - - return nil -} - -func headersParse(c *caddy.Controller) ([]Rule, error) { - var rules []Rule - - for c.NextLine() { - var head Rule - head.Headers = http.Header{} - var isNewPattern bool - - if !c.NextArg() { - return rules, c.ArgErr() - } - pattern := c.Val() - - // See if we already have a definition for this Path pattern... - for _, h := range rules { - if h.Path == pattern { - head = h - break - } - } - - // ...otherwise, this is a new pattern - if head.Path == "" { - head.Path = pattern - isNewPattern = true - } - - for c.NextBlock() { - // A block of headers was opened... - name := c.Val() - value := "" - - args := c.RemainingArgs() - - if len(args) > 1 { - return rules, c.ArgErr() - } else if len(args) == 1 { - value = args[0] - } - - head.Headers.Add(name, value) - } - if c.NextArg() { - // ... or single header was defined as an argument instead. - - name := c.Val() - value := c.Val() - - if c.NextArg() { - value = c.Val() - } - - head.Headers.Add(name, value) - } - - if isNewPattern { - rules = append(rules, head) - } else { - for i := 0; i < len(rules); i++ { - if rules[i].Path == pattern { - rules[i] = head - break - } - } - } - } - - return rules, nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/condition.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/condition.go deleted file mode 100644 index 0b329092b..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/condition.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpserver - -import ( - "fmt" - "net/http" - "regexp" - "strings" - - "github.com/mholt/caddy" -) - -// SetupIfMatcher parses `if` or `if_op` in the current dispenser block. -// It returns a RequestMatcher and an error if any. -func SetupIfMatcher(controller *caddy.Controller) (RequestMatcher, error) { - var c = controller.Dispenser // copy the dispenser - var matcher IfMatcher - for c.NextBlock() { - switch c.Val() { - case "if": - args1 := c.RemainingArgs() - if len(args1) != 3 { - return matcher, c.ArgErr() - } - ifc, err := newIfCond(args1[0], args1[1], args1[2]) - if err != nil { - return matcher, err - } - matcher.ifs = append(matcher.ifs, ifc) - matcher.Enabled = true - case "if_op": - if !c.NextArg() { - return matcher, c.ArgErr() - } - switch c.Val() { - case "and": - matcher.isOr = false - case "or": - matcher.isOr = true - default: - return matcher, c.ArgErr() - } - } - } - return matcher, nil -} - -// operators -const ( - isOp = "is" - notOp = "not" - hasOp = "has" - startsWithOp = "starts_with" - endsWithOp = "ends_with" - matchOp = "match" -) - -// ifCondition is a 'if' condition. -type ifFunc func(a, b string) bool - -// ifCond is statement for a IfMatcher condition. -type ifCond struct { - a string - op string - b string - neg bool - rex *regexp.Regexp - f ifFunc -} - -// newIfCond creates a new If condition. 
-func newIfCond(a, op, b string) (ifCond, error) { - i := ifCond{a: a, op: op, b: b} - if strings.HasPrefix(op, "not_") { - i.neg = true - i.op = op[4:] - } - - switch i.op { - case isOp: - // It checks for equality. - i.f = i.isFunc - case notOp: - // It checks for inequality. - i.f = i.notFunc - case hasOp: - // It checks if b is a substring of a. - i.f = strings.Contains - case startsWithOp: - // It checks if b is a prefix of a. - i.f = strings.HasPrefix - case endsWithOp: - // It checks if b is a suffix of a. - i.f = strings.HasSuffix - case matchOp: - // It does regexp matching of a against pattern in b and returns if they match. - var err error - if i.rex, err = regexp.Compile(i.b); err != nil { - return ifCond{}, fmt.Errorf("Invalid regular expression: '%s', %v", i.b, err) - } - i.f = i.matchFunc - default: - return ifCond{}, fmt.Errorf("Invalid operator %v", i.op) - } - - return i, nil -} - -// isFunc is condition for Is operator. -func (i ifCond) isFunc(a, b string) bool { - return a == b -} - -// notFunc is condition for Not operator. -func (i ifCond) notFunc(a, b string) bool { - return a != b -} - -// matchFunc is condition for Match operator. -func (i ifCond) matchFunc(a, b string) bool { - return i.rex.MatchString(a) -} - -// True returns true if the condition is true and false otherwise. -// If r is not nil, it replaces placeholders before comparison. -func (i ifCond) True(r *http.Request) bool { - if i.f != nil { - a, b := i.a, i.b - if r != nil { - replacer := NewReplacer(r, nil, "") - a = replacer.Replace(i.a) - if i.op != matchOp { - b = replacer.Replace(i.b) - } - } - if i.neg { - return !i.f(a, b) - } - return i.f(a, b) - } - return i.neg // false if not negated, true otherwise -} - -// IfMatcher is a RequestMatcher for 'if' conditions. 
-type IfMatcher struct { - Enabled bool // if true, matcher has been configured; otherwise it's no-op - ifs []ifCond // list of If - isOr bool // if true, conditions are 'or' instead of 'and' -} - -// Match satisfies RequestMatcher interface. -// It returns true if the conditions in m are true. -func (m IfMatcher) Match(r *http.Request) bool { - if m.isOr { - return m.Or(r) - } - return m.And(r) -} - -// And returns true if all conditions in m are true. -func (m IfMatcher) And(r *http.Request) bool { - for _, i := range m.ifs { - if !i.True(r) { - return false - } - } - return true -} - -// Or returns true if any of the conditions in m is true. -func (m IfMatcher) Or(r *http.Request) bool { - for _, i := range m.ifs { - if i.True(r) { - return true - } - } - return false -} - -// IfMatcherKeyword checks if the next value in the dispenser is a keyword for 'if' config block. -// If true, remaining arguments in the dispenser are cleared to keep the dispenser valid for use. -func IfMatcherKeyword(c *caddy.Controller) bool { - if c.Val() == "if" || c.Val() == "if_op" { - // clear remaining args - c.RemainingArgs() - return true - } - return false -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/error.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/error.go deleted file mode 100644 index 85bc6e136..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/error.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package httpserver - -import ( - "fmt" -) - -var ( - _ error = NonHijackerError{} - _ error = NonFlusherError{} - _ error = NonCloseNotifierError{} - _ error = NonPusherError{} -) - -// NonHijackerError is more descriptive error caused by a non hijacker -type NonHijackerError struct { - // underlying type which doesn't implement Hijack - Underlying interface{} -} - -// Implement Error -func (h NonHijackerError) Error() string { - return fmt.Sprintf("%T is not a hijacker", h.Underlying) -} - -// NonFlusherError is more descriptive error caused by a non flusher -type NonFlusherError struct { - // underlying type which doesn't implement Flush - Underlying interface{} -} - -// Implement Error -func (f NonFlusherError) Error() string { - return fmt.Sprintf("%T is not a flusher", f.Underlying) -} - -// NonCloseNotifierError is more descriptive error caused by a non closeNotifier -type NonCloseNotifierError struct { - // underlying type which doesn't implement CloseNotify - Underlying interface{} -} - -// Implement Error -func (c NonCloseNotifierError) Error() string { - return fmt.Sprintf("%T is not a closeNotifier", c.Underlying) -} - -// NonPusherError is more descriptive error caused by a non pusher -type NonPusherError struct { - // underlying type which doesn't implement pusher - Underlying interface{} -} - -// Implement Error -func (c NonPusherError) Error() string { - return fmt.Sprintf("%T is not a pusher", c.Underlying) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/https.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/https.go deleted file mode 100644 index 24b18b8a8..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/https.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpserver - -import ( - "fmt" - "net" - "net/http" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddytls" - "github.com/mholt/certmagic" -) - -func activateHTTPS(cctx caddy.Context) error { - operatorPresent := !caddy.Started() - - if !caddy.Quiet && operatorPresent { - fmt.Print("Activating privacy features... ") - } - - ctx := cctx.(*httpContext) - - // pre-screen each config and earmark the ones that qualify for managed TLS - markQualifiedForAutoHTTPS(ctx.siteConfigs) - - // place certificates and keys on disk - for _, c := range ctx.siteConfigs { - if !c.TLS.Managed { - continue - } - if c.TLS.Manager.OnDemand != nil { - continue // obtain these certificates on-demand instead - } - err := c.TLS.Manager.ObtainCert(c.TLS.Hostname, operatorPresent) - if err != nil { - return err - } - } - - // update TLS configurations - err := enableAutoHTTPS(ctx.siteConfigs, true) - if err != nil { - return err - } - - // set up redirects - ctx.siteConfigs = makePlaintextRedirects(ctx.siteConfigs) - - // renew all relevant certificates that need renewal. this is important - // to do right away so we guarantee that renewals aren't missed, and - // also the user can respond to any potential errors that occur. - // (skip if upgrading, because the parent process is likely already listening - // on the ports we'd need to do ACME before we finish starting; parent process - // already running renewal ticker, so renewal won't be missed anyway.) 
- if !caddy.IsUpgrade() { - ctx.instance.StorageMu.RLock() - certCache, ok := ctx.instance.Storage[caddytls.CertCacheInstStorageKey].(*certmagic.Cache) - ctx.instance.StorageMu.RUnlock() - if ok && certCache != nil { - err = certCache.RenewManagedCertificates(operatorPresent) - if err != nil { - return err - } - } - } - - if !caddy.Quiet && operatorPresent { - fmt.Println("done.") - } - - return nil -} - -// markQualifiedForAutoHTTPS scans each config and, if it -// qualifies for managed TLS, it sets the Managed field of -// the TLS config to true. -func markQualifiedForAutoHTTPS(configs []*SiteConfig) { - for _, cfg := range configs { - if caddytls.QualifiesForManagedTLS(cfg) && cfg.Addr.Scheme != "http" { - cfg.TLS.Managed = true - } - } -} - -// enableAutoHTTPS configures each config to use TLS according to default settings. -// It will only change configs that are marked as managed but not on-demand, and -// assumes that certificates and keys are already on disk. If loadCertificates is -// true, the certificates will be loaded from disk into the cache for this process -// to use. If false, TLS will still be enabled and configured with default settings, -// but no certificates will be parsed loaded into the cache, and the returned error -// value will always be nil. 
-func enableAutoHTTPS(configs []*SiteConfig, loadCertificates bool) error { - for _, cfg := range configs { - if cfg == nil || cfg.TLS == nil || !cfg.TLS.Managed || - cfg.TLS.Manager == nil || cfg.TLS.Manager.OnDemand != nil { - continue - } - cfg.TLS.Enabled = true - cfg.Addr.Scheme = "https" - if loadCertificates && certmagic.HostQualifies(cfg.TLS.Hostname) { - _, err := cfg.TLS.Manager.CacheManagedCertificate(cfg.TLS.Hostname) - if err != nil { - return err - } - } - - // Make sure any config values not explicitly set are set to default - caddytls.SetDefaultTLSParams(cfg.TLS) - - // Set default port of 443 if not explicitly set - if cfg.Addr.Port == "" && - cfg.TLS.Enabled && - (!cfg.TLS.Manual || cfg.TLS.Manager.OnDemand != nil) && - cfg.Addr.Host != "localhost" { - cfg.Addr.Port = HTTPSPort - } - } - return nil -} - -// makePlaintextRedirects sets up redirects from port 80 to the relevant HTTPS -// hosts. You must pass in all configs, not just configs that qualify, since -// we must know whether the same host already exists on port 80, and those would -// not be in a list of configs that qualify for automatic HTTPS. This function will -// only set up redirects for configs that qualify. It returns the updated list of -// all configs. -func makePlaintextRedirects(allConfigs []*SiteConfig) []*SiteConfig { - for i, cfg := range allConfigs { - if cfg.TLS.Managed && - !hostHasOtherPort(allConfigs, i, HTTPPort) && - (cfg.Addr.Port == HTTPSPort || !hostHasOtherPort(allConfigs, i, HTTPSPort)) { - allConfigs = append(allConfigs, redirPlaintextHost(cfg)) - } - } - return allConfigs -} - -// hostHasOtherPort returns true if there is another config in the list with the same -// hostname that has port otherPort, or false otherwise. All the configs are checked -// against the hostname of allConfigs[thisConfigIdx]. 
-func hostHasOtherPort(allConfigs []*SiteConfig, thisConfigIdx int, otherPort string) bool { - for i, otherCfg := range allConfigs { - if i == thisConfigIdx { - continue // has to be a config OTHER than the one we're comparing against - } - if otherCfg.Addr.Host == allConfigs[thisConfigIdx].Addr.Host && - otherCfg.Addr.Port == otherPort { - return true - } - } - return false -} - -// redirPlaintextHost returns a new plaintext HTTP configuration for -// a virtualHost that simply redirects to cfg, which is assumed to -// be the HTTPS configuration. The returned configuration is set -// to listen on HTTPPort. The TLS field of cfg must not be nil. -func redirPlaintextHost(cfg *SiteConfig) *SiteConfig { - redirPort := cfg.Addr.Port - if redirPort == HTTPSPort { - // By default, HTTPSPort should be DefaultHTTPSPort, - // which of course doesn't need to be explicitly stated - // in the Location header. Even if HTTPSPort is changed - // so that it is no longer DefaultHTTPSPort, we shouldn't - // append it to the URL in the Location because changing - // the HTTPS port is assumed to be an internal-only change - // (in other words, we assume port forwarding is going on); - // but redirects go back to a presumably-external client. - // (If redirect clients are also internal, that is more - // advanced, and the user should configure HTTP->HTTPS - // redirects themselves.) - redirPort = "" - } - - redirMiddleware := func(next Handler) Handler { - return HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) { - // Construct the URL to which to redirect. Note that the Host in a - // request might contain a port, but we just need the hostname from - // it; and we'll set the port if needed. 
- toURL := "https://" - requestHost, _, err := net.SplitHostPort(r.Host) - if err != nil { - requestHost = r.Host // Host did not contain a port, so use the whole value - } - if redirPort == "" { - toURL += requestHost - } else { - toURL += net.JoinHostPort(requestHost, redirPort) - } - - toURL += r.URL.RequestURI() - - w.Header().Set("Connection", "close") - http.Redirect(w, r, toURL, http.StatusMovedPermanently) - return 0, nil - }) - } - - host := cfg.Addr.Host - port := HTTPPort - addr := net.JoinHostPort(host, port) - - return &SiteConfig{ - Addr: Address{Original: addr, Host: host, Port: port}, - ListenHost: cfg.ListenHost, - middleware: []Middleware{redirMiddleware}, - TLS: &caddytls.Config{Manager: cfg.TLS.Manager}, - Timeouts: cfg.Timeouts, - } -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/logger.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/logger.go deleted file mode 100644 index cd2c26674..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/logger.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package httpserver - -import ( - "bytes" - "io" - "log" - "net" - "os" - "strings" - "sync" - - gsyslog "github.com/hashicorp/go-syslog" - "github.com/mholt/caddy" -) - -var remoteSyslogPrefixes = map[string]string{ - "syslog+tcp://": "tcp", - "syslog+udp://": "udp", - "syslog://": "udp", -} - -// Logger is shared between errors and log plugins and supports both logging to -// a file (with an optional file roller), local and remote syslog servers. -type Logger struct { - Output string - *log.Logger - Roller *LogRoller - writer io.Writer - fileMu *sync.RWMutex - V4ipMask net.IPMask - V6ipMask net.IPMask - IPMaskExists bool - Exceptions []string -} - -// NewTestLogger creates logger suitable for testing purposes -func NewTestLogger(buffer *bytes.Buffer) *Logger { - return &Logger{ - Logger: log.New(buffer, "", 0), - fileMu: new(sync.RWMutex), - } -} - -// Println wraps underlying logger with mutex -func (l Logger) Println(args ...interface{}) { - l.fileMu.RLock() - l.Logger.Println(args...) - l.fileMu.RUnlock() -} - -// Printf wraps underlying logger with mutex -func (l Logger) Printf(format string, args ...interface{}) { - l.fileMu.RLock() - l.Logger.Printf(format, args...) - l.fileMu.RUnlock() -} - -func (l Logger) MaskIP(ip string) string { - var reqIP net.IP - // If unable to parse, simply return IP as provided. - reqIP = net.ParseIP(ip) - if reqIP == nil { - return ip - } - - if reqIP.To4() != nil { - return reqIP.Mask(l.V4ipMask).String() - } else { - return reqIP.Mask(l.V6ipMask).String() - } - -} - -// ShouldLog returns true if the path is not exempted from -// being logged (i.e. it is not found in l.Exceptions). -func (l Logger) ShouldLog(path string) bool { - for _, exc := range l.Exceptions { - if Path(path).Matches(exc) { - return false - } - } - return true -} - -// Attach binds logger Start and Close functions to -// controller's OnStartup and OnShutdown hooks. 
-func (l *Logger) Attach(controller *caddy.Controller) { - if controller != nil { - // Opens file or connect to local/remote syslog - controller.OnStartup(l.Start) - - // Closes file or disconnects from local/remote syslog - controller.OnShutdown(l.Close) - } -} - -type syslogAddress struct { - network string - address string -} - -func parseSyslogAddress(location string) *syslogAddress { - for prefix, network := range remoteSyslogPrefixes { - if strings.HasPrefix(location, prefix) { - return &syslogAddress{ - network: network, - address: strings.TrimPrefix(location, prefix), - } - } - } - - return nil -} - -// Start initializes logger opening files or local/remote syslog connections -func (l *Logger) Start() error { - // initialize mutex on start - l.fileMu = new(sync.RWMutex) - - var err error - -selectwriter: - switch l.Output { - case "", "stderr": - l.writer = os.Stderr - case "stdout": - l.writer = os.Stdout - case "syslog": - l.writer, err = gsyslog.NewLogger(gsyslog.LOG_ERR, "LOCAL0", "caddy") - if err != nil { - return err - } - default: - if address := parseSyslogAddress(l.Output); address != nil { - l.writer, err = gsyslog.DialLogger(address.network, address.address, gsyslog.LOG_ERR, "LOCAL0", "caddy") - - if err != nil { - return err - } - - break selectwriter - } - - var file *os.File - - file, err = os.OpenFile(l.Output, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644) - if err != nil { - return err - } - - if l.Roller != nil && !l.Roller.Disabled { - file.Close() - l.Roller.Filename = l.Output - l.writer = l.Roller.GetLogWriter() - } else { - l.writer = file - } - } - - l.Logger = log.New(l.writer, "", 0) - - return nil - -} - -// Close closes open log files or connections to syslog. 
-func (l *Logger) Close() error { - // don't close stdout or stderr - if l.writer == os.Stdout || l.writer == os.Stderr { - return nil - } - - // Will close local/remote syslog connections too :) - if closer, ok := l.writer.(io.WriteCloser); ok { - l.fileMu.Lock() - err := closer.Close() - l.fileMu.Unlock() - return err - } - - return nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/middleware.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/middleware.go deleted file mode 100644 index d470811a9..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/middleware.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpserver - -import ( - "fmt" - "net/http" - "os" - "path" - "time" - - "github.com/mholt/caddy" -) - -func init() { - initCaseSettings() -} - -type ( - // Middleware is the middle layer which represents the traditional - // idea of middleware: it chains one Handler to the next by being - // passed the next Handler in the chain. - Middleware func(Handler) Handler - - // ListenerMiddleware is similar to the Middleware type, except it - // chains one net.Listener to the next. - ListenerMiddleware func(caddy.Listener) caddy.Listener - - // Handler is like http.Handler except ServeHTTP may return a status - // code and/or error. - // - // If ServeHTTP writes the response header, it should return a status - // code of 0. 
This signals to other handlers before it that the response - // is already handled, and that they should not write to it also. Keep - // in mind that writing to the response body writes the header, too. - // - // If ServeHTTP encounters an error, it should return the error value - // so it can be logged by designated error-handling middleware. - // - // If writing a response after calling the next ServeHTTP method, the - // returned status code SHOULD be used when writing the response. - // - // If handling errors after calling the next ServeHTTP method, the - // returned error value SHOULD be logged or handled accordingly. - // - // Otherwise, return values should be propagated down the middleware - // chain by returning them unchanged. - Handler interface { - ServeHTTP(http.ResponseWriter, *http.Request) (int, error) - } - - // HandlerFunc is a convenience type like http.HandlerFunc, except - // ServeHTTP returns a status code and an error. See Handler - // documentation for more information. - HandlerFunc func(http.ResponseWriter, *http.Request) (int, error) - - // RequestMatcher checks to see if current request should be handled - // by underlying handler. - RequestMatcher interface { - Match(r *http.Request) bool - } - - // HandlerConfig is a middleware configuration. - // This makes it possible for middlewares to have a common - // configuration interface. - // - // TODO The long term plan is to get all middleware implement this - // interface for configurations. - HandlerConfig interface { - RequestMatcher - BasePath() string - } - - // ConfigSelector selects a configuration. - ConfigSelector []HandlerConfig -) - -// ServeHTTP implements the Handler interface. -func (f HandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - return f(w, r) -} - -// Select selects a Config. -// This chooses the config with the longest length. 
-func (c ConfigSelector) Select(r *http.Request) (config HandlerConfig) { - for i := range c { - if !c[i].Match(r) { - continue - } - if config == nil || len(c[i].BasePath()) > len(config.BasePath()) { - config = c[i] - } - } - return config -} - -// IndexFile looks for a file in /root/fpath/indexFile for each string -// in indexFiles. If an index file is found, it returns the root-relative -// path to the file and true. If no index file is found, empty string -// and false is returned. fpath must end in a forward slash '/' -// otherwise no index files will be tried (directory paths must end -// in a forward slash according to HTTP). -// -// All paths passed into and returned from this function use '/' as the -// path separator, just like URLs. IndexFle handles path manipulation -// internally for systems that use different path separators. -func IndexFile(root http.FileSystem, fpath string, indexFiles []string) (string, bool) { - if fpath[len(fpath)-1] != '/' || root == nil { - return "", false - } - for _, indexFile := range indexFiles { - // func (http.FileSystem).Open wants all paths separated by "/", - // regardless of operating system convention, so use - // path.Join instead of filepath.Join - fp := path.Join(fpath, indexFile) - f, err := root.Open(fp) - if err == nil { - f.Close() - return fp, true - } - } - return "", false -} - -// SetLastModifiedHeader checks if the provided modTime is valid and if it is sets it -// as a Last-Modified header to the ResponseWriter. If the modTime is in the future -// the current time is used instead. -func SetLastModifiedHeader(w http.ResponseWriter, modTime time.Time) { - if modTime.IsZero() || modTime.Equal(time.Unix(0, 0)) { - // the time does not appear to be valid. Don't put it in the response - return - } - - // RFC 2616 - Section 14.29 - Last-Modified: - // An origin server MUST NOT send a Last-Modified date which is later than the - // server's time of message origination. 
In such cases, where the resource's last - // modification would indicate some time in the future, the server MUST replace - // that date with the message origination date. - now := currentTime() - if modTime.After(now) { - modTime = now - } - - w.Header().Set("Last-Modified", modTime.UTC().Format(http.TimeFormat)) -} - -// CaseSensitivePath determines if paths should be case sensitive. -// This is configurable via CASE_SENSITIVE_PATH environment variable. -var CaseSensitivePath = false - -const caseSensitivePathEnv = "CASE_SENSITIVE_PATH" - -// initCaseSettings loads case sensitivity config from environment variable. -// -// This could have been in init, but init cannot be called from tests. -func initCaseSettings() { - switch os.Getenv(caseSensitivePathEnv) { - case "1", "true": - CaseSensitivePath = true - default: - CaseSensitivePath = false - } -} - -// MergeRequestMatchers merges multiple RequestMatchers into one. -// This allows a middleware to use multiple RequestMatchers. -func MergeRequestMatchers(matchers ...RequestMatcher) RequestMatcher { - return requestMatchers(matchers) -} - -type requestMatchers []RequestMatcher - -// Match satisfies RequestMatcher interface. -func (m requestMatchers) Match(r *http.Request) bool { - for _, matcher := range m { - if !matcher.Match(r) { - return false - } - } - return true -} - -// currentTime, as it is defined here, returns time.Now(). -// It's defined as a variable for mocking time in tests. -var currentTime = func() time.Time { return time.Now() } - -// EmptyNext is a no-op function that can be passed into -// Middleware functions so that the assignment to the -// Next field of the Handler can be tested. -// -// Used primarily for testing but needs to be exported so -// plugins can use this as a convenience. -var EmptyNext = HandlerFunc(func(w http.ResponseWriter, r *http.Request) (int, error) { return 0, nil }) - -// SameNext does a pointer comparison between next1 and next2. 
-// -// Used primarily for testing but needs to be exported so -// plugins can use this as a convenience. -func SameNext(next1, next2 Handler) bool { - return fmt.Sprintf("%v", next1) == fmt.Sprintf("%v", next2) -} - -// Context key constants. -const ( - // ReplacerCtxKey is the context key for a per-request replacer. - ReplacerCtxKey caddy.CtxKey = "replacer" - - // RemoteUserCtxKey is the key for the remote user of the request, if any (basicauth). - RemoteUserCtxKey caddy.CtxKey = "remote_user" - - // MitmCtxKey is the key for the result of MITM detection - MitmCtxKey caddy.CtxKey = "mitm" - - // RequestIDCtxKey is the key for the U4 UUID value - RequestIDCtxKey caddy.CtxKey = "request_id" -) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/mitm.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/mitm.go deleted file mode 100644 index 6736610e9..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/mitm.go +++ /dev/null @@ -1,780 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpserver - -import ( - "bytes" - "context" - "crypto/tls" - "io" - "net" - "net/http" - "strconv" - "strings" - "sync" - - "github.com/mholt/caddy/caddytls" - "github.com/mholt/caddy/telemetry" -) - -// tlsHandler is a http.Handler that will inject a value -// into the request context indicating if the TLS -// connection is likely being intercepted. 
-type tlsHandler struct { - next http.Handler - listener *tlsHelloListener - closeOnMITM bool // whether to close connection on MITM; TODO: expose through new directive -} - -// ServeHTTP checks the User-Agent. For the four main browsers (Chrome, -// Edge, Firefox, and Safari) indicated by the User-Agent, the properties -// of the TLS Client Hello will be compared. The context value "mitm" will -// be set to a value indicating if it is likely that the underlying TLS -// connection is being intercepted. -// -// Note that due to Microsoft's decision to intentionally make IE/Edge -// user agents obscure (and look like other browsers), this may offer -// less accuracy for IE/Edge clients. -// -// This MITM detection capability is based on research done by Durumeric, -// Halderman, et. al. in "The Security Impact of HTTPS Interception" (NDSS '17): -// https://jhalderm.com/pub/papers/interception-ndss17.pdf -func (h *tlsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // TODO: one request per connection, we should report UA in connection with - // handshake (reported in caddytls package) and our MITM assessment - - if h.listener == nil { - h.next.ServeHTTP(w, r) - return - } - - h.listener.helloInfosMu.RLock() - info := h.listener.helloInfos[r.RemoteAddr] - h.listener.helloInfosMu.RUnlock() - - ua := r.Header.Get("User-Agent") - uaHash := telemetry.FastHash([]byte(ua)) - - // report this request's UA in connection with this ClientHello - go telemetry.AppendUnique("tls_client_hello_ua:"+caddytls.ClientHelloInfo(info).Key(), uaHash) - - var checked, mitm bool - if r.Header.Get("X-BlueCoat-Via") != "" || // Blue Coat (masks User-Agent header to generic values) - r.Header.Get("X-FCCKV2") != "" || // Fortinet - info.advertisesHeartbeatSupport() { // no major browsers have ever implemented Heartbeat - // TODO: Move the heartbeat check into each "looksLike" function... 
- checked = true - mitm = true - } else if strings.Contains(ua, "Edge") || strings.Contains(ua, "MSIE") || - strings.Contains(ua, "Trident") { - checked = true - mitm = !info.looksLikeEdge() - } else if strings.Contains(ua, "Chrome") { - checked = true - mitm = !info.looksLikeChrome() - } else if strings.Contains(ua, "CriOS") { - // Chrome on iOS sometimes uses iOS-provided TLS stack (which looks exactly like Safari) - // but for connections that don't render a web page (favicon, etc.) it uses its own... - checked = true - mitm = !info.looksLikeChrome() && !info.looksLikeSafari() - } else if strings.Contains(ua, "Firefox") { - checked = true - if strings.Contains(ua, "Windows") { - ver := getVersion(ua, "Firefox") - if ver == 45.0 || ver == 52.0 { - mitm = !info.looksLikeTor() - } else { - mitm = !info.looksLikeFirefox() - } - } else { - mitm = !info.looksLikeFirefox() - } - } else if strings.Contains(ua, "Safari") { - checked = true - mitm = !info.looksLikeSafari() - } - - if checked { - r = r.WithContext(context.WithValue(r.Context(), MitmCtxKey, mitm)) - if mitm { - go telemetry.AppendUnique("http_mitm", "likely") - } else { - go telemetry.AppendUnique("http_mitm", "unlikely") - } - } else { - go telemetry.AppendUnique("http_mitm", "unknown") - } - - if mitm && h.closeOnMITM { - // TODO: This termination might need to happen later in the middleware - // chain in order to be picked up by the log directive, in case the site - // owner still wants to log this event. It'll probably require a new - // directive. If this feature is useful, we can finish implementing this. - r.Close = true - return - } - - h.next.ServeHTTP(w, r) -} - -// getVersion returns a (possibly simplified) representation of the version string -// from a UserAgent string. It returns a float, so it can represent major and minor -// versions; the rest of the version is just tacked on behind the decimal point. -// The purpose of this is to stay simple while allowing for basic, fast comparisons. 
-// If the version for softwareName is not found in ua, -1 is returned. -func getVersion(ua, softwareName string) float64 { - search := softwareName + "/" - start := strings.Index(ua, search) - if start < 0 { - return -1 - } - start += len(search) - end := strings.Index(ua[start:], " ") - if end < 0 { - end = len(ua) - } else { - end += start - } - strVer := strings.Replace(ua[start:end], "-", "", -1) - firstDot := strings.Index(strVer, ".") - if firstDot >= 0 { - strVer = strVer[:firstDot+1] + strings.Replace(strVer[firstDot+1:], ".", "", -1) - } - ver, err := strconv.ParseFloat(strVer, 64) - if err != nil { - return -1 - } - return ver -} - -// clientHelloConn reads the ClientHello -// and stores it in the attached listener. -type clientHelloConn struct { - net.Conn - listener *tlsHelloListener - readHello bool // whether ClientHello has been read - buf *bytes.Buffer -} - -// Read reads from c.Conn (by letting the standard library -// do the reading off the wire), with the exception of -// getting a copy of the ClientHello so it can parse it. 
-func (c *clientHelloConn) Read(b []byte) (n int, err error) { - // if we've already read the ClientHello, pass thru - if c.readHello { - return c.Conn.Read(b) - } - - // we let the standard lib read off the wire for us, and - // tee that into our buffer so we can read the ClientHello - tee := io.TeeReader(c.Conn, c.buf) - n, err = tee.Read(b) - if err != nil { - return - } - if c.buf.Len() < 5 { - return // need to read more bytes for header - } - - // read the header bytes - hdr := make([]byte, 5) - _, err = io.ReadFull(c.buf, hdr) - if err != nil { - return // this would be highly unusual and sad - } - - // get length of the ClientHello message and read it - length := int(uint16(hdr[3])<<8 | uint16(hdr[4])) - if c.buf.Len() < length { - return // need to read more bytes - } - hello := make([]byte, length) - _, err = io.ReadFull(c.buf, hello) - if err != nil { - return - } - bufpool.Put(c.buf) // buffer no longer needed - - // parse the ClientHello and store it in the map - rawParsed := parseRawClientHello(hello) - c.listener.helloInfosMu.Lock() - c.listener.helloInfos[c.Conn.RemoteAddr().String()] = rawParsed - c.listener.helloInfosMu.Unlock() - - // report this ClientHello to telemetry - chKey := caddytls.ClientHelloInfo(rawParsed).Key() - go telemetry.SetNested("tls_client_hello", chKey, rawParsed) - go telemetry.AppendUnique("tls_client_hello_count", chKey) - - c.readHello = true - return -} - -// parseRawClientHello parses data which contains the raw -// TLS Client Hello message. It extracts relevant information -// into info. Any error reading the Client Hello (such as -// insufficient length or invalid length values) results in -// a silent error and an incomplete info struct, since there -// is no good way to handle an error like this during Accept(). -// The data is expected to contain the whole ClientHello and -// ONLY the ClientHello. -// -// The majority of this code is borrowed from the Go standard -// library, which is (c) The Go Authors. 
It has been modified -// to fit this use case. -func parseRawClientHello(data []byte) (info rawHelloInfo) { - if len(data) < 42 { - return - } - info.Version = uint16(data[4])<<8 | uint16(data[5]) - sessionIDLen := int(data[38]) - if sessionIDLen > 32 || len(data) < 39+sessionIDLen { - return - } - data = data[39+sessionIDLen:] - if len(data) < 2 { - return - } - // cipherSuiteLen is the number of bytes of cipher suite numbers. Since - // they are uint16s, the number must be even. - cipherSuiteLen := int(data[0])<<8 | int(data[1]) - if cipherSuiteLen%2 == 1 || len(data) < 2+cipherSuiteLen { - return - } - numCipherSuites := cipherSuiteLen / 2 - // read in the cipher suites - info.CipherSuites = make([]uint16, numCipherSuites) - for i := 0; i < numCipherSuites; i++ { - info.CipherSuites[i] = uint16(data[2+2*i])<<8 | uint16(data[3+2*i]) - } - data = data[2+cipherSuiteLen:] - if len(data) < 1 { - return - } - // read in the compression methods - compressionMethodsLen := int(data[0]) - if len(data) < 1+compressionMethodsLen { - return - } - info.CompressionMethods = data[1 : 1+compressionMethodsLen] - - data = data[1+compressionMethodsLen:] - - // ClientHello is optionally followed by extension data - if len(data) < 2 { - return - } - extensionsLength := int(data[0])<<8 | int(data[1]) - data = data[2:] - if extensionsLength != len(data) { - return - } - - // read in each extension, and extract any relevant information - // from extensions we care about - for len(data) != 0 { - if len(data) < 4 { - return - } - extension := uint16(data[0])<<8 | uint16(data[1]) - length := int(data[2])<<8 | int(data[3]) - data = data[4:] - if len(data) < length { - return - } - - // record that the client advertised support for this extension - info.Extensions = append(info.Extensions, extension) - - switch extension { - case extensionSupportedCurves: - // http://tools.ietf.org/html/rfc4492#section-5.5.1 - if length < 2 { - return - } - l := int(data[0])<<8 | int(data[1]) - if l%2 == 1 
|| length != l+2 { - return - } - numCurves := l / 2 - info.Curves = make([]tls.CurveID, numCurves) - d := data[2:] - for i := 0; i < numCurves; i++ { - info.Curves[i] = tls.CurveID(d[0])<<8 | tls.CurveID(d[1]) - d = d[2:] - } - case extensionSupportedPoints: - // http://tools.ietf.org/html/rfc4492#section-5.5.2 - if length < 1 { - return - } - l := int(data[0]) - if length != l+1 { - return - } - info.Points = make([]uint8, l) - copy(info.Points, data[1:]) - } - - data = data[length:] - } - - return -} - -// newTLSListener returns a new tlsHelloListener that wraps ln. -func newTLSListener(ln net.Listener, config *tls.Config) *tlsHelloListener { - return &tlsHelloListener{ - Listener: ln, - config: config, - helloInfos: make(map[string]rawHelloInfo), - } -} - -// tlsHelloListener is a TLS listener that is specially designed -// to read the ClientHello manually so we can extract necessary -// information from it. Each ClientHello message is mapped by -// the remote address of the client, which must be removed when -// the connection is closed (use ConnState). -type tlsHelloListener struct { - net.Listener - config *tls.Config - helloInfos map[string]rawHelloInfo - helloInfosMu sync.RWMutex -} - -// Accept waits for and returns the next connection to the listener. -// After it accepts the underlying connection, it reads the -// ClientHello message and stores the parsed data into a map on l. -func (l *tlsHelloListener) Accept() (net.Conn, error) { - conn, err := l.Listener.Accept() - if err != nil { - return nil, err - } - buf := bufpool.Get().(*bytes.Buffer) - buf.Reset() - helloConn := &clientHelloConn{Conn: conn, listener: l, buf: buf} - return tls.Server(helloConn, l.config), nil -} - -// rawHelloInfo contains the "raw" data parsed from the TLS -// Client Hello. No interpretation is done on the raw data. -// -// The methods on this type implement heuristics described -// by Durumeric, Halderman, et. al. 
in -// "The Security Impact of HTTPS Interception": -// https://jhalderm.com/pub/papers/interception-ndss17.pdf -type rawHelloInfo caddytls.ClientHelloInfo - -// advertisesHeartbeatSupport returns true if info indicates -// that the client supports the Heartbeat extension. -func (info rawHelloInfo) advertisesHeartbeatSupport() bool { - for _, ext := range info.Extensions { - if ext == extensionHeartbeat { - return true - } - } - return false -} - -// looksLikeFirefox returns true if info looks like a handshake -// from a modern version of Firefox. -func (info rawHelloInfo) looksLikeFirefox() bool { - // "To determine whether a Firefox session has been - // intercepted, we check for the presence and order - // of extensions, cipher suites, elliptic curves, - // EC point formats, and handshake compression methods." (early 2016) - - // We check for the presence and order of the extensions. - // Note: Sometimes 0x15 (21, padding) is present, sometimes not. - // Note: Firefox 51+ does not advertise 0x3374 (13172, NPN). - // Note: Firefox doesn't advertise 0x0 (0, SNI) when connecting to IP addresses. - // Note: Firefox 55+ doesn't appear to advertise 0xFF03 (65283, short headers). It used to be between 5 and 13. - // Note: Firefox on Fedora (or RedHat) doesn't include ECC suites because of patent liability. - requiredExtensionsOrder := []uint16{23, 65281, 10, 11, 35, 16, 5, 13} - if !assertPresenceAndOrdering(requiredExtensionsOrder, info.Extensions, true) { - return false - } - - // We check for both presence of curves and their ordering. - requiredCurves := []tls.CurveID{29, 23, 24, 25} - if len(info.Curves) < len(requiredCurves) { - return false - } - for i := range requiredCurves { - if info.Curves[i] != requiredCurves[i] { - return false - } - } - if len(info.Curves) > len(requiredCurves) { - // newer Firefox (55 Nightly?) 
may have additional curves at end of list - allowedCurves := []tls.CurveID{256, 257} - for i := range allowedCurves { - if info.Curves[len(requiredCurves)+i] != allowedCurves[i] { - return false - } - } - } - - if hasGreaseCiphers(info.CipherSuites) { - return false - } - - // We check for order of cipher suites but not presence, since - // according to the paper, cipher suites may be not be added - // or reordered by the user, but they may be disabled. - expectedCipherSuiteOrder := []uint16{ - TLS_AES_128_GCM_SHA256, // 0x1301 - TLS_CHACHA20_POLY1305_SHA256, // 0x1303 - TLS_AES_256_GCM_SHA384, // 0x1302 - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, // 0xc02b - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // 0xc02f - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, // 0xcca9 - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, // 0xcca8 - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, // 0xc02c - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, // 0xc030 - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // 0xc00a - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // 0xc009 - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // 0xc013 - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // 0xc014 - TLS_DHE_RSA_WITH_AES_128_CBC_SHA, // 0x33 - TLS_DHE_RSA_WITH_AES_256_CBC_SHA, // 0x39 - tls.TLS_RSA_WITH_AES_128_CBC_SHA, // 0x2f - tls.TLS_RSA_WITH_AES_256_CBC_SHA, // 0x35 - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // 0xa - } - return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.CipherSuites, false) -} - -// looksLikeChrome returns true if info looks like a handshake -// from a modern version of Chrome. -func (info rawHelloInfo) looksLikeChrome() bool { - // "We check for ciphers and extensions that Chrome is known - // to not support, but do not check for the inclusion of - // specific ciphers or extensions, nor do we validate their - // order. When appropriate, we check the presence and order - // of elliptic curves, compression methods, and EC point formats." 
(early 2016) - - // Not in Chrome 56, but present in Safari 10 (Feb. 2017): - // TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 (0xc024) - // TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 (0xc023) - // TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA (0xc00a) - // TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA (0xc009) - // TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 (0xc028) - // TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 (0xc027) - // TLS_RSA_WITH_AES_256_CBC_SHA256 (0x3d) - // TLS_RSA_WITH_AES_128_CBC_SHA256 (0x3c) - - // Not in Chrome 56, but present in Firefox 51 (Feb. 2017): - // TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA (0xc00a) - // TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA (0xc009) - // TLS_DHE_RSA_WITH_AES_128_CBC_SHA (0x33) - // TLS_DHE_RSA_WITH_AES_256_CBC_SHA (0x39) - - // Selected ciphers present in Chrome mobile (Feb. 2017): - // 0xc00a, 0xc014, 0xc009, 0x9c, 0x9d, 0x2f, 0x35, 0xa - - chromeCipherExclusions := map[uint16]struct{}{ - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384: {}, // 0xc024 - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: {}, // 0xc023 - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384: {}, // 0xc028 - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: {}, // 0xc027 - TLS_RSA_WITH_AES_256_CBC_SHA256: {}, // 0x3d - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: {}, // 0x3c - TLS_DHE_RSA_WITH_AES_128_CBC_SHA: {}, // 0x33 - TLS_DHE_RSA_WITH_AES_256_CBC_SHA: {}, // 0x39 - } - for _, ext := range info.CipherSuites { - if _, ok := chromeCipherExclusions[ext]; ok { - return false - } - } - - // Chrome does not include curve 25 (CurveP521) (as of Chrome 56, Feb. 2017). - for _, curve := range info.Curves { - if curve == 25 { - return false - } - } - - if !hasGreaseCiphers(info.CipherSuites) { - return false - } - - return true -} - -// looksLikeEdge returns true if info looks like a handshake -// from a modern version of MS Edge. 
-func (info rawHelloInfo) looksLikeEdge() bool { - // "SChannel connections can by uniquely identified because SChannel - // is the only TLS library we tested that includes the OCSP status - // request extension before the supported groups and EC point formats - // extensions." (early 2016) - // - // More specifically, the OCSP status request extension appears - // *directly* before the other two extensions, which occur in that - // order. (I contacted the authors for clarification and verified it.) - for i, ext := range info.Extensions { - if ext == extensionOCSPStatusRequest { - if len(info.Extensions) <= i+2 { - return false - } - if info.Extensions[i+1] != extensionSupportedCurves || - info.Extensions[i+2] != extensionSupportedPoints { - return false - } - } - } - - for _, cs := range info.CipherSuites { - // As of Feb. 2017, Edge does not have 0xff, but Avast adds it - if cs == scsvRenegotiation { - return false - } - // Edge and modern IE do not have 0x4 or 0x5, but Blue Coat does - if cs == TLS_RSA_WITH_RC4_128_MD5 || cs == tls.TLS_RSA_WITH_RC4_128_SHA { - return false - } - } - - if hasGreaseCiphers(info.CipherSuites) { - return false - } - - return true -} - -// looksLikeSafari returns true if info looks like a handshake -// from a modern version of MS Safari. -func (info rawHelloInfo) looksLikeSafari() bool { - // "One unique aspect of Secure Transport is that it includes - // the TLS_EMPTY_RENEGOTIATION_INFO_SCSV (0xff) cipher first, - // whereas the other libraries we investigated include the - // cipher last. Similar to Microsoft, Apple has changed - // TLS behavior in minor OS updates, which are not indicated - // in the HTTP User-Agent header. We allow for any of the - // updates when validating handshakes, and we check for the - // presence and ordering of ciphers, extensions, elliptic - // curves, and compression methods." (early 2016) - - // Note that any C lib (e.g. 
curl) compiled on macOS - // will probably use Secure Transport which will also - // share the TLS handshake characteristics of Safari. - - // We check for the presence and order of the extensions. - requiredExtensionsOrder := []uint16{10, 11, 13, 13172, 16, 5, 18, 23} - if !assertPresenceAndOrdering(requiredExtensionsOrder, info.Extensions, true) { - // Safari on iOS 11 (beta) uses different set/ordering of extensions - requiredExtensionsOrderiOS11 := []uint16{65281, 0, 23, 13, 5, 13172, 18, 16, 11, 10} - if !assertPresenceAndOrdering(requiredExtensionsOrderiOS11, info.Extensions, true) { - return false - } - } else { - // For these versions of Safari, expect TLS_EMPTY_RENEGOTIATION_INFO_SCSV first. - if len(info.CipherSuites) < 1 { - return false - } - if info.CipherSuites[0] != scsvRenegotiation { - return false - } - } - - if hasGreaseCiphers(info.CipherSuites) { - return false - } - - // We check for order and presence of cipher suites - expectedCipherSuiteOrder := []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, // 0xc02c - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, // 0xc02b - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, // 0xc024 - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, // 0xc023 - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // 0xc00a - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // 0xc009 - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, // 0xc030 - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // 0xc02f - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, // 0xc028 - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, // 0xc027 - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // 0xc014 - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // 0xc013 - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // 0x9d - tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // 0x9c - TLS_RSA_WITH_AES_256_CBC_SHA256, // 0x3d - tls.TLS_RSA_WITH_AES_128_CBC_SHA256, // 0x3c - tls.TLS_RSA_WITH_AES_256_CBC_SHA, // 0x35 - tls.TLS_RSA_WITH_AES_128_CBC_SHA, // 0x2f - } - return assertPresenceAndOrdering(expectedCipherSuiteOrder, 
info.CipherSuites, true) -} - -// looksLikeTor returns true if the info looks like a ClientHello from Tor browser -// (based on Firefox). -func (info rawHelloInfo) looksLikeTor() bool { - requiredExtensionsOrder := []uint16{10, 11, 16, 5, 13} - if !assertPresenceAndOrdering(requiredExtensionsOrder, info.Extensions, true) { - return false - } - - // check for session tickets support; Tor doesn't support them to prevent tracking - for _, ext := range info.Extensions { - if ext == 35 { - return false - } - } - - // We check for both presence of curves and their ordering, including - // an optional curve at the beginning (for Tor based on Firefox 52) - infoCurves := info.Curves - if len(info.Curves) == 4 { - if info.Curves[0] != 29 { - return false - } - infoCurves = info.Curves[1:] - } - requiredCurves := []tls.CurveID{23, 24, 25} - if len(infoCurves) < len(requiredCurves) { - return false - } - for i := range requiredCurves { - if infoCurves[i] != requiredCurves[i] { - return false - } - } - - if hasGreaseCiphers(info.CipherSuites) { - return false - } - - // We check for order of cipher suites but not presence, since - // according to the paper, cipher suites may be not be added - // or reordered by the user, but they may be disabled. 
- expectedCipherSuiteOrder := []uint16{ - TLS_AES_128_GCM_SHA256, // 0x1301 - TLS_CHACHA20_POLY1305_SHA256, // 0x1303 - TLS_AES_256_GCM_SHA384, // 0x1302 - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, // 0xc02b - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // 0xc02f - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, // 0xcca9 - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, // 0xcca8 - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, // 0xc02c - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, // 0xc030 - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // 0xc00a - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // 0xc009 - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // 0xc013 - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // 0xc014 - TLS_DHE_RSA_WITH_AES_128_CBC_SHA, // 0x33 - TLS_DHE_RSA_WITH_AES_256_CBC_SHA, // 0x39 - tls.TLS_RSA_WITH_AES_128_CBC_SHA, // 0x2f - tls.TLS_RSA_WITH_AES_256_CBC_SHA, // 0x35 - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // 0xa - } - return assertPresenceAndOrdering(expectedCipherSuiteOrder, info.CipherSuites, false) -} - -// assertPresenceAndOrdering will return true if candidateList contains -// the items in requiredItems in the same order as requiredItems. -// -// If requiredIsSubset is true, then all items in requiredItems must be -// present in candidateList. If requiredIsSubset is false, then requiredItems -// may contain items that are not in candidateList. -// -// In all cases, the order of requiredItems is enforced. 
-func assertPresenceAndOrdering(requiredItems, candidateList []uint16, requiredIsSubset bool) bool { - superset := requiredItems - subset := candidateList - if requiredIsSubset { - superset = candidateList - subset = requiredItems - } - - var j int - for _, item := range subset { - var found bool - for j < len(superset) { - if superset[j] == item { - found = true - break - } - j++ - } - if j == len(superset) && !found { - return false - } - } - return true -} - -func hasGreaseCiphers(cipherSuites []uint16) bool { - for _, cipher := range cipherSuites { - if _, ok := greaseCiphers[cipher]; ok { - return true - } - } - return false -} - -// pool buffers so we can reuse allocations over time -var bufpool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -var greaseCiphers = map[uint16]struct{}{ - 0x0A0A: {}, - 0x1A1A: {}, - 0x2A2A: {}, - 0x3A3A: {}, - 0x4A4A: {}, - 0x5A5A: {}, - 0x6A6A: {}, - 0x7A7A: {}, - 0x8A8A: {}, - 0x9A9A: {}, - 0xAAAA: {}, - 0xBABA: {}, - 0xCACA: {}, - 0xDADA: {}, - 0xEAEA: {}, - 0xFAFA: {}, -} - -// Define variables used for TLS communication -const ( - extensionOCSPStatusRequest = 5 - extensionSupportedCurves = 10 // also called "SupportedGroups" - extensionSupportedPoints = 11 - extensionHeartbeat = 15 - - scsvRenegotiation = 0xff - - // cipher suites missing from the crypto/tls package, - // in no particular order here - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xc024 - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xc028 - TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x3d - TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x33 - TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x39 - TLS_RSA_WITH_RC4_128_MD5 = 0x4 - - // new PSK ciphers introduced by TLS 1.3, not (yet) in crypto/tls - // https://tlswg.github.io/tls13-spec/#rfc.appendix.A.4) - TLS_AES_128_GCM_SHA256 = 0x1301 - TLS_AES_256_GCM_SHA384 = 0x1302 - TLS_CHACHA20_POLY1305_SHA256 = 0x1303 - TLS_AES_128_CCM_SHA256 = 0x1304 - TLS_AES_128_CCM_8_SHA256 = 0x1305 -) diff --git 
a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/path.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/path.go deleted file mode 100644 index 524bb0252..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/path.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpserver - -import ( - "net/http" - "path" - "strings" -) - -// Path represents a URI path. It should usually be -// set to the value of a request path. -type Path string - -// Matches checks to see if base matches p. The correct -// usage of this method sets p as the request path, and -// base as a Caddyfile (user-defined) rule path. -// -// Path matching will probably not always be a direct -// comparison; this method assures that paths can be -// easily and consistently matched. -// -// Multiple slashes are collapsed/merged. See issue #1859. 
-func (p Path) Matches(base string) bool { - if base == "/" || base == "" { - return true - } - - // sanitize the paths for comparison, very important - // (slightly lossy if the base path requires multiple - // consecutive forward slashes, since those will be merged) - pHasTrailingSlash := strings.HasSuffix(string(p), "/") - baseHasTrailingSlash := strings.HasSuffix(base, "/") - p = Path(path.Clean(string(p))) - base = path.Clean(base) - if pHasTrailingSlash { - p += "/" - } - if baseHasTrailingSlash { - base += "/" - } - - if CaseSensitivePath { - return strings.HasPrefix(string(p), base) - } - return strings.HasPrefix(strings.ToLower(string(p)), strings.ToLower(base)) -} - -// PathMatcher is a Path RequestMatcher. -type PathMatcher string - -// Match satisfies RequestMatcher. -func (p PathMatcher) Match(r *http.Request) bool { - return Path(r.URL.Path).Matches(string(p)) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/plugin.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/plugin.go deleted file mode 100644 index 58dbfd50d..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/plugin.go +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package httpserver - -import ( - "crypto/tls" - "flag" - "fmt" - "log" - "net" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyfile" - "github.com/mholt/caddy/caddyhttp/staticfiles" - "github.com/mholt/caddy/caddytls" - "github.com/mholt/caddy/telemetry" - "github.com/mholt/certmagic" -) - -const serverType = "http" - -func init() { - flag.StringVar(&HTTPPort, "http-port", HTTPPort, "Default port to use for HTTP") - flag.StringVar(&HTTPSPort, "https-port", HTTPSPort, "Default port to use for HTTPS") - flag.StringVar(&Host, "host", DefaultHost, "Default host") - flag.StringVar(&Port, "port", DefaultPort, "Default port") - flag.StringVar(&Root, "root", DefaultRoot, "Root path of default site") - flag.DurationVar(&GracefulTimeout, "grace", 5*time.Second, "Maximum duration of graceful shutdown") - flag.BoolVar(&HTTP2, "http2", true, "Use HTTP/2") - flag.BoolVar(&QUIC, "quic", false, "Use experimental QUIC") - - caddy.RegisterServerType(serverType, caddy.ServerType{ - Directives: func() []string { return directives }, - DefaultInput: func() caddy.Input { - if Port == DefaultPort && Host != "" { - // by leaving the port blank in this case we give auto HTTPS - // a chance to set the port to 443 for us - return caddy.CaddyfileInput{ - Contents: []byte(fmt.Sprintf("%s\nroot %s", Host, Root)), - ServerTypeName: serverType, - } - } - return caddy.CaddyfileInput{ - Contents: []byte(fmt.Sprintf("%s:%s\nroot %s", Host, Port, Root)), - ServerTypeName: serverType, - } - }, - NewContext: newContext, - }) - caddy.RegisterCaddyfileLoader("short", caddy.LoaderFunc(shortCaddyfileLoader)) - caddy.RegisterParsingCallback(serverType, "root", hideCaddyfile) - caddy.RegisterParsingCallback(serverType, "tls", activateHTTPS) - caddytls.RegisterConfigGetter(serverType, func(c *caddy.Controller) *caddytls.Config { return GetConfig(c).TLS }) - - // disable the caddytls package reporting ClientHellos - // to 
telemetry, since our MITM detector does this but - // with more information than the standard lib provides - // (as of May 2018) - caddytls.ClientHelloTelemetry = false -} - -// hideCaddyfile hides the source/origin Caddyfile if it is within the -// site root. This function should be run after parsing the root directive. -func hideCaddyfile(cctx caddy.Context) error { - ctx := cctx.(*httpContext) - for _, cfg := range ctx.siteConfigs { - // if no Caddyfile exists exit. - if cfg.originCaddyfile == "" { - return nil - } - absRoot, err := filepath.Abs(cfg.Root) - if err != nil { - return err - } - absOriginCaddyfile, err := filepath.Abs(cfg.originCaddyfile) - if err != nil { - return err - } - if strings.HasPrefix(absOriginCaddyfile, absRoot) { - cfg.HiddenFiles = append(cfg.HiddenFiles, filepath.ToSlash(strings.TrimPrefix(absOriginCaddyfile, absRoot))) - } - } - return nil -} - -func newContext(inst *caddy.Instance) caddy.Context { - return &httpContext{instance: inst, keysToSiteConfigs: make(map[string]*SiteConfig)} -} - -type httpContext struct { - instance *caddy.Instance - - // keysToSiteConfigs maps an address at the top of a - // server block (a "key") to its SiteConfig. Not all - // SiteConfigs will be represented here, only ones - // that appeared in the Caddyfile. - keysToSiteConfigs map[string]*SiteConfig - - // siteConfigs is the master list of all site configs. - siteConfigs []*SiteConfig -} - -func (h *httpContext) saveConfig(key string, cfg *SiteConfig) { - h.siteConfigs = append(h.siteConfigs, cfg) - h.keysToSiteConfigs[key] = cfg -} - -// InspectServerBlocks make sure that everything checks out before -// executing directives and otherwise prepares the directives to -// be parsed and executed. 
-func (h *httpContext) InspectServerBlocks(sourceFile string, serverBlocks []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) { - siteAddrs := make(map[string]string) - - // For each address in each server block, make a new config - for _, sb := range serverBlocks { - for _, key := range sb.Keys { - addr, err := standardizeAddress(key) - if err != nil { - return serverBlocks, err - } - - addr = addr.Normalize() - key = addr.Key() - if _, dup := h.keysToSiteConfigs[key]; dup { - return serverBlocks, fmt.Errorf("duplicate site key: %s", key) - } - - // Fill in address components from command line so that middleware - // have access to the correct information during setup - if addr.Host == "" && Host != DefaultHost { - addr.Host = Host - } - if addr.Port == "" && Port != DefaultPort { - addr.Port = Port - } - - // Make sure the adjusted site address is distinct - addrCopy := addr // make copy so we don't disturb the original, carefully-parsed address struct - if addrCopy.Port == "" && Port == DefaultPort { - addrCopy.Port = Port - } - addrStr := addrCopy.String() - if otherSiteKey, dup := siteAddrs[addrStr]; dup { - err := fmt.Errorf("duplicate site address: %s", addrStr) - if (addrCopy.Host == Host && Host != DefaultHost) || - (addrCopy.Port == Port && Port != DefaultPort) { - err = fmt.Errorf("site defined as %s is a duplicate of %s because of modified "+ - "default host and/or port values (usually via -host or -port flags)", key, otherSiteKey) - } - return serverBlocks, err - } - siteAddrs[addrStr] = key - - // If default HTTP or HTTPS ports have been customized, - // make sure the ACME challenge ports match - var altHTTPPort, altTLSALPNPort int - if HTTPPort != DefaultHTTPPort { - portInt, err := strconv.Atoi(HTTPPort) - if err != nil { - return nil, err - } - altHTTPPort = portInt - } - if HTTPSPort != DefaultHTTPSPort { - portInt, err := strconv.Atoi(HTTPSPort) - if err != nil { - return nil, err - } - altTLSALPNPort = portInt - } - - // Make our 
caddytls.Config, which has a pointer to the - // instance's certificate cache and enough information - // to use automatic HTTPS when the time comes - caddytlsConfig, err := caddytls.NewConfig(h.instance) - if err != nil { - return nil, fmt.Errorf("creating new caddytls configuration: %v", err) - } - caddytlsConfig.Hostname = addr.Host - caddytlsConfig.Manager.AltHTTPPort = altHTTPPort - caddytlsConfig.Manager.AltTLSALPNPort = altTLSALPNPort - - // Save the config to our master list, and key it for lookups - cfg := &SiteConfig{ - Addr: addr, - Root: Root, - TLS: caddytlsConfig, - originCaddyfile: sourceFile, - IndexPages: staticfiles.DefaultIndexPages, - } - h.saveConfig(key, cfg) - } - } - - // For sites that have gzip (which gets chained in - // before the error handler) we should ensure that the - // errors directive also appears so error pages aren't - // written after the gzip writer is closed. See #616. - for _, sb := range serverBlocks { - _, hasGzip := sb.Tokens["gzip"] - _, hasErrors := sb.Tokens["errors"] - if hasGzip && !hasErrors { - sb.Tokens["errors"] = []caddyfile.Token{{Text: "errors"}} - } - } - - return serverBlocks, nil -} - -// MakeServers uses the newly-created siteConfigs to -// create and return a list of server instances. 
-func (h *httpContext) MakeServers() ([]caddy.Server, error) { - // make a rough estimate as to whether we're in a "production - // environment/system" - start by assuming that most production - // servers will set their default CA endpoint to a public, - // trusted CA (obviously not a perfect heuristic) - var looksLikeProductionCA bool - for _, publicCAEndpoint := range caddytls.KnownACMECAs { - if strings.Contains(certmagic.Default.CA, publicCAEndpoint) { - looksLikeProductionCA = true - break - } - } - - // Iterate each site configuration and make sure that: - // 1) TLS is disabled for explicitly-HTTP sites (necessary - // when an HTTP address shares a block containing tls) - // 2) if QUIC is enabled, TLS ClientAuth is not, because - // currently, QUIC does not support ClientAuth (TODO: - // revisit this when our QUIC implementation supports it) - // 3) if TLS ClientAuth is used, StrictHostMatching is on - var atLeastOneSiteLooksLikeProduction bool - for _, cfg := range h.siteConfigs { - // see if all the addresses (both sites and - // listeners) are loopback to help us determine - // if this is a "production" instance or not - if !atLeastOneSiteLooksLikeProduction { - if !caddy.IsLoopback(cfg.Addr.Host) && - !caddy.IsLoopback(cfg.ListenHost) && - (caddytls.QualifiesForManagedTLS(cfg) || - certmagic.HostQualifies(cfg.Addr.Host)) { - atLeastOneSiteLooksLikeProduction = true - } - } - - // make sure TLS is disabled for explicitly-HTTP sites - // (necessary when HTTP address shares a block containing tls) - if !cfg.TLS.Enabled { - continue - } - if cfg.Addr.Port == HTTPPort || cfg.Addr.Scheme == "http" { - cfg.TLS.Enabled = false - log.Printf("[WARNING] TLS disabled for %s", cfg.Addr) - } else if cfg.Addr.Scheme == "" { - // set scheme to https ourselves, since TLS is enabled - // and it was not explicitly set to something else. 
this - // makes it appear as "https" when we print the list of - // running sites; otherwise "http" would be assumed which - // is incorrect for this site. - cfg.Addr.Scheme = "https" - } - if cfg.Addr.Port == "" && ((!cfg.TLS.Manual && !cfg.TLS.SelfSigned) || cfg.TLS.Manager.OnDemand != nil) { - // this is vital, otherwise the function call below that - // sets the listener address will use the default port - // instead of 443 because it doesn't know about TLS. - cfg.Addr.Port = HTTPSPort - } - if cfg.TLS.ClientAuth != tls.NoClientCert { - if QUIC { - return nil, fmt.Errorf("cannot enable TLS client authentication with QUIC, because QUIC does not yet support it") - } - // this must be enabled so that a client cannot connect - // using SNI for another site on this listener that - // does NOT require ClientAuth, and then send HTTP - // requests with the Host header of this site which DOES - // require client auth, thus bypassing it... - cfg.StrictHostMatching = true - } - } - - // we must map (group) each config to a bind address - groups, err := groupSiteConfigsByListenAddr(h.siteConfigs) - if err != nil { - return nil, err - } - - // then we create a server for each group - var servers []caddy.Server - for addr, group := range groups { - s, err := NewServer(addr, group) - if err != nil { - return nil, err - } - servers = append(servers, s) - } - - // NOTE: This value is only a "good guess". Quite often, development - // environments will use internal DNS or a local hosts file to serve - // real-looking domains in local development. We can't easily tell - // which without doing a DNS lookup, so this guess is definitely naive, - // and if we ever want a better guess, we will have to do DNS lookups. 
- deploymentGuess := "dev" - if looksLikeProductionCA && atLeastOneSiteLooksLikeProduction { - deploymentGuess = "prod" - } - telemetry.Set("http_deployment_guess", deploymentGuess) - telemetry.Set("http_num_sites", len(h.siteConfigs)) - - return servers, nil -} - -// normalizedKey returns "normalized" key representation: -// scheme and host names are lowered, everything else stays the same -func normalizedKey(key string) string { - addr, err := standardizeAddress(key) - if err != nil { - return key - } - return addr.Normalize().Key() -} - -// GetConfig gets the SiteConfig that corresponds to c. -// If none exist (should only happen in tests), then a -// new, empty one will be created. -func GetConfig(c *caddy.Controller) *SiteConfig { - ctx := c.Context().(*httpContext) - key := normalizedKey(c.Key) - if cfg, ok := ctx.keysToSiteConfigs[key]; ok { - return cfg - } - // we should only get here during tests because directive - // actions typically skip the server blocks where we make - // the configs - cfg := &SiteConfig{ - Root: Root, - TLS: &caddytls.Config{Manager: certmagic.NewDefault()}, - IndexPages: staticfiles.DefaultIndexPages, - } - ctx.saveConfig(key, cfg) - return cfg -} - -// shortCaddyfileLoader loads a Caddyfile if positional arguments are -// detected, or, in other words, if un-named arguments are provided to -// the program. A "short Caddyfile" is one in which each argument -// is a line of the Caddyfile. The default host and port are prepended -// according to the Host and Port values. 
-func shortCaddyfileLoader(serverType string) (caddy.Input, error) { - if flag.NArg() > 0 && serverType == "http" { - confBody := fmt.Sprintf("%s:%s\n%s", Host, Port, strings.Join(flag.Args(), "\n")) - return caddy.CaddyfileInput{ - Contents: []byte(confBody), - Filepath: "args", - ServerTypeName: serverType, - }, nil - } - return nil, nil -} - -// groupSiteConfigsByListenAddr groups site configs by their listen -// (bind) address, so sites that use the same listener can be served -// on the same server instance. The return value maps the listen -// address (what you pass into net.Listen) to the list of site configs. -// This function does NOT vet the configs to ensure they are compatible. -func groupSiteConfigsByListenAddr(configs []*SiteConfig) (map[string][]*SiteConfig, error) { - groups := make(map[string][]*SiteConfig) - - for _, conf := range configs { - // We would add a special case here so that localhost addresses - // bind to 127.0.0.1 if conf.ListenHost is not already set, which - // would prevent outsiders from even connecting; but that was problematic: - // https://caddy.community/t/wildcard-virtual-domains-with-wildcard-roots/221/5?u=matt - - if conf.Addr.Port == "" { - conf.Addr.Port = Port - } - addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(conf.ListenHost, conf.Addr.Port)) - if err != nil { - return nil, err - } - addrstr := addr.String() - groups[addrstr] = append(groups[addrstr], conf) - } - - return groups, nil -} - -// Address represents a site address. It contains -// the original input value, and the component -// parts of an address. The component parts may be -// updated to the correct values as setup proceeds, -// but the original value should never be changed. -// -// The Host field must be in a normalized form. -type Address struct { - Original, Scheme, Host, Port, Path string -} - -// String returns a human-friendly print of the address. 
-func (a Address) String() string { - if a.Host == "" && a.Port == "" { - return "" - } - scheme := a.Scheme - if scheme == "" { - if a.Port == HTTPSPort { - scheme = "https" - } else { - scheme = "http" - } - } - s := scheme - if s != "" { - s += "://" - } - if a.Port != "" && - ((scheme == "https" && a.Port != DefaultHTTPSPort) || - (scheme == "http" && a.Port != DefaultHTTPPort)) { - s += net.JoinHostPort(a.Host, a.Port) - } else { - s += a.Host - } - if a.Path != "" { - s += a.Path - } - return s -} - -// VHost returns a sensible concatenation of Host:Port/Path from a. -// It's basically the a.Original but without the scheme. -func (a Address) VHost() string { - if idx := strings.Index(a.Original, "://"); idx > -1 { - return a.Original[idx+3:] - } - return a.Original -} - -// Normalize normalizes URL: turn scheme and host names into lower case -func (a Address) Normalize() Address { - path := a.Path - if !CaseSensitivePath { - path = strings.ToLower(path) - } - - // ensure host is normalized if it's an IP address - host := a.Host - if ip := net.ParseIP(host); ip != nil { - host = ip.String() - } - - return Address{ - Original: a.Original, - Scheme: strings.ToLower(a.Scheme), - Host: strings.ToLower(host), - Port: a.Port, - Path: path, - } -} - -// Key is similar to String, just replaces scheme and host values with modified values. -// Unlike String it doesn't add anything default (scheme, port, etc) -func (a Address) Key() string { - res := "" - if a.Scheme != "" { - res += a.Scheme + "://" - } - if a.Host != "" { - res += a.Host - } - if a.Port != "" { - if strings.HasPrefix(a.Original[len(res):], ":"+a.Port) { - // insert port only if the original has its own explicit port - res += ":" + a.Port - } - } - if a.Path != "" { - res += a.Path - } - return res -} - -// standardizeAddress parses an address string into a structured format with separate -// scheme, host, port, and path portions, as well as the original input string. 
-func standardizeAddress(str string) (Address, error) { - input := str - - // Split input into components (prepend with // to assert host by default) - if !strings.Contains(str, "//") && !strings.HasPrefix(str, "/") { - str = "//" + str - } - u, err := url.Parse(str) - if err != nil { - return Address{}, err - } - - // separate host and port - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - host, port, err = net.SplitHostPort(u.Host + ":") - if err != nil { - host = u.Host - } - } - - // see if we can set port based off scheme - if port == "" { - if u.Scheme == "http" { - port = HTTPPort - } else if u.Scheme == "https" { - port = HTTPSPort - } - } - - // repeated or conflicting scheme is confusing, so error - if u.Scheme != "" && (port == "http" || port == "https") { - return Address{}, fmt.Errorf("[%s] scheme specified twice in address", input) - } - - // error if scheme and port combination violate convention - if (u.Scheme == "http" && port == HTTPSPort) || (u.Scheme == "https" && port == HTTPPort) { - return Address{}, fmt.Errorf("[%s] scheme and port violate convention", input) - } - - // standardize http and https ports to their respective port numbers - if port == "http" { - u.Scheme = "http" - port = HTTPPort - } else if port == "https" { - u.Scheme = "https" - port = HTTPSPort - } - - return Address{Original: input, Scheme: u.Scheme, Host: host, Port: port, Path: u.Path}, err -} - -// RegisterDevDirective splices name into the list of directives -// immediately before another directive. This function is ONLY -// for plugin development purposes! NEVER use it for a plugin -// that you are not currently building. If before is empty, -// the directive will be appended to the end of the list. -// -// It is imperative that directives execute in the proper -// order, and hard-coding the list of directives guarantees -// a correct, absolute order every time. 
This function is -// convenient when developing a plugin, but it does not -// guarantee absolute ordering. Multiple plugins registering -// directives with this function will lead to non- -// deterministic builds and buggy software. -// -// Directive names must be lower-cased and unique. Any errors -// here are fatal, and even successful calls print a message -// to stdout as a reminder to use it only in development. -func RegisterDevDirective(name, before string) { - if name == "" { - fmt.Println("[FATAL] Cannot register empty directive name") - os.Exit(1) - } - if strings.ToLower(name) != name { - fmt.Printf("[FATAL] %s: directive name must be lowercase\n", name) - os.Exit(1) - } - for _, dir := range directives { - if dir == name { - fmt.Printf("[FATAL] %s: directive name already exists\n", name) - os.Exit(1) - } - } - if before == "" { - directives = append(directives, name) - } else { - var found bool - for i, dir := range directives { - if dir == before { - directives = append(directives[:i], append([]string{name}, directives[i:]...)...) - found = true - break - } - } - if !found { - fmt.Printf("[FATAL] %s: directive not found\n", before) - os.Exit(1) - } - } - msg := fmt.Sprintf("Registered directive '%s' ", name) - if before == "" { - msg += "at end of list" - } else { - msg += fmt.Sprintf("before '%s'", before) - } - fmt.Printf("[DEV NOTICE] %s\n", msg) -} - -// directives is the list of all directives known to exist for the -// http server type, including non-standard (3rd-party) directives. -// The ordering of this list is important. 
-var directives = []string{ - // primitive actions that set up the fundamental vitals of each config - "root", - "index", - "bind", - "limits", - "timeouts", - "tls", - - // services/utilities, or other directives that don't necessarily inject handlers - "startup", // TODO: Deprecate this directive - "shutdown", // TODO: Deprecate this directive - "on", - "supervisor", // github.com/lucaslorentz/caddy-supervisor - "request_id", - "realip", // github.com/captncraig/caddy-realip - "git", // github.com/abiosoft/caddy-git - - // directives that add listener middleware to the stack - "proxyprotocol", // github.com/mastercactapus/caddy-proxyprotocol - - // directives that add middleware to the stack - "locale", // github.com/simia-tech/caddy-locale - "log", - "cache", // github.com/nicolasazrak/caddy-cache - "rewrite", - "ext", - "minify", // github.com/hacdias/caddy-minify - "gzip", - "header", - "geoip", // github.com/kodnaplakal/caddy-geoip - "errors", - "authz", // github.com/casbin/caddy-authz - "filter", // github.com/echocat/caddy-filter - "ipfilter", // github.com/pyed/ipfilter - "ratelimit", // github.com/xuqingfeng/caddy-rate-limit - "expires", // github.com/epicagency/caddy-expires - "forwardproxy", // github.com/caddyserver/forwardproxy - "basicauth", - "redir", - "status", - "cors", // github.com/captncraig/cors/caddy - "s3browser", // github.com/techknowlogick/caddy-s3browser - "nobots", // github.com/Xumeiquer/nobots - "mime", - "login", // github.com/tarent/loginsrv/caddy - "reauth", // github.com/freman/caddy-reauth - "extauth", // github.com/BTBurke/caddy-extauth - "jwt", // github.com/BTBurke/caddy-jwt - "jsonp", // github.com/pschlump/caddy-jsonp - "upload", // blitznote.com/src/caddy.upload - "multipass", // github.com/namsral/multipass/caddy - "internal", - "pprof", - "expvar", - "push", - "datadog", // github.com/payintech/caddy-datadog - "prometheus", // github.com/miekg/caddy-prometheus - "templates", - "proxy", - "fastcgi", - "cgi", // 
github.com/jung-kurt/caddy-cgi - "websocket", - "filebrowser", // github.com/filebrowser/caddy - "webdav", // github.com/hacdias/caddy-webdav - "markdown", - "browse", - "mailout", // github.com/SchumacherFM/mailout - "awses", // github.com/miquella/caddy-awses - "awslambda", // github.com/coopernurse/caddy-awslambda - "grpc", // github.com/pieterlouw/caddy-grpc - "gopkg", // github.com/zikes/gopkg - "restic", // github.com/restic/caddy - "wkd", // github.com/emersion/caddy-wkd - "dyndns", // github.com/linkonoid/caddy-dyndns -} - -const ( - // DefaultHost is the default host. - DefaultHost = "" - // DefaultPort is the default port. - DefaultPort = "2015" - // DefaultRoot is the default root folder. - DefaultRoot = "." - // DefaultHTTPPort is the default port for HTTP. - DefaultHTTPPort = "80" - // DefaultHTTPSPort is the default port for HTTPS. - DefaultHTTPSPort = "443" -) - -// These "soft defaults" are configurable by -// command line flags, etc. -var ( - // Root is the site root - Root = DefaultRoot - - // Host is the site host - Host = DefaultHost - - // Port is the site port - Port = DefaultPort - - // GracefulTimeout is the maximum duration of a graceful shutdown. - GracefulTimeout time.Duration - - // HTTP2 indicates whether HTTP2 is enabled or not. - HTTP2 bool - - // QUIC indicates whether QUIC is enabled or not. - QUIC bool - - // HTTPPort is the port to use for HTTP. - HTTPPort = DefaultHTTPPort - - // HTTPSPort is the port to use for HTTPS. - HTTPSPort = DefaultHTTPSPort -) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/recorder.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/recorder.go deleted file mode 100644 index e59b5c288..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/recorder.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpserver - -import ( - "bytes" - "io" - "net/http" - "sync" - "time" -) - -// ResponseRecorder is a type of http.ResponseWriter that captures -// the status code written to it and also the size of the body -// written in the response. A status code does not have -// to be written, however, in which case 200 must be assumed. -// It is best to have the constructor initialize this type -// with that default status code. -// -// Setting the Replacer field allows middlewares to type-assert -// the http.ResponseWriter to ResponseRecorder and set their own -// placeholder values for logging utilities to use. -// -// Beware when accessing the Replacer value; it may be nil! -type ResponseRecorder struct { - *ResponseWriterWrapper - Replacer Replacer - status int - size int - start time.Time -} - -// NewResponseRecorder makes and returns a new ResponseRecorder. -// Because a status is not set unless WriteHeader is called -// explicitly, this constructor initializes with a status code -// of 200 to cover the default case. -func NewResponseRecorder(w http.ResponseWriter) *ResponseRecorder { - return &ResponseRecorder{ - ResponseWriterWrapper: &ResponseWriterWrapper{ResponseWriter: w}, - status: http.StatusOK, - start: time.Now(), - } -} - -// WriteHeader records the status code and calls the -// underlying ResponseWriter's WriteHeader method. -func (r *ResponseRecorder) WriteHeader(status int) { - r.status = status - r.ResponseWriterWrapper.WriteHeader(status) -} - -// Write is a wrapper that records the size of the body -// that gets written. 
-func (r *ResponseRecorder) Write(buf []byte) (int, error) { - n, err := r.ResponseWriterWrapper.Write(buf) - if err == nil { - r.size += n - } - return n, err -} - -// Size returns the size of the recorded response body. -func (r *ResponseRecorder) Size() int { - return r.size -} - -// Status returns the recorded response status code. -func (r *ResponseRecorder) Status() int { - return r.status -} - -// ResponseBuffer is a type that conditionally buffers the -// response in memory. It implements http.ResponseWriter so -// that it can stream the response if it is not buffering. -// Whether it buffers is decided by a func passed into the -// constructor, NewResponseBuffer. -// -// This type implements http.ResponseWriter, so you can pass -// this to the Next() middleware in the chain and record its -// response. However, since the entire response body will be -// buffered in memory, only use this when explicitly configured -// and required for some specific reason. For example, the -// text/template package only parses templates out of []byte -// and not io.Reader, so the templates directive uses this -// type to obtain the entire template text, but only on certain -// requests that match the right Content-Type, etc. -// -// ResponseBuffer also implements io.ReaderFrom for performance -// reasons. The standard lib's http.response type (unexported) -// uses io.Copy to write the body. io.Copy makes an allocation -// if the destination does not have a ReadFrom method (or if -// the source does not have a WriteTo method, but that's -// irrelevant here). Our ReadFrom is smart: if buffering, it -// calls the buffer's ReadFrom, which makes no allocs because -// it is already a buffer! If we're streaming the response -// instead, ReadFrom uses io.CopyBuffer with a pooled buffer -// that is managed within this package. 
-type ResponseBuffer struct { - *ResponseWriterWrapper - Buffer *bytes.Buffer - header http.Header - status int - shouldBuffer func(status int, header http.Header) bool - stream bool - rw http.ResponseWriter - wroteHeader bool -} - -// NewResponseBuffer returns a new ResponseBuffer that will -// use buf to store the full body of the response if shouldBuffer -// returns true. If shouldBuffer returns false, then the response -// body will be streamed directly to rw. -// -// shouldBuffer will be passed the status code and header fields of -// the response. With that information, the function should decide -// whether to buffer the response in memory. For example: the templates -// directive uses this to determine whether the response is the -// right Content-Type (according to user config) for a template. -// -// For performance, the buf you pass in should probably be obtained -// from a sync.Pool in order to reuse allocated space. -func NewResponseBuffer(buf *bytes.Buffer, rw http.ResponseWriter, - shouldBuffer func(status int, header http.Header) bool) *ResponseBuffer { - rb := &ResponseBuffer{ - Buffer: buf, - header: make(http.Header), - status: http.StatusOK, // default status code - shouldBuffer: shouldBuffer, - rw: rw, - } - rb.ResponseWriterWrapper = &ResponseWriterWrapper{ResponseWriter: rw} - return rb -} - -// Header returns the response header map. -func (rb *ResponseBuffer) Header() http.Header { - return rb.header -} - -// WriteHeader calls shouldBuffer to decide whether the -// upcoming body should be buffered, and then writes -// the header to the response. -func (rb *ResponseBuffer) WriteHeader(status int) { - if rb.wroteHeader { - return - } - rb.wroteHeader = true - - rb.status = status - rb.stream = !rb.shouldBuffer(status, rb.header) - if rb.stream { - rb.CopyHeader() - rb.ResponseWriterWrapper.WriteHeader(status) - } -} - -// Write writes buf to rb.Buffer if buffering, otherwise -// to the ResponseWriter directly if streaming. 
-func (rb *ResponseBuffer) Write(buf []byte) (int, error) { - if !rb.wroteHeader { - rb.WriteHeader(http.StatusOK) - } - - if rb.stream { - return rb.ResponseWriterWrapper.Write(buf) - } - return rb.Buffer.Write(buf) -} - -// Buffered returns whether rb has decided to buffer the response. -func (rb *ResponseBuffer) Buffered() bool { - return !rb.stream -} - -// CopyHeader copies the buffered header in rb to the ResponseWriter, -// but it does not write the header out. -func (rb *ResponseBuffer) CopyHeader() { - for field, val := range rb.header { - rb.ResponseWriterWrapper.Header()[field] = val - } -} - -// ReadFrom avoids allocations when writing to the buffer (if buffering), -// and reduces allocations when writing to the ResponseWriter directly -// (if streaming). -// -// In local testing with the templates directive, req/sec were improved -// from ~8,200 to ~9,600 on templated files by ensuring that this type -// implements io.ReaderFrom. -func (rb *ResponseBuffer) ReadFrom(src io.Reader) (int64, error) { - if !rb.wroteHeader { - rb.WriteHeader(http.StatusOK) - } - - if rb.stream { - // first see if we can avoid any allocations at all - if wt, ok := src.(io.WriterTo); ok { - return wt.WriteTo(rb.ResponseWriterWrapper) - } - // if not, use a pooled copy buffer to reduce allocs - // (this improved req/sec from ~25,300 to ~27,000 on - // static files served directly with the fileserver, - // but results fluctuated a little on each run). - // a note of caution: - // https://go-review.googlesource.com/c/22134#message-ff351762308fe05f6b72a487d6842e3988916486 - buf := respBufPool.Get().([]byte) - n, err := io.CopyBuffer(rb.ResponseWriterWrapper, src, buf) - respBufPool.Put(buf) // deferring this slowed down benchmarks a smidgin, I think - return n, err - } - return rb.Buffer.ReadFrom(src) -} - -// StatusCodeWriter returns an http.ResponseWriter that always -// writes the status code stored in rb from when a response -// was buffered to it. 
-func (rb *ResponseBuffer) StatusCodeWriter(w http.ResponseWriter) http.ResponseWriter { - return forcedStatusCodeWriter{w, rb} -} - -// forcedStatusCodeWriter is used to force a status code when -// writing the header. It uses the status code saved on rb. -// This is useful if passing a http.ResponseWriter into -// http.ServeContent because ServeContent hard-codes 2xx status -// codes. If we buffered the response, we force that status code -// instead. -type forcedStatusCodeWriter struct { - http.ResponseWriter - rb *ResponseBuffer -} - -func (fscw forcedStatusCodeWriter) WriteHeader(int) { - fscw.ResponseWriter.WriteHeader(fscw.rb.status) -} - -// respBufPool is used for io.CopyBuffer when ResponseBuffer -// is configured to stream a response. -var respBufPool = &sync.Pool{ - New: func() interface{} { - return make([]byte, 32*1024) - }, -} - -// Interface guards -var ( - _ HTTPInterfaces = (*ResponseRecorder)(nil) - _ HTTPInterfaces = (*ResponseBuffer)(nil) - _ io.ReaderFrom = (*ResponseBuffer)(nil) -) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/replacer.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/replacer.go deleted file mode 100644 index bf5240d99..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/replacer.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package httpserver - -import ( - "bytes" - "crypto/sha256" - "crypto/x509" - "encoding/pem" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "path" - "strconv" - "strings" - "time" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddytls" -) - -// requestReplacer is a strings.Replacer which is used to -// encode literal \r and \n characters and keep everything -// on one line -var requestReplacer = strings.NewReplacer( - "\r", "\\r", - "\n", "\\n", -) - -var now = time.Now - -// Replacer is a type which can replace placeholder -// substrings in a string with actual values from a -// http.Request and ResponseRecorder. Always use -// NewReplacer to get one of these. Any placeholders -// made with Set() should overwrite existing values if -// the key is already used. -type Replacer interface { - Replace(string) string - Set(key, value string) -} - -// replacer implements Replacer. customReplacements -// is used to store custom replacements created with -// Set() until the time of replacement, at which point -// they will be used to overwrite other replacements -// if there is a name conflict. -type replacer struct { - customReplacements map[string]string - emptyValue string - responseRecorder *ResponseRecorder - request *http.Request - requestBody *limitWriter -} - -type limitWriter struct { - w bytes.Buffer - remain int -} - -func newLimitWriter(max int) *limitWriter { - return &limitWriter{ - w: bytes.Buffer{}, - remain: max, - } -} - -func (lw *limitWriter) Write(p []byte) (int, error) { - // skip if we are full - if lw.remain <= 0 { - return len(p), nil - } - if n := len(p); n > lw.remain { - p = p[:lw.remain] - } - n, err := lw.w.Write(p) - lw.remain -= n - return n, err -} - -func (lw *limitWriter) String() string { - return lw.w.String() -} - -// NewReplacer makes a new replacer based on r and rr which -// are used for request and response placeholders, respectively. 
-// Request placeholders are created immediately, whereas -// response placeholders are not created until Replace() -// is invoked. rr may be nil if it is not available. -// emptyValue should be the string that is used in place -// of empty string (can still be empty string). -func NewReplacer(r *http.Request, rr *ResponseRecorder, emptyValue string) Replacer { - repl := &replacer{ - request: r, - responseRecorder: rr, - emptyValue: emptyValue, - } - - // extract customReplacements from a request replacer when present. - if existing, ok := r.Context().Value(ReplacerCtxKey).(*replacer); ok { - repl.requestBody = existing.requestBody - repl.customReplacements = existing.customReplacements - } else { - // if there is no existing replacer, build one from scratch. - rb := newLimitWriter(MaxLogBodySize) - if r.Body != nil { - r.Body = struct { - io.Reader - io.Closer - }{io.TeeReader(r.Body, rb), io.Closer(r.Body)} - } - repl.requestBody = rb - repl.customReplacements = make(map[string]string) - } - - return repl -} - -func canLogRequest(r *http.Request) bool { - if r.Method == "POST" || r.Method == "PUT" { - for _, cType := range r.Header[headerContentType] { - // the cType could have charset and other info - if strings.Contains(cType, contentTypeJSON) || strings.Contains(cType, contentTypeXML) { - return true - } - } - } - return false -} - -// unescapeBraces finds escaped braces in s and returns -// a string with those braces unescaped. -func unescapeBraces(s string) string { - s = strings.Replace(s, "\\{", "{", -1) - s = strings.Replace(s, "\\}", "}", -1) - return s -} - -// Replace performs a replacement of values on s and returns -// the string with the replaced values. -func (r *replacer) Replace(s string) string { - // Do not attempt replacements if no placeholder is found. 
- if !strings.ContainsAny(s, "{}") { - return s - } - - result := "" -Placeholders: // process each placeholder in sequence - for { - var idxStart, idxEnd int - - idxOffset := 0 - for { // find first unescaped opening brace - searchSpace := s[idxOffset:] - idxStart = strings.Index(searchSpace, "{") - if idxStart == -1 { - // no more placeholders - break Placeholders - } - if idxStart == 0 || searchSpace[idxStart-1] != '\\' { - // preceding character is not an escape - idxStart += idxOffset - break - } - // the brace we found was escaped - // search the rest of the string next - idxOffset += idxStart + 1 - } - - idxOffset = 0 - for { // find first unescaped closing brace - searchSpace := s[idxStart+idxOffset:] - idxEnd = strings.Index(searchSpace, "}") - if idxEnd == -1 { - // unpaired placeholder - break Placeholders - } - if idxEnd == 0 || searchSpace[idxEnd-1] != '\\' { - // preceding character is not an escape - idxEnd += idxOffset + idxStart - break - } - // the brace we found was escaped - // search the rest of the string next - idxOffset += idxEnd + 1 - } - - // get a replacement for the unescaped placeholder - placeholder := unescapeBraces(s[idxStart : idxEnd+1]) - replacement := r.getSubstitution(placeholder) - - // append unescaped prefix + replacement - result += strings.TrimPrefix(unescapeBraces(s[:idxStart]), "\\") + replacement - - // strip out scanned parts - s = s[idxEnd+1:] - } - - // append unscanned parts - return result + unescapeBraces(s) -} - -func roundDuration(d time.Duration) time.Duration { - if d >= time.Millisecond { - return round(d, time.Millisecond) - } else if d >= time.Microsecond { - return round(d, time.Microsecond) - } - - return d -} - -// round rounds d to the nearest r -func round(d, r time.Duration) time.Duration { - if r <= 0 { - return d - } - neg := d < 0 - if neg { - d = -d - } - if m := d % r; m+m < r { - d = d - m - } else { - d = d + r - m - } - if neg { - return -d - } - return d -} - -// getPeerCert returns peer 
certificate -func (r *replacer) getPeerCert() *x509.Certificate { - if r.request.TLS != nil && len(r.request.TLS.PeerCertificates) > 0 { - return r.request.TLS.PeerCertificates[0] - } - - return nil -} - -// getSubstitution retrieves value from corresponding key -func (r *replacer) getSubstitution(key string) string { - // search custom replacements first - if value, ok := r.customReplacements[key]; ok { - return value - } - - // search request headers then - if key[1] == '>' { - want := key[2 : len(key)-1] - for key, values := range r.request.Header { - // Header placeholders (case-insensitive) - if strings.EqualFold(key, want) { - return strings.Join(values, ",") - } - } - } - // search response headers then - if r.responseRecorder != nil && key[1] == '<' { - want := key[2 : len(key)-1] - for key, values := range r.responseRecorder.Header() { - // Header placeholders (case-insensitive) - if strings.EqualFold(key, want) { - return strings.Join(values, ",") - } - } - } - // next check for cookies - if key[1] == '~' { - name := key[2 : len(key)-1] - if cookie, err := r.request.Cookie(name); err == nil { - return cookie.Value - } - } - // next check for query argument - if key[1] == '?' 
{ - query := r.request.URL.Query() - name := key[2 : len(key)-1] - return query.Get(name) - } - - // search default replacements in the end - switch key { - case "{method}": - return r.request.Method - case "{scheme}": - if r.request.TLS != nil { - return "https" - } - return "http" - case "{hostname}": - name, err := os.Hostname() - if err != nil { - return r.emptyValue - } - return name - case "{host}": - return r.request.Host - case "{hostonly}": - host, _, err := net.SplitHostPort(r.request.Host) - if err != nil { - return r.request.Host - } - return host - case "{path}": - u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL) - return u.Path - case "{path_escaped}": - u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL) - return url.QueryEscape(u.Path) - case "{request_id}": - reqid, _ := r.request.Context().Value(RequestIDCtxKey).(string) - return reqid - case "{rewrite_path}": - return r.request.URL.Path - case "{rewrite_path_escaped}": - return url.QueryEscape(r.request.URL.Path) - case "{query}": - u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL) - return u.RawQuery - case "{query_escaped}": - u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL) - return url.QueryEscape(u.RawQuery) - case "{fragment}": - u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL) - return u.Fragment - case "{proto}": - return r.request.Proto - case "{remote}": - host, _, err := net.SplitHostPort(r.request.RemoteAddr) - if err != nil { - return r.request.RemoteAddr - } - return host - case "{port}": - _, port, err := net.SplitHostPort(r.request.RemoteAddr) - if err != nil { - return r.emptyValue - } - return port - case "{uri}": - u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL) - return u.RequestURI() - case "{uri_escaped}": - u, _ := r.request.Context().Value(OriginalURLCtxKey).(url.URL) - return url.QueryEscape(u.RequestURI()) - case "{rewrite_uri}": - return r.request.URL.RequestURI() - case 
"{rewrite_uri_escaped}": - return url.QueryEscape(r.request.URL.RequestURI()) - case "{when}": - return now().Format(timeFormat) - case "{when_iso_local}": - return now().Format(timeFormatISO) - case "{when_iso}": - return now().UTC().Format(timeFormatISOUTC) - case "{when_unix}": - return strconv.FormatInt(now().Unix(), 10) - case "{when_unix_ms}": - return strconv.FormatInt(nanoToMilliseconds(now().UnixNano()), 10) - case "{file}": - _, file := path.Split(r.request.URL.Path) - return file - case "{dir}": - dir, _ := path.Split(r.request.URL.Path) - return dir - case "{request}": - dump, err := httputil.DumpRequest(r.request, false) - if err != nil { - return r.emptyValue - } - return requestReplacer.Replace(string(dump)) - case "{request_body}": - if !canLogRequest(r.request) { - return r.emptyValue - } - _, err := ioutil.ReadAll(r.request.Body) - if err != nil { - if err == ErrMaxBytesExceeded { - return r.emptyValue - } - } - return requestReplacer.Replace(r.requestBody.String()) - case "{mitm}": - if val, ok := r.request.Context().Value(caddy.CtxKey("mitm")).(bool); ok { - if val { - return "likely" - } - return "unlikely" - } - return "unknown" - case "{status}": - if r.responseRecorder == nil { - return r.emptyValue - } - return strconv.Itoa(r.responseRecorder.status) - case "{size}": - if r.responseRecorder == nil { - return r.emptyValue - } - return strconv.Itoa(r.responseRecorder.size) - case "{latency}": - if r.responseRecorder == nil { - return r.emptyValue - } - return roundDuration(time.Since(r.responseRecorder.start)).String() - case "{latency_ms}": - if r.responseRecorder == nil { - return r.emptyValue - } - elapsedDuration := time.Since(r.responseRecorder.start) - return strconv.FormatInt(convertToMilliseconds(elapsedDuration), 10) - case "{tls_protocol}": - if r.request.TLS != nil { - if name, err := caddytls.GetSupportedProtocolName(r.request.TLS.Version); err == nil { - return name - } else { - return "tls" // this should never happen, but guard 
in case - } - } - return r.emptyValue // because not using a secure channel - case "{tls_cipher}": - if r.request.TLS != nil { - if name, err := caddytls.GetSupportedCipherName(r.request.TLS.CipherSuite); err == nil { - return name - } else { - return "UNKNOWN" // this should never happen, but guard in case - } - } - return r.emptyValue - case "{tls_client_escaped_cert}": - cert := r.getPeerCert() - if cert != nil { - pemBlock := pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - } - return url.QueryEscape(string(pem.EncodeToMemory(&pemBlock))) - } - return r.emptyValue - case "{tls_client_fingerprint}": - cert := r.getPeerCert() - if cert != nil { - return fmt.Sprintf("%x", sha256.Sum256(cert.Raw)) - } - return r.emptyValue - case "{tls_client_i_dn}": - cert := r.getPeerCert() - if cert != nil { - return cert.Issuer.String() - } - return r.emptyValue - case "{tls_client_raw_cert}": - cert := r.getPeerCert() - if cert != nil { - return string(cert.Raw) - } - return r.emptyValue - case "{tls_client_s_dn}": - cert := r.getPeerCert() - if cert != nil { - return cert.Subject.String() - } - return r.emptyValue - case "{tls_client_serial}": - cert := r.getPeerCert() - if cert != nil { - return fmt.Sprintf("%x", cert.SerialNumber) - } - return r.emptyValue - case "{tls_client_v_end}": - cert := r.getPeerCert() - if cert != nil { - return cert.NotAfter.In(time.UTC).Format("Jan 02 15:04:05 2006 MST") - } - return r.emptyValue - case "{tls_client_v_remain}": - cert := r.getPeerCert() - if cert != nil { - now := time.Now().In(time.UTC) - days := int64(cert.NotAfter.Sub(now).Seconds() / 86400) - return strconv.FormatInt(days, 10) - } - return r.emptyValue - case "{tls_client_v_start}": - cert := r.getPeerCert() - if cert != nil { - return cert.NotBefore.Format("Jan 02 15:04:05 2006 MST") - } - return r.emptyValue - case "{server_port}": - _, port, err := net.SplitHostPort(r.request.Host) - if err != nil { - if r.request.TLS != nil { - return "443" - } else { - return "80" 
- } - } - return port - default: - // {labelN} - if strings.HasPrefix(key, "{label") { - nStr := key[6 : len(key)-1] // get the integer N in "{labelN}" - n, err := strconv.Atoi(nStr) - if err != nil || n < 1 { - return r.emptyValue - } - labels := strings.Split(r.request.Host, ".") - if n > len(labels) { - return r.emptyValue - } - return labels[n-1] - } - } - - return r.emptyValue -} - -func nanoToMilliseconds(d int64) int64 { - return d / 1e6 -} - -// convertToMilliseconds returns the number of milliseconds in the given duration -func convertToMilliseconds(d time.Duration) int64 { - return nanoToMilliseconds(d.Nanoseconds()) -} - -// Set sets key to value in the r.customReplacements map. -func (r *replacer) Set(key, value string) { - r.customReplacements["{"+key+"}"] = value -} - -const ( - timeFormat = "02/Jan/2006:15:04:05 -0700" - timeFormatISO = "2006-01-02T15:04:05" // ISO 8601 with timezone to be assumed as local - timeFormatISOUTC = "2006-01-02T15:04:05Z" // ISO 8601 with timezone to be assumed as UTC - headerContentType = "Content-Type" - contentTypeJSON = "application/json" - contentTypeXML = "application/xml" - // MaxLogBodySize limits the size of logged request's body - MaxLogBodySize = 100 * 1024 -) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/responsewriterwrapper.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/responsewriterwrapper.go deleted file mode 100644 index ad92d6d87..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/responsewriterwrapper.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpserver - -import ( - "bufio" - "net" - "net/http" -) - -// ResponseWriterWrapper wrappers underlying ResponseWriter -// and inherits its Hijacker/Pusher/CloseNotifier/Flusher as well. -type ResponseWriterWrapper struct { - http.ResponseWriter -} - -// Hijack implements http.Hijacker. It simply wraps the underlying -// ResponseWriter's Hijack method if there is one, or returns an error. -func (rww *ResponseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) { - if hj, ok := rww.ResponseWriter.(http.Hijacker); ok { - return hj.Hijack() - } - return nil, nil, NonHijackerError{Underlying: rww.ResponseWriter} -} - -// Flush implements http.Flusher. It simply wraps the underlying -// ResponseWriter's Flush method if there is one, or panics. -func (rww *ResponseWriterWrapper) Flush() { - if f, ok := rww.ResponseWriter.(http.Flusher); ok { - f.Flush() - } else { - panic(NonFlusherError{Underlying: rww.ResponseWriter}) - } -} - -// CloseNotify implements http.CloseNotifier. -// It just inherits the underlying ResponseWriter's CloseNotify method. -// It panics if the underlying ResponseWriter is not a CloseNotifier. -func (rww *ResponseWriterWrapper) CloseNotify() <-chan bool { - if cn, ok := rww.ResponseWriter.(http.CloseNotifier); ok { - return cn.CloseNotify() - } - panic(NonCloseNotifierError{Underlying: rww.ResponseWriter}) -} - -// Push implements http.Pusher. -// It just inherits the underlying ResponseWriter's Push method. -// It panics if the underlying ResponseWriter is not a Pusher. 
-func (rww *ResponseWriterWrapper) Push(target string, opts *http.PushOptions) error { - if pusher, hasPusher := rww.ResponseWriter.(http.Pusher); hasPusher { - return pusher.Push(target, opts) - } - - return NonPusherError{Underlying: rww.ResponseWriter} -} - -// HTTPInterfaces mix all the interfaces that middleware ResponseWriters need to support. -type HTTPInterfaces interface { - http.ResponseWriter - http.Pusher - http.Flusher - http.CloseNotifier - http.Hijacker -} - -// Interface guards -var _ HTTPInterfaces = (*ResponseWriterWrapper)(nil) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/roller.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/roller.go deleted file mode 100644 index 4f9142761..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/roller.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpserver - -import ( - "errors" - "io" - "path/filepath" - "strconv" - - lumberjack "gopkg.in/natefinch/lumberjack.v2" -) - -// LogRoller implements a type that provides a rolling logger. -type LogRoller struct { - Disabled bool - Filename string - MaxSize int - MaxAge int - MaxBackups int - Compress bool - LocalTime bool -} - -// GetLogWriter returns an io.Writer that writes to a rolling logger. 
-// This should be called only from the main goroutine (like during -// server setup) because this method is not thread-safe; it is careful -// to create only one log writer per log file, even if the log file -// is shared by different sites or middlewares. This ensures that -// rolling is synchronized, since a process (or multiple processes) -// should not create more than one roller on the same file at the -// same time. See issue #1363. -func (l LogRoller) GetLogWriter() io.Writer { - absPath, err := filepath.Abs(l.Filename) - if err != nil { - absPath = l.Filename // oh well, hopefully they're consistent in how they specify the filename - } - lj, has := lumberjacks[absPath] - if !has { - lj = &lumberjack.Logger{ - Filename: l.Filename, - MaxSize: l.MaxSize, - MaxAge: l.MaxAge, - MaxBackups: l.MaxBackups, - Compress: l.Compress, - LocalTime: l.LocalTime, - } - lumberjacks[absPath] = lj - } - return lj -} - -// IsLogRollerSubdirective is true if the subdirective is for the log roller. -func IsLogRollerSubdirective(subdir string) bool { - return subdir == directiveRotateSize || - subdir == directiveRotateAge || - subdir == directiveRotateKeep || - subdir == directiveRotateCompress || - subdir == directiveRotateDisable -} - -var errInvalidRollParameter = errors.New("invalid roller parameter") - -// ParseRoller parses roller contents out of c. -func ParseRoller(l *LogRoller, what string, where ...string) error { - if l == nil { - l = DefaultLogRoller() - } - - // rotate_compress doesn't accept any parameters. 
- // others only accept one parameter - if ((what == directiveRotateCompress || what == directiveRotateDisable) && len(where) != 0) || - ((what != directiveRotateCompress && what != directiveRotateDisable) && len(where) != 1) { - return errInvalidRollParameter - } - - var ( - value int - err error - ) - if what != directiveRotateCompress && what != directiveRotateDisable { - value, err = strconv.Atoi(where[0]) - if err != nil { - return err - } - } - - switch what { - case directiveRotateDisable: - l.Disabled = true - case directiveRotateSize: - l.MaxSize = value - case directiveRotateAge: - l.MaxAge = value - case directiveRotateKeep: - l.MaxBackups = value - case directiveRotateCompress: - l.Compress = true - } - return nil -} - -// DefaultLogRoller will roll logs by default. -func DefaultLogRoller() *LogRoller { - return &LogRoller{ - MaxSize: defaultRotateSize, - MaxAge: defaultRotateAge, - MaxBackups: defaultRotateKeep, - Compress: false, - LocalTime: true, - } -} - -const ( - // defaultRotateSize is 100 MB. - defaultRotateSize = 100 - // defaultRotateAge is 14 days. - defaultRotateAge = 14 - // defaultRotateKeep is 10 files. - defaultRotateKeep = 10 - - directiveRotateDisable = "rotate_disable" - directiveRotateSize = "rotate_size" - directiveRotateAge = "rotate_age" - directiveRotateKeep = "rotate_keep" - directiveRotateCompress = "rotate_compress" -) - -// lumberjacks maps log filenames to the logger -// that is being used to keep them rolled/maintained. 
-var lumberjacks = make(map[string]io.Writer) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/server.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/server.go deleted file mode 100644 index 7940ac832..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/server.go +++ /dev/null @@ -1,625 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package httpserver implements an HTTP server on top of Caddy. -package httpserver - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "log" - "net" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "time" - - "github.com/lucas-clemente/quic-go/h2quic" - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/staticfiles" - "github.com/mholt/caddy/caddytls" - "github.com/mholt/caddy/telemetry" -) - -// Server is the HTTP server implementation. -type Server struct { - Server *http.Server - quicServer *h2quic.Server - sites []*SiteConfig - connTimeout time.Duration // max time to wait for a connection before force stop - tlsGovChan chan struct{} // close to stop the TLS maintenance goroutine - vhosts *vhostTrie -} - -// ensure it satisfies the interface -var _ caddy.GracefulServer = new(Server) - -var defaultALPN = []string{"h2", "http/1.1"} - -// makeTLSConfig extracts TLS settings from each site config to -// build a tls.Config usable in Caddy HTTP servers. 
The returned -// config will be nil if TLS is disabled for these sites. -func makeTLSConfig(group []*SiteConfig) (*tls.Config, error) { - var tlsConfigs []*caddytls.Config - for i := range group { - if HTTP2 && len(group[i].TLS.ALPN) == 0 { - // if no application-level protocol was configured up to now, - // default to HTTP/2, then HTTP/1.1 if necessary - group[i].TLS.ALPN = defaultALPN - } - tlsConfigs = append(tlsConfigs, group[i].TLS) - } - return caddytls.MakeTLSConfig(tlsConfigs) -} - -func getFallbacks(sites []*SiteConfig) []string { - fallbacks := []string{} - for _, sc := range sites { - if sc.FallbackSite { - fallbacks = append(fallbacks, sc.Addr.Host) - } - } - return fallbacks -} - -// NewServer creates a new Server instance that will listen on addr -// and will serve the sites configured in group. -func NewServer(addr string, group []*SiteConfig) (*Server, error) { - s := &Server{ - Server: makeHTTPServerWithTimeouts(addr, group), - vhosts: newVHostTrie(), - sites: group, - connTimeout: GracefulTimeout, - } - s.vhosts.fallbackHosts = append(s.vhosts.fallbackHosts, getFallbacks(group)...) 
- s.Server = makeHTTPServerWithHeaderLimit(s.Server, group) - s.Server.Handler = s // this is weird, but whatever - - // extract TLS settings from each site config to build - // a tls.Config, which will not be nil if TLS is enabled - tlsConfig, err := makeTLSConfig(group) - if err != nil { - return nil, err - } - s.Server.TLSConfig = tlsConfig - - // if TLS is enabled, make sure we prepare the Server accordingly - if s.Server.TLSConfig != nil { - // enable QUIC if desired (requires HTTP/2) - if HTTP2 && QUIC { - s.quicServer = &h2quic.Server{Server: s.Server} - s.Server.Handler = s.wrapWithSvcHeaders(s.Server.Handler) - } - - // wrap the HTTP handler with a handler that does MITM detection - tlsh := &tlsHandler{next: s.Server.Handler} - s.Server.Handler = tlsh // this needs to be the "outer" handler when Serve() is called, for type assertion - - // when Serve() creates the TLS listener later, that listener should - // be adding a reference the ClientHello info to a map; this callback - // will be sure to clear out that entry when the connection closes. - s.Server.ConnState = func(c net.Conn, cs http.ConnState) { - // when a connection closes or is hijacked, delete its entry - // in the map, because we are done with it. - if tlsh.listener != nil { - if cs == http.StateHijacked || cs == http.StateClosed { - tlsh.listener.helloInfosMu.Lock() - delete(tlsh.listener.helloInfos, c.RemoteAddr().String()) - tlsh.listener.helloInfosMu.Unlock() - } - } - } - - // As of Go 1.7, if the Server's TLSConfig is not nil, HTTP/2 is enabled only - // if TLSConfig.NextProtos includes the string "h2" - if HTTP2 && len(s.Server.TLSConfig.NextProtos) == 0 { - // some experimenting shows that this NextProtos must have at least - // one value that overlaps with the NextProtos of any other tls.Config - // that is returned from GetConfigForClient; if there is no overlap, - // the connection will fail (as of Go 1.8, Feb. 2017). 
- s.Server.TLSConfig.NextProtos = defaultALPN - } - } - - // Compile custom middleware for every site (enables virtual hosting) - for _, site := range group { - stack := Handler(staticfiles.FileServer{Root: http.Dir(site.Root), Hide: site.HiddenFiles, IndexPages: site.IndexPages}) - for i := len(site.middleware) - 1; i >= 0; i-- { - stack = site.middleware[i](stack) - } - site.middlewareChain = stack - s.vhosts.Insert(site.Addr.VHost(), site) - } - - return s, nil -} - -// makeHTTPServerWithHeaderLimit apply minimum header limit within a group to given http.Server -func makeHTTPServerWithHeaderLimit(s *http.Server, group []*SiteConfig) *http.Server { - var min int64 - for _, cfg := range group { - limit := cfg.Limits.MaxRequestHeaderSize - if limit == 0 { - continue - } - - // not set yet - if min == 0 { - min = limit - } - - // find a better one - if limit < min { - min = limit - } - } - - if min > 0 { - s.MaxHeaderBytes = int(min) - } - return s -} - -// makeHTTPServerWithTimeouts makes an http.Server from the group of -// configs in a way that configures timeouts (or, if not set, it uses -// the default timeouts) by combining the configuration of each -// SiteConfig in the group. (Timeouts are important for mitigating -// slowloris attacks.) 
-func makeHTTPServerWithTimeouts(addr string, group []*SiteConfig) *http.Server { - // find the minimum duration configured for each timeout - var min Timeouts - for _, cfg := range group { - if cfg.Timeouts.ReadTimeoutSet && - (!min.ReadTimeoutSet || cfg.Timeouts.ReadTimeout < min.ReadTimeout) { - min.ReadTimeoutSet = true - min.ReadTimeout = cfg.Timeouts.ReadTimeout - } - if cfg.Timeouts.ReadHeaderTimeoutSet && - (!min.ReadHeaderTimeoutSet || cfg.Timeouts.ReadHeaderTimeout < min.ReadHeaderTimeout) { - min.ReadHeaderTimeoutSet = true - min.ReadHeaderTimeout = cfg.Timeouts.ReadHeaderTimeout - } - if cfg.Timeouts.WriteTimeoutSet && - (!min.WriteTimeoutSet || cfg.Timeouts.WriteTimeout < min.WriteTimeout) { - min.WriteTimeoutSet = true - min.WriteTimeout = cfg.Timeouts.WriteTimeout - } - if cfg.Timeouts.IdleTimeoutSet && - (!min.IdleTimeoutSet || cfg.Timeouts.IdleTimeout < min.IdleTimeout) { - min.IdleTimeoutSet = true - min.IdleTimeout = cfg.Timeouts.IdleTimeout - } - } - - // for the values that were not set, use defaults - if !min.ReadTimeoutSet { - min.ReadTimeout = defaultTimeouts.ReadTimeout - } - if !min.ReadHeaderTimeoutSet { - min.ReadHeaderTimeout = defaultTimeouts.ReadHeaderTimeout - } - if !min.WriteTimeoutSet { - min.WriteTimeout = defaultTimeouts.WriteTimeout - } - if !min.IdleTimeoutSet { - min.IdleTimeout = defaultTimeouts.IdleTimeout - } - - // set the final values on the server and return it - return &http.Server{ - Addr: addr, - ReadTimeout: min.ReadTimeout, - ReadHeaderTimeout: min.ReadHeaderTimeout, - WriteTimeout: min.WriteTimeout, - IdleTimeout: min.IdleTimeout, - } -} - -func (s *Server) wrapWithSvcHeaders(previousHandler http.Handler) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if err := s.quicServer.SetQuicHeaders(w.Header()); err != nil { - log.Println("[Error] failed to set proper headers for QUIC: ", err) - } - previousHandler.ServeHTTP(w, r) - } -} - -// Listen creates an active listener for s that can be 
-// used to serve requests. -func (s *Server) Listen() (net.Listener, error) { - if s.Server == nil { - return nil, fmt.Errorf("server field is nil") - } - - ln, err := net.Listen("tcp", s.Server.Addr) - if err != nil { - var succeeded bool - if runtime.GOOS == "windows" { - // Windows has been known to keep sockets open even after closing the listeners. - // Tests reveal this error case easily because they call Start() then Stop() - // in succession. TODO: Better way to handle this? And why limit this to Windows? - for i := 0; i < 20; i++ { - time.Sleep(100 * time.Millisecond) - ln, err = net.Listen("tcp", s.Server.Addr) - if err == nil { - succeeded = true - break - } - } - } - if !succeeded { - return nil, err - } - } - - if tcpLn, ok := ln.(*net.TCPListener); ok { - ln = tcpKeepAliveListener{TCPListener: tcpLn} - } - - cln := s.WrapListener(ln) - - // Very important to return a concrete caddy.Listener - // implementation for graceful restarts. - return cln.(caddy.Listener), nil -} - -// WrapListener wraps ln in the listener middlewares configured -// for this server. -func (s *Server) WrapListener(ln net.Listener) net.Listener { - if ln == nil { - return nil - } - cln := ln.(caddy.Listener) - for _, site := range s.sites { - for _, m := range site.listenerMiddleware { - cln = m(cln) - } - } - return cln -} - -// ListenPacket creates udp connection for QUIC if it is enabled, -func (s *Server) ListenPacket() (net.PacketConn, error) { - if QUIC { - udpAddr, err := net.ResolveUDPAddr("udp", s.Server.Addr) - if err != nil { - return nil, err - } - return net.ListenUDP("udp", udpAddr) - } - return nil, nil -} - -// Serve serves requests on ln. It blocks until ln is closed. 
-func (s *Server) Serve(ln net.Listener) error { - if s.Server.TLSConfig != nil { - // Create TLS listener - note that we do not replace s.listener - // with this TLS listener; tls.listener is unexported and does - // not implement the File() method we need for graceful restarts - // on POSIX systems. - // TODO: Is this ^ still relevant anymore? Maybe we can now that it's a net.Listener... - ln = newTLSListener(ln, s.Server.TLSConfig) - if handler, ok := s.Server.Handler.(*tlsHandler); ok { - handler.listener = ln.(*tlsHelloListener) - } - - // Rotate TLS session ticket keys - s.tlsGovChan = caddytls.RotateSessionTicketKeys(s.Server.TLSConfig) - } - - defer func() { - if s.quicServer != nil { - if err := s.quicServer.Close(); err != nil { - log.Println("[ERROR] failed to close QUIC server: ", err) - } - } - }() - - err := s.Server.Serve(ln) - if err != nil && err != http.ErrServerClosed { - return err - } - return nil -} - -// ServePacket serves QUIC requests on pc until it is closed. -func (s *Server) ServePacket(pc net.PacketConn) error { - if s.quicServer != nil { - err := s.quicServer.Serve(pc.(*net.UDPConn)) - return fmt.Errorf("serving QUIC connections: %v", err) - } - return nil -} - -// ServeHTTP is the entry point of all HTTP requests. -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - defer func() { - // We absolutely need to be sure we stay alive up here, - // even though, in theory, the errors middleware does this. 
- if rec := recover(); rec != nil { - log.Printf("[PANIC] %v", rec) - DefaultErrorFunc(w, r, http.StatusInternalServerError) - } - }() - - // record the User-Agent string (with a cap on its length to mitigate attacks) - ua := r.Header.Get("User-Agent") - if len(ua) > 512 { - ua = ua[:512] - } - uaHash := telemetry.FastHash([]byte(ua)) // this is a normalized field - go telemetry.SetNested("http_user_agent", uaHash, ua) - go telemetry.AppendUnique("http_user_agent_count", uaHash) - go telemetry.Increment("http_request_count") - - // copy the original, unchanged URL into the context - // so it can be referenced by middlewares - urlCopy := *r.URL - if r.URL.User != nil { - userInfo := new(url.Userinfo) - *userInfo = *r.URL.User - urlCopy.User = userInfo - } - c := context.WithValue(r.Context(), OriginalURLCtxKey, urlCopy) - r = r.WithContext(c) - - // Setup a replacer for the request that keeps track of placeholder - // values across plugins. - replacer := NewReplacer(r, nil, "") - c = context.WithValue(r.Context(), ReplacerCtxKey, replacer) - r = r.WithContext(c) - - w.Header().Set("Server", caddy.AppName) - - status, _ := s.serveHTTP(w, r) - - // Fallback error response in case error handling wasn't chained in - if status >= 400 { - DefaultErrorFunc(w, r, status) - } -} - -func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - // strip out the port because it's not used in virtual - // hosting; the port is irrelevant because each listener - // is on a different port. 
- hostname, _, err := net.SplitHostPort(r.Host) - if err != nil { - hostname = r.Host - } - - // look up the virtualhost; if no match, serve error - vhost, pathPrefix := s.vhosts.Match(hostname + r.URL.Path) - c := context.WithValue(r.Context(), caddy.CtxKey("path_prefix"), pathPrefix) - r = r.WithContext(c) - - if vhost == nil { - // check for ACME challenge even if vhost is nil; - // could be a new host coming online soon - choose any - // vhost's cert manager configuration, I guess - if len(s.sites) > 0 && s.sites[0].TLS.Manager.HandleHTTPChallenge(w, r) { - return 0, nil - } - - // otherwise, log the error and write a message to the client - remoteHost, _, err := net.SplitHostPort(r.RemoteAddr) - if err != nil { - remoteHost = r.RemoteAddr - } - WriteSiteNotFound(w, r) // don't add headers outside of this function (http.forwardproxy) - log.Printf("[INFO] %s - No such site at %s (Remote: %s, Referer: %s)", - hostname, s.Server.Addr, remoteHost, r.Header.Get("Referer")) - return 0, nil - } - - // we still check for ACME challenge if the vhost exists, - // because the HTTP challenge might be disabled by its config - if vhost.TLS.Manager.HandleHTTPChallenge(w, r) { - return 0, nil - } - - // trim the path portion of the site address from the beginning of - // the URL path, so a request to example.com/foo/blog on the site - // defined as example.com/foo appears as /blog instead of /foo/blog. 
- if pathPrefix != "/" { - r.URL = trimPathPrefix(r.URL, pathPrefix) - } - - // enforce strict host matching, which ensures that the SNI - // value (if any), matches the Host header; essential for - // sites that rely on TLS ClientAuth sharing a port with - // sites that do not - if mismatched, close the connection - if vhost.StrictHostMatching && r.TLS != nil && - strings.ToLower(r.TLS.ServerName) != strings.ToLower(hostname) { - r.Close = true - log.Printf("[ERROR] %s - strict host matching: SNI (%s) and HTTP Host (%s) values differ", - vhost.Addr, r.TLS.ServerName, hostname) - return http.StatusForbidden, nil - } - - return vhost.middlewareChain.ServeHTTP(w, r) -} - -func trimPathPrefix(u *url.URL, prefix string) *url.URL { - // We need to use URL.EscapedPath() when trimming the pathPrefix as - // URL.Path is ambiguous about / or %2f - see docs. See #1927 - trimmedPath := strings.TrimPrefix(u.EscapedPath(), prefix) - if !strings.HasPrefix(trimmedPath, "/") { - trimmedPath = "/" + trimmedPath - } - // After trimming path reconstruct uri string with Query before parsing - trimmedURI := trimmedPath - if u.RawQuery != "" || u.ForceQuery == true { - trimmedURI = trimmedPath + "?" + u.RawQuery - } - if u.Fragment != "" { - trimmedURI = trimmedURI + "#" + u.Fragment - } - trimmedURL, err := url.Parse(trimmedURI) - if err != nil { - log.Printf("[ERROR] Unable to parse trimmed URL %s: %v", trimmedURI, err) - return u - } - return trimmedURL -} - -// Address returns the address s was assigned to listen on. -func (s *Server) Address() string { - return s.Server.Addr -} - -// Stop stops s gracefully (or forcefully after timeout) and -// closes its listener. 
-func (s *Server) Stop() error { - ctx, cancel := context.WithTimeout(context.Background(), s.connTimeout) - defer cancel() - - err := s.Server.Shutdown(ctx) - if err != nil { - return err - } - - // signal any TLS governor goroutines to exit - if s.tlsGovChan != nil { - close(s.tlsGovChan) - } - - return nil -} - -// OnStartupComplete lists the sites served by this server -// and any relevant information, assuming caddy.Quiet == false. -func (s *Server) OnStartupComplete() { - if !caddy.Quiet { - firstSite := s.sites[0] - scheme := "HTTP" - if firstSite.TLS.Enabled { - scheme = "HTTPS" - } - - fmt.Println("") - fmt.Printf("Serving %s on port "+firstSite.Port()+" \n", scheme) - s.outputSiteInfo(false) - fmt.Println("") - } - - // Print out process log without header comment - s.outputSiteInfo(true) -} - -func (s *Server) outputSiteInfo(isProcessLog bool) { - for _, site := range s.sites { - output := site.Addr.String() - if caddy.IsLoopback(s.Address()) && !caddy.IsLoopback(site.Addr.Host) { - output += " (only accessible on this machine)" - } - if isProcessLog { - log.Printf("[INFO] Serving %s \n", output) - } else { - fmt.Println(output) - } - } -} - -// defaultTimeouts stores the default timeout values to use -// if left unset by user configuration. NOTE: Most default -// timeouts are disabled (see issues #1464 and #1733). -var defaultTimeouts = Timeouts{IdleTimeout: 5 * time.Minute} - -// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted -// connections. It's used by ListenAndServe and ListenAndServeTLS so -// dead TCP connections (e.g. closing laptop mid-download) eventually -// go away. -// -// Borrowed from the Go standard library. -type tcpKeepAliveListener struct { - *net.TCPListener -} - -// Accept accepts the connection with a keep-alive enabled. 
-func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { - tc, err := ln.AcceptTCP() - if err != nil { - return - } - if err = tc.SetKeepAlive(true); err != nil { - return - } - if err = tc.SetKeepAlivePeriod(3 * time.Minute); err != nil { - return - } - return tc, nil -} - -// File implements caddy.Listener; it returns the underlying file of the listener. -func (ln tcpKeepAliveListener) File() (*os.File, error) { - return ln.TCPListener.File() -} - -// ErrMaxBytesExceeded is the error returned by MaxBytesReader -// when the request body exceeds the limit imposed -var ErrMaxBytesExceeded = errors.New("http: request body too large") - -// DefaultErrorFunc responds to an HTTP request with a simple description -// of the specified HTTP status code. -func DefaultErrorFunc(w http.ResponseWriter, r *http.Request, status int) { - WriteTextResponse(w, status, fmt.Sprintf("%d %s\n", status, http.StatusText(status))) -} - -const httpStatusMisdirectedRequest = 421 // RFC 7540, 9.1.2 - -// WriteSiteNotFound writes appropriate error code to w, signaling that -// requested host is not served by Caddy on a given port. -func WriteSiteNotFound(w http.ResponseWriter, r *http.Request) { - status := http.StatusNotFound - if r.ProtoMajor >= 2 { - // TODO: use http.StatusMisdirectedRequest when it gets defined - status = httpStatusMisdirectedRequest - } - WriteTextResponse(w, status, fmt.Sprintf("%d Site %s is not served on this interface\n", status, r.Host)) -} - -// WriteTextResponse writes body with code status to w. The body will -// be interpreted as plain text. 
-func WriteTextResponse(w http.ResponseWriter, status int, body string) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.Header().Set("X-Content-Type-Options", "nosniff") - w.WriteHeader(status) - if _, err := w.Write([]byte(body)); err != nil { - log.Println("[Error] failed to write body: ", err) - } -} - -// SafePath joins siteRoot and reqPath and converts it to a path that can -// be used to access a path on the local disk. It ensures the path does -// not traverse outside of the site root. -// -// If opening a file, use http.Dir instead. -func SafePath(siteRoot, reqPath string) string { - reqPath = filepath.ToSlash(reqPath) - reqPath = strings.Replace(reqPath, "\x00", "", -1) // NOTE: Go 1.9 checks for null bytes in the syscall package - if siteRoot == "" { - siteRoot = "." - } - return filepath.Join(siteRoot, filepath.FromSlash(path.Clean("/"+reqPath))) -} - -// OriginalURLCtxKey is the key for accessing the original, incoming URL on an HTTP request. -const OriginalURLCtxKey = caddy.CtxKey("original_url") diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/siteconfig.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/siteconfig.go deleted file mode 100644 index 069a4a106..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/siteconfig.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package httpserver - -import ( - "time" - - "github.com/mholt/caddy/caddytls" -) - -// SiteConfig contains information about a site -// (also known as a virtual host). -type SiteConfig struct { - // The address of the site - Addr Address - - // The list of viable index page names of the site - IndexPages []string - - // The hostname to bind listener to; - // defaults to Addr.Host - ListenHost string - - // TLS configuration - TLS *caddytls.Config - - // If true, the Host header in the HTTP request must - // match the SNI value in the TLS handshake (if any). - // This should be enabled whenever a site relies on - // TLS client authentication, for example; or any time - // you want to enforce that THIS site's TLS config - // is used and not the TLS config of any other site - // on the same listener. TODO: Check how relevant this - // is with TLS 1.3. - StrictHostMatching bool - - // Uncompiled middleware stack - middleware []Middleware - - // Compiled middleware stack - middlewareChain Handler - - // listener middleware stack - listenerMiddleware []ListenerMiddleware - - // Directory from which to serve files - Root string - - // A list of files to hide (for example, the - // source Caddyfile). TODO: Enforcing this - // should be centralized, for example, a - // standardized way of loading files from disk - // for a request. - HiddenFiles []string - - // Max request's header/body size - Limits Limits - - // The path to the Caddyfile used to generate this site config - originCaddyfile string - - // These timeout values are used, in conjunction with other - // site configs on the same server instance, to set the - // respective timeout values on the http.Server that - // is created. Sensible values will mitigate slowloris - // attacks and overcome faulty networks, while still - // preserving functionality needed for proxying, - // websockets, etc. - Timeouts Timeouts - - // If true, any requests not matching other site definitions - // may be served by this site. 
- FallbackSite bool -} - -// Timeouts specify various timeouts for a server to use. -// If the associated bool field is true, then the duration -// value should be treated literally (i.e. a zero-value -// duration would mean "no timeout"). If false, the duration -// was left unset, so a zero-value duration would mean to -// use a default value (even if default is non-zero). -type Timeouts struct { - ReadTimeout time.Duration - ReadTimeoutSet bool - ReadHeaderTimeout time.Duration - ReadHeaderTimeoutSet bool - WriteTimeout time.Duration - WriteTimeoutSet bool - IdleTimeout time.Duration - IdleTimeoutSet bool -} - -// Limits specify size limit of request's header and body. -type Limits struct { - MaxRequestHeaderSize int64 - MaxRequestBodySizes []PathLimit -} - -// PathLimit is a mapping from a site's path to its corresponding -// maximum request body size (in bytes) -type PathLimit struct { - Path string - Limit int64 -} - -// AddMiddleware adds a middleware to a site's middleware stack. -func (s *SiteConfig) AddMiddleware(m Middleware) { - s.middleware = append(s.middleware, m) -} - -// AddListenerMiddleware adds a listener middleware to a site's listenerMiddleware stack. -func (s *SiteConfig) AddListenerMiddleware(l ListenerMiddleware) { - s.listenerMiddleware = append(s.listenerMiddleware, l) -} - -// TLSConfig returns s.TLS. -func (s SiteConfig) TLSConfig() *caddytls.Config { - return s.TLS -} - -// Host returns s.Addr.Host. -func (s SiteConfig) Host() string { - return s.Addr.Host -} - -// Port returns s.Addr.Port. -func (s SiteConfig) Port() string { - return s.Addr.Port -} - -// Middleware returns s.middleware (useful for tests). 
-func (s SiteConfig) Middleware() []Middleware { - return s.middleware -} - -// ListenerMiddleware returns s.listenerMiddleware -func (s SiteConfig) ListenerMiddleware() []ListenerMiddleware { - return s.listenerMiddleware -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/tplcontext.go b/vendor/github.com/mholt/caddy/caddyhttp/httpserver/tplcontext.go deleted file mode 100644 index ccad27a1b..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/httpserver/tplcontext.go +++ /dev/null @@ -1,473 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httpserver - -import ( - "bytes" - "crypto/rand" - "fmt" - "io/ioutil" - "log" - mathrand "math/rand" - "net" - "net/http" - "net/url" - "path" - "strings" - "sync" - "text/template" - "time" - - "os" - - "github.com/mholt/caddy/caddytls" - "github.com/russross/blackfriday" -) - -// This file contains the context and functions available for -// use in the templates. - -// Context is the context with which Caddy templates are executed. -type Context struct { - Root http.FileSystem - Req *http.Request - URL *url.URL - Args []interface{} // defined by arguments to .Include - - // just used for adding preload links for server push - responseHeader http.Header -} - -// NewContextWithHeader creates a context with given response header. 
-// -// To plugin developer: -// The returned context's exported fileds remain empty, -// you should then initialize them if you want. -func NewContextWithHeader(rh http.Header) Context { - return Context{ - responseHeader: rh, - } -} - -// Include returns the contents of filename relative to the site root. -func (c Context) Include(filename string, args ...interface{}) (string, error) { - c.Args = args - return ContextInclude(filename, c, c.Root) -} - -// Now returns the current timestamp in the specified format. -func (c Context) Now(format string) string { - return time.Now().Format(format) -} - -// NowDate returns the current date/time that can be used -// in other time functions. -func (c Context) NowDate() time.Time { - return time.Now() -} - -// Cookie gets the value of a cookie with name name. -func (c Context) Cookie(name string) string { - cookies := c.Req.Cookies() - for _, cookie := range cookies { - if cookie.Name == name { - return cookie.Value - } - } - return "" -} - -// Header gets the value of a request header with field name. -func (c Context) Header(name string) string { - return c.Req.Header.Get(name) -} - -// Hostname gets the (remote) hostname of the client making the request. -func (c Context) Hostname() string { - ip := c.IP() - - hostnameList, err := net.LookupAddr(ip) - if err != nil || len(hostnameList) == 0 { - return c.Req.RemoteAddr - } - - return hostnameList[0] -} - -// Env gets a map of the environment variables. -func (c Context) Env() map[string]string { - osEnv := os.Environ() - envVars := make(map[string]string, len(osEnv)) - for _, env := range osEnv { - data := strings.SplitN(env, "=", 2) - if len(data) == 2 && len(data[0]) > 0 { - envVars[data[0]] = data[1] - } - } - return envVars -} - -// IP gets the (remote) IP address of the client making the request. 
-func (c Context) IP() string { - ip, _, err := net.SplitHostPort(c.Req.RemoteAddr) - if err != nil { - return c.Req.RemoteAddr - } - return ip -} - -// To mock the net.InterfaceAddrs from the test. -var networkInterfacesFn = net.InterfaceAddrs - -// ServerIP gets the (local) IP address of the server. -// TODO: The bind directive should be honored in this method (see PR #1474). -func (c Context) ServerIP() string { - addrs, err := networkInterfacesFn() - if err != nil { - return "" - } - - for _, address := range addrs { - // Validate the address and check if it's not a loopback - if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { - if ipnet.IP.To4() != nil || ipnet.IP.To16() != nil { - return ipnet.IP.String() - } - } - } - - return "" -} - -// URI returns the raw, unprocessed request URI (including query -// string and hash) obtained directly from the Request-Line of -// the HTTP request. -func (c Context) URI() string { - return c.Req.RequestURI -} - -// Host returns the hostname portion of the Host header -// from the HTTP request. -func (c Context) Host() (string, error) { - host, _, err := net.SplitHostPort(c.Req.Host) - if err != nil { - if !strings.Contains(c.Req.Host, ":") { - // common with sites served on the default port 80 - return c.Req.Host, nil - } - return "", err - } - return host, nil -} - -// Port returns the port portion of the Host header if specified. -func (c Context) Port() (string, error) { - _, port, err := net.SplitHostPort(c.Req.Host) - if err != nil { - if !strings.Contains(c.Req.Host, ":") { - // common with sites served on the default port 80 - return HTTPPort, nil - } - return "", err - } - return port, nil -} - -// Method returns the method (GET, POST, etc.) of the request. -func (c Context) Method() string { - return c.Req.Method -} - -// PathMatches returns true if the path portion of the request -// URL matches pattern. 
-func (c Context) PathMatches(pattern string) bool { - return Path(c.Req.URL.Path).Matches(pattern) -} - -// Truncate truncates the input string to the given length. -// If length is negative, it returns that many characters -// starting from the end of the string. If the absolute value -// of length is greater than len(input), the whole input is -// returned. -func (c Context) Truncate(input string, length int) string { - if length < 0 && len(input)+length > 0 { - return input[len(input)+length:] - } - if length >= 0 && len(input) > length { - return input[:length] - } - return input -} - -// StripHTML returns s without HTML tags. It is fairly naive -// but works with most valid HTML inputs. -func (c Context) StripHTML(s string) string { - var buf bytes.Buffer - var inTag, inQuotes bool - var tagStart int - for i, ch := range s { - if inTag { - if ch == '>' && !inQuotes { - inTag = false - } else if ch == '<' && !inQuotes { - // false start - buf.WriteString(s[tagStart:i]) - tagStart = i - } else if ch == '"' { - inQuotes = !inQuotes - } - continue - } - if ch == '<' { - inTag = true - tagStart = i - continue - } - buf.WriteRune(ch) - } - if inTag { - // false start - buf.WriteString(s[tagStart:]) - } - return buf.String() -} - -// Ext returns the suffix beginning at the final dot in the final -// slash-separated element of the pathStr (or in other words, the -// file extension). -func (c Context) Ext(pathStr string) string { - return path.Ext(pathStr) -} - -// StripExt returns the input string without the extension, -// which is the suffix starting with the final '.' character -// but not before the final path separator ('/') character. -// If there is no extension, the whole input is returned. -func (c Context) StripExt(path string) string { - for i := len(path) - 1; i >= 0 && path[i] != '/'; i-- { - if path[i] == '.' { - return path[:i] - } - } - return path -} - -// Replace replaces instances of find in input with replacement. 
-func (c Context) Replace(input, find, replacement string) string { - return strings.Replace(input, find, replacement, -1) -} - -// Markdown returns the HTML contents of the markdown contained in filename -// (relative to the site root). -func (c Context) Markdown(filename string) (string, error) { - body, err := c.Include(filename) - if err != nil { - return "", err - } - renderer := blackfriday.HtmlRenderer(0, "", "") - extns := 0 - extns |= blackfriday.EXTENSION_TABLES - extns |= blackfriday.EXTENSION_FENCED_CODE - extns |= blackfriday.EXTENSION_STRIKETHROUGH - extns |= blackfriday.EXTENSION_DEFINITION_LISTS - markdown := blackfriday.Markdown([]byte(body), renderer, extns) - - return string(markdown), nil -} - -// ContextInclude opens filename using fs and executes a template with the context ctx. -// This does the same thing that Context.Include() does, but with the ability to provide -// your own context so that the included files can have access to additional fields your -// type may provide. You can embed Context in your type, then override its Include method -// to call this function with ctx being the instance of your type, and fs being Context.Root. -func ContextInclude(filename string, ctx interface{}, fs http.FileSystem) (string, error) { - file, err := fs.Open(filename) - if err != nil { - return "", err - } - defer file.Close() - - body, err := ioutil.ReadAll(file) - if err != nil { - return "", err - } - - tpl, err := template.New(filename).Funcs(TemplateFuncs).Parse(string(body)) - if err != nil { - return "", err - } - - buf := includeBufs.Get().(*bytes.Buffer) - buf.Reset() - defer includeBufs.Put(buf) - err = tpl.Execute(buf, ctx) - if err != nil { - return "", err - } - - return buf.String(), nil -} - -// ToLower will convert the given string to lower case. -func (c Context) ToLower(s string) string { - return strings.ToLower(s) -} - -// ToUpper will convert the given string to upper case. 
-func (c Context) ToUpper(s string) string { - return strings.ToUpper(s) -} - -// Split is a pass-through to strings.Split. It will split the first argument at each instance of the separator and return a slice of strings. -func (c Context) Split(s string, sep string) []string { - return strings.Split(s, sep) -} - -// Join is a pass-through to strings.Join. It will join the first argument slice with the separator in the second argument and return the result. -func (c Context) Join(a []string, sep string) string { - return strings.Join(a, sep) -} - -// Slice will convert the given arguments into a slice. -func (c Context) Slice(elems ...interface{}) []interface{} { - return elems -} - -// Map will convert the arguments into a map. It expects alternating string keys and values. This is useful for building more complicated data structures -// if you are using subtemplates or things like that. -func (c Context) Map(values ...interface{}) (map[string]interface{}, error) { - if len(values)%2 != 0 { - return nil, fmt.Errorf("Map expects an even number of arguments") - } - dict := make(map[string]interface{}, len(values)/2) - for i := 0; i < len(values); i += 2 { - key, ok := values[i].(string) - if !ok { - return nil, fmt.Errorf("Map keys must be strings") - } - dict[key] = values[i+1] - } - return dict, nil -} - -// Files reads and returns a slice of names from the given directory -// relative to the root of Context c. 
-func (c Context) Files(name string) ([]string, error) { - dir, err := c.Root.Open(path.Clean(name)) - if err != nil { - return nil, err - } - defer dir.Close() - - stat, err := dir.Stat() - if err != nil { - return nil, err - } - if !stat.IsDir() { - return nil, fmt.Errorf("%v is not a directory", name) - } - - dirInfo, err := dir.Readdir(0) - if err != nil { - return nil, err - } - - names := make([]string, len(dirInfo)) - for i, fileInfo := range dirInfo { - names[i] = fileInfo.Name() - } - - return names, nil -} - -// IsMITM returns true if it seems likely that the TLS connection -// is being intercepted. -func (c Context) IsMITM() bool { - if val, ok := c.Req.Context().Value(MitmCtxKey).(bool); ok { - return val - } - return false -} - -// RandomString generates a random string of random length given -// length bounds. Thanks to http://stackoverflow.com/a/35615565/1048862 -// for the clever technique that is fairly fast, secure, and maintains -// proper distributions over the dictionary. -func (c Context) RandomString(minLen, maxLen int) string { - const ( - letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - letterIdxBits = 6 // 6 bits to represent 64 possibilities (indexes) - letterIdxMask = 1< 0 { - ch := string(remainingPath[0]) - next, ok := t.edges[ch] - if !ok { - break - } - if next.site != nil { - longestMatch = next - } - t = next - remainingPath = remainingPath[1:] - } - return longestMatch -} - -// splitHostPath separates host from path in key. -func (t *vhostTrie) splitHostPath(key string) (host, path string) { - parts := strings.SplitN(key, "/", 2) - host, path = strings.ToLower(parts[0]), "/" - if len(parts) > 1 { - path += parts[1] - } - // strip out the port (if present) from the host, since - // each port has its own socket, and each socket has its - // own listener, and each listener has its own server - // instance, and each server instance has its own vhosts. 
- // removing the port is a simple way to standardize so - // when requests come in, we can be sure to get a match. - hostname, _, err := net.SplitHostPort(host) - if err == nil { - host = hostname - } - return -} - -// String returns a list of all the entries in t; assumes that -// t is a root node. -func (t *vhostTrie) String() string { - var s string - for host, edge := range t.edges { - s += edge.str(host) - } - return s -} - -func (t *vhostTrie) str(prefix string) string { - var s string - for key, edge := range t.edges { - if edge.site != nil { - s += prefix + key + "\n" - } - s += edge.str(prefix + key) - } - return s -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/index/index.go b/vendor/github.com/mholt/caddy/caddyhttp/index/index.go deleted file mode 100644 index fefc0ff73..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/index/index.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package index - -import ( - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("index", caddy.Plugin{ - ServerType: "http", - Action: setupIndex, - }) -} - -func setupIndex(c *caddy.Controller) error { - var index []string - - cfg := httpserver.GetConfig(c) - - for c.Next() { - args := c.RemainingArgs() - - if len(args) == 0 { - return c.Errf("Expected at least one index") - } - - for _, in := range args { - index = append(index, in) - } - - cfg.IndexPages = index - } - - return nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/internalsrv/internal.go b/vendor/github.com/mholt/caddy/caddyhttp/internalsrv/internal.go deleted file mode 100644 index 3bd47956d..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/internalsrv/internal.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package internalsrv provides a simple middleware that (a) prevents access -// to internal locations and (b) allows to return files from internal location -// by setting a special header, e.g. in a proxy response. -// -// The package is named internalsrv so as not to conflict with Go tooling -// convention which treats folders called "internal" differently. 
-package internalsrv - -import ( - "net/http" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// Internal middleware protects internal locations from external requests - -// but allows access from the inside by using a special HTTP header. -type Internal struct { - Next httpserver.Handler - Paths []string -} - -const ( - redirectHeader string = "X-Accel-Redirect" - contentLengthHeader string = "Content-Length" - contentEncodingHeader string = "Content-Encoding" - maxRedirectCount int = 10 -) - -func isInternalRedirect(w http.ResponseWriter) bool { - return w.Header().Get(redirectHeader) != "" -} - -// ServeHTTP implements the httpserver.Handler interface. -func (i Internal) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - // Internal location requested? -> Not found. - for _, prefix := range i.Paths { - if httpserver.Path(r.URL.Path).Matches(prefix) { - return http.StatusNotFound, nil - } - } - - // Use internal response writer to ignore responses that will be - // redirected to internal locations - iw := internalResponseWriter{ResponseWriterWrapper: &httpserver.ResponseWriterWrapper{ResponseWriter: w}} - status, err := i.Next.ServeHTTP(iw, r) - - for c := 0; c < maxRedirectCount && isInternalRedirect(iw); c++ { - // Redirect - adapt request URL path and send it again - // "down the chain" - r.URL.Path = iw.Header().Get(redirectHeader) - iw.ClearHeader() - status, err = i.Next.ServeHTTP(iw, r) - } - - if isInternalRedirect(iw) { - // Too many redirect cycles - iw.ClearHeader() - return http.StatusInternalServerError, nil - } - - return status, err -} - -// internalResponseWriter wraps the underlying http.ResponseWriter and ignores -// calls to Write and WriteHeader if the response should be redirected to an -// internal location. -type internalResponseWriter struct { - *httpserver.ResponseWriterWrapper -} - -// ClearHeader removes script headers that would interfere with follow up -// redirect requests. 
-func (w internalResponseWriter) ClearHeader() { - w.Header().Del(redirectHeader) - w.Header().Del(contentLengthHeader) - w.Header().Del(contentEncodingHeader) -} - -// WriteHeader ignores the call if the response should be redirected to an -// internal location. -func (w internalResponseWriter) WriteHeader(code int) { - if !isInternalRedirect(w) { - w.ResponseWriterWrapper.WriteHeader(code) - } -} - -// Write ignores the call if the response should be redirected to an internal -// location. -func (w internalResponseWriter) Write(b []byte) (int, error) { - if isInternalRedirect(w) { - return 0, nil - } - return w.ResponseWriterWrapper.Write(b) -} - -// Interface guards -var _ httpserver.HTTPInterfaces = internalResponseWriter{} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/internalsrv/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/internalsrv/setup.go deleted file mode 100644 index f70147ac8..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/internalsrv/setup.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internalsrv - -import ( - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("internal", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// Internal configures a new Internal middleware instance. 
-func setup(c *caddy.Controller) error { - paths, err := internalParse(c) - if err != nil { - return err - } - - // Append Internal paths to Caddy config HiddenFiles to ensure - // files do not appear in Browse - config := httpserver.GetConfig(c) - config.HiddenFiles = append(config.HiddenFiles, paths...) - - config.AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Internal{Next: next, Paths: paths} - }) - - return nil -} - -func internalParse(c *caddy.Controller) ([]string, error) { - var paths []string - - for c.Next() { - if c.NextArg() { - paths = append(paths, c.Val()) - } - if c.NextArg() { - return nil, c.ArgErr() - } - } - - return paths, nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/limits/handler.go b/vendor/github.com/mholt/caddy/caddyhttp/limits/handler.go deleted file mode 100644 index ded2fabe3..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/limits/handler.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package limits - -import ( - "io" - "net/http" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// Limit is a middleware to control request body size -type Limit struct { - Next httpserver.Handler - BodyLimits []httpserver.PathLimit -} - -func (l Limit) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - if r.Body == nil { - return l.Next.ServeHTTP(w, r) - } - - // apply the path-based request body size limit. - for _, bl := range l.BodyLimits { - if httpserver.Path(r.URL.Path).Matches(bl.Path) { - r.Body = MaxBytesReader(w, r.Body, bl.Limit) - break - } - } - - return l.Next.ServeHTTP(w, r) -} - -// MaxBytesReader and its associated methods are borrowed from the -// Go Standard library (comments intact). The only difference is that -// it returns a ErrMaxBytesExceeded error instead of a generic error message -// when the request body has exceeded the requested limit -func MaxBytesReader(w http.ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser { - return &maxBytesReader{w: w, r: r, n: n} -} - -type maxBytesReader struct { - w http.ResponseWriter - r io.ReadCloser // underlying reader - n int64 // max bytes remaining - err error // sticky error -} - -func (l *maxBytesReader) Read(p []byte) (n int, err error) { - if l.err != nil { - return 0, l.err - } - if len(p) == 0 { - return 0, nil - } - // If they asked for a 32KB byte read but only 5 bytes are - // remaining, no need to read 32KB. 6 bytes will answer the - // question of the whether we hit the limit or go past it. - if int64(len(p)) > l.n+1 { - p = p[:l.n+1] - } - n, err = l.r.Read(p) - - if int64(n) <= l.n { - l.n -= int64(n) - l.err = err - return n, err - } - - n = int(l.n) - l.n = 0 - - // The server code and client code both use - // maxBytesReader. This "requestTooLarge" check is - // only used by the server code. 
To prevent binaries - // which only using the HTTP Client code (such as - // cmd/go) from also linking in the HTTP server, don't - // use a static type assertion to the server - // "*response" type. Check this interface instead: - type requestTooLarger interface { - requestTooLarge() - } - if res, ok := l.w.(requestTooLarger); ok { - res.requestTooLarge() - } - l.err = httpserver.ErrMaxBytesExceeded - return n, l.err -} - -func (l *maxBytesReader) Close() error { - return l.r.Close() -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/limits/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/limits/setup.go deleted file mode 100644 index e9971263a..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/limits/setup.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package limits - -import ( - "errors" - "sort" - "strconv" - "strings" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -const ( - serverType = "http" - pluginName = "limits" -) - -func init() { - caddy.RegisterPlugin(pluginName, caddy.Plugin{ - ServerType: serverType, - Action: setupLimits, - }) -} - -// pathLimitUnparsed is a PathLimit before it's parsed -type pathLimitUnparsed struct { - Path string - Limit string -} - -func setupLimits(c *caddy.Controller) error { - bls, err := parseLimits(c) - if err != nil { - return err - } - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Limit{Next: next, BodyLimits: bls} - }) - return nil -} - -func parseLimits(c *caddy.Controller) ([]httpserver.PathLimit, error) { - config := httpserver.GetConfig(c) - - if !c.Next() { - return nil, c.ArgErr() - } - - args := c.RemainingArgs() - argList := []pathLimitUnparsed{} - headerLimit := "" - - switch len(args) { - case 0: - // Format: limits { - // header - // body - // body - // ... 
- // } - for c.NextBlock() { - kind := c.Val() - pathOrLimit := c.RemainingArgs() - switch kind { - case "header": - if len(pathOrLimit) != 1 { - return nil, c.ArgErr() - } - headerLimit = pathOrLimit[0] - case "body": - if len(pathOrLimit) == 1 { - argList = append(argList, pathLimitUnparsed{ - Path: "/", - Limit: pathOrLimit[0], - }) - break - } - - if len(pathOrLimit) == 2 { - argList = append(argList, pathLimitUnparsed{ - Path: pathOrLimit[0], - Limit: pathOrLimit[1], - }) - break - } - - fallthrough - default: - return nil, c.ArgErr() - } - } - case 1: - // Format: limits - headerLimit = args[0] - argList = []pathLimitUnparsed{{ - Path: "/", - Limit: args[0], - }} - default: - return nil, c.ArgErr() - } - - if headerLimit != "" { - size := parseSize(headerLimit) - if size < 1 { // also disallow size = 0 - return nil, c.ArgErr() - } - config.Limits.MaxRequestHeaderSize = size - } - - if len(argList) > 0 { - pathLimit, err := parseArguments(argList) - if err != nil { - return nil, c.ArgErr() - } - SortPathLimits(pathLimit) - config.Limits.MaxRequestBodySizes = pathLimit - } - - return config.Limits.MaxRequestBodySizes, nil -} - -func parseArguments(args []pathLimitUnparsed) ([]httpserver.PathLimit, error) { - pathLimit := []httpserver.PathLimit{} - - for _, pair := range args { - size := parseSize(pair.Limit) - if size < 1 { // also disallow size = 0 - return pathLimit, errors.New("Parse failed") - } - pathLimit = addPathLimit(pathLimit, pair.Path, size) - } - return pathLimit, nil -} - -var validUnits = []struct { - symbol string - multiplier int64 -}{ - {"KB", 1024}, - {"MB", 1024 * 1024}, - {"GB", 1024 * 1024 * 1024}, - {"B", 1}, - {"", 1}, // defaulting to "B" -} - -// parseSize parses the given string as size limit -// Size are positive numbers followed by a unit (case insensitive) -// Allowed units: "B" (bytes), "KB" (kilo), "MB" (mega), "GB" (giga) -// If the unit is omitted, "b" is assumed -// Returns the parsed size in bytes, or -1 if cannot parse -func 
parseSize(sizeStr string) int64 { - sizeStr = strings.ToUpper(sizeStr) - - for _, unit := range validUnits { - if strings.HasSuffix(sizeStr, unit.symbol) { - size, err := strconv.ParseInt(sizeStr[0:len(sizeStr)-len(unit.symbol)], 10, 64) - if err != nil { - return -1 - } - return size * unit.multiplier - } - } - - // Unreachable code - return -1 -} - -// addPathLimit appends the path-to-request body limit mapping to pathLimit -// Slashes are checked and added to path if necessary. Duplicates are ignored. -func addPathLimit(pathLimit []httpserver.PathLimit, path string, limit int64) []httpserver.PathLimit { - // Enforces preceding slash - if path[0] != '/' { - path = "/" + path - } - - // Use the last value if there are duplicates - for i, p := range pathLimit { - if p.Path == path { - pathLimit[i].Limit = limit - return pathLimit - } - } - - return append(pathLimit, httpserver.PathLimit{Path: path, Limit: limit}) -} - -// SortPathLimits sort pathLimits by their paths length, longest first -func SortPathLimits(pathLimits []httpserver.PathLimit) { - sorter := &pathLimitSorter{ - pathLimits: pathLimits, - by: LengthDescending, - } - sort.Sort(sorter) -} - -// structs and methods implementing the sorting interfaces for PathLimit -type pathLimitSorter struct { - pathLimits []httpserver.PathLimit - by func(p1, p2 *httpserver.PathLimit) bool -} - -func (s *pathLimitSorter) Len() int { - return len(s.pathLimits) -} - -func (s *pathLimitSorter) Swap(i, j int) { - s.pathLimits[i], s.pathLimits[j] = s.pathLimits[j], s.pathLimits[i] -} - -func (s *pathLimitSorter) Less(i, j int) bool { - return s.by(&s.pathLimits[i], &s.pathLimits[j]) -} - -// LengthDescending is the comparator for SortPathLimits -func LengthDescending(p1, p2 *httpserver.PathLimit) bool { - return len(p1.Path) > len(p2.Path) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/log/log.go b/vendor/github.com/mholt/caddy/caddyhttp/log/log.go deleted file mode 100644 index 251f091ca..000000000 --- 
a/vendor/github.com/mholt/caddy/caddyhttp/log/log.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package log implements request (access) logging middleware. -package log - -import ( - "fmt" - "net" - "net/http" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("log", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// Logger is a basic request logging middleware. -type Logger struct { - Next httpserver.Handler - Rules []*Rule - ErrorFunc func(http.ResponseWriter, *http.Request, int) // failover error handler -} - -func (l Logger) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - for _, rule := range l.Rules { - if httpserver.Path(r.URL.Path).Matches(rule.PathScope) { - // Record the response - responseRecorder := httpserver.NewResponseRecorder(w) - - // Attach the Replacer we'll use so that other middlewares can - // set their own placeholders if they want to. - rep := httpserver.NewReplacer(r, responseRecorder, CommonLogEmptyValue) - responseRecorder.Replacer = rep - - // Bon voyage, request! - status, err := l.Next.ServeHTTP(responseRecorder, r) - - if status >= 400 { - // There was an error up the chain, but no response has been written yet. - // The error must be handled here so the log entry will record the response size. 
- if l.ErrorFunc != nil { - l.ErrorFunc(responseRecorder, r, status) - } else { - // Default failover error handler - responseRecorder.WriteHeader(status) - fmt.Fprintf(responseRecorder, "%d %s", status, http.StatusText(status)) - } - status = 0 - } - - // Write log entries - for _, e := range rule.Entries { - // Check if there is an exception to prevent log being written - if !e.Log.ShouldLog(r.URL.Path) { - continue - } - - // Mask IP Address - if e.Log.IPMaskExists { - hostip, _, err := net.SplitHostPort(r.RemoteAddr) - if err == nil { - maskedIP := e.Log.MaskIP(hostip) - // Overwrite log value with Masked version - rep.Set("remote", maskedIP) - } - } - e.Log.Println(rep.Replace(e.Format)) - - } - - return status, err - } - } - return l.Next.ServeHTTP(w, r) -} - -// Entry represents a log entry under a path scope -type Entry struct { - Format string - Log *httpserver.Logger -} - -// Rule configures the logging middleware. -type Rule struct { - PathScope string - Entries []*Entry -} - -const ( - // DefaultLogFilename is the default log filename. - DefaultLogFilename = "access.log" - // CommonLogFormat is the common log format. - CommonLogFormat = `{remote} ` + CommonLogEmptyValue + ` {user} [{when}] "{method} {uri} {proto}" {status} {size}` - // CommonLogEmptyValue is the common empty log value. - CommonLogEmptyValue = "-" - // CombinedLogFormat is the combined log format. - CombinedLogFormat = CommonLogFormat + ` "{>Referer}" "{>User-Agent}"` - // DefaultLogFormat is the default log format. 
- DefaultLogFormat = CommonLogFormat -) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/log/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/log/setup.go deleted file mode 100644 index 0c889a9f7..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/log/setup.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package log - -import ( - "net" - "strings" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// setup sets up the logging middleware. 
-func setup(c *caddy.Controller) error { - rules, err := logParse(c) - if err != nil { - return err - } - - for _, rule := range rules { - for _, entry := range rule.Entries { - entry.Log.Attach(c) - } - } - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Logger{Next: next, Rules: rules, ErrorFunc: httpserver.DefaultErrorFunc} - }) - - return nil -} - -func logParse(c *caddy.Controller) ([]*Rule, error) { - var rules []*Rule - var logExceptions []string - for c.Next() { - args := c.RemainingArgs() - - ip4Mask := net.IPMask(net.ParseIP(DefaultIP4Mask).To4()) - ip6Mask := net.IPMask(net.ParseIP(DefaultIP6Mask)) - ipMaskExists := false - - var logRoller *httpserver.LogRoller - logRoller = httpserver.DefaultLogRoller() - - for c.NextBlock() { - what := c.Val() - where := c.RemainingArgs() - - if what == "ipmask" { - - if len(where) == 0 { - return nil, c.ArgErr() - } - - if where[0] != "" { - ip4MaskStr := where[0] - ipv4 := net.ParseIP(ip4MaskStr).To4() - - if ipv4 == nil { - return nil, c.Err("IPv4 Mask not valid IP Mask Format") - } else { - ip4Mask = net.IPMask(ipv4) - ipMaskExists = true - } - } - - if len(where) > 1 { - - ip6MaskStr := where[1] - ipv6 := net.ParseIP(ip6MaskStr) - - if ipv6 == nil { - return nil, c.Err("IPv6 Mask not valid IP Mask Format") - } else { - ip6Mask = net.IPMask(ipv6) - ipMaskExists = true - } - - } - - } else if what == "except" { - - for i := 0; i < len(where); i++ { - logExceptions = append(logExceptions, where[i]) - } - - } else if httpserver.IsLogRollerSubdirective(what) { - - if err := httpserver.ParseRoller(logRoller, what, where...); err != nil { - return nil, err - } - - } else { - return nil, c.ArgErr() - } - - } - - path := "/" - format := DefaultLogFormat - output := DefaultLogFilename - - switch len(args) { - case 0: - // nothing to change - case 1: - // Only an output file specified - output = args[0] - case 2, 3: - // Path scope, output file, and maybe a format specified 
- path = args[0] - output = args[1] - if len(args) > 2 { - format = strings.Replace(args[2], "{common}", CommonLogFormat, -1) - format = strings.Replace(format, "{combined}", CombinedLogFormat, -1) - } - default: - // Maximum number of args in log directive is 3. - return nil, c.ArgErr() - } - - rules = appendEntry(rules, path, &Entry{ - Log: &httpserver.Logger{ - Output: output, - Roller: logRoller, - V4ipMask: ip4Mask, - V6ipMask: ip6Mask, - IPMaskExists: ipMaskExists, - Exceptions: logExceptions, - }, - Format: format, - }) - } - - return rules, nil -} - -func appendEntry(rules []*Rule, pathScope string, entry *Entry) []*Rule { - for _, rule := range rules { - if rule.PathScope == pathScope { - rule.Entries = append(rule.Entries, entry) - return rules - } - } - - rules = append(rules, &Rule{ - PathScope: pathScope, - Entries: []*Entry{entry}, - }) - - return rules -} - -const ( - // IP Masks that have no effect on IP Address - DefaultIP4Mask = "255.255.255.255" - - DefaultIP6Mask = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff" -) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/markdown/markdown.go b/vendor/github.com/mholt/caddy/caddyhttp/markdown/markdown.go deleted file mode 100644 index 1bbb36b49..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/markdown/markdown.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// Package markdown is middleware to render markdown files as HTML -// on-the-fly. -package markdown - -import ( - "log" - "net/http" - "os" - "path" - "strconv" - "strings" - "text/template" - "time" - - "github.com/mholt/caddy/caddyhttp/httpserver" - "github.com/russross/blackfriday" -) - -// Markdown implements a layer of middleware that serves -// markdown as HTML. -type Markdown struct { - // Server root - Root string - - // Jail the requests to site root with a mock file system - FileSys http.FileSystem - - // Next HTTP handler in the chain - Next httpserver.Handler - - // The list of markdown configurations - Configs []*Config -} - -// Config stores markdown middleware configurations. -type Config struct { - // Markdown renderer - Renderer blackfriday.Renderer - - // Base path to match - PathScope string - - // List of extensions to consider as markdown files - Extensions map[string]struct{} - - // List of style sheets to load for each markdown file - Styles []string - - // List of JavaScript files to load for each markdown file - Scripts []string - - // The list of index files to try - IndexFiles []string - - // Template(s) to render with - Template *template.Template - - // a pair of template's name and its underlying file information - TemplateFiles map[string]*cachedFileInfo -} - -type cachedFileInfo struct { - path string - fi os.FileInfo -} - -// ServeHTTP implements the http.Handler interface. 
-func (md Markdown) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - var cfg *Config - for _, c := range md.Configs { - if httpserver.Path(r.URL.Path).Matches(c.PathScope) { // not negated - cfg = c - break // or goto - } - } - if cfg == nil { - return md.Next.ServeHTTP(w, r) // exit early - } - - // We only deal with HEAD/GET - switch r.Method { - case http.MethodGet, http.MethodHead: - default: - return http.StatusMethodNotAllowed, nil - } - - var dirents []os.FileInfo - var lastModTime time.Time - fpath := r.URL.Path - if idx, ok := httpserver.IndexFile(md.FileSys, fpath, cfg.IndexFiles); ok { - // We're serving a directory index file, which may be a markdown - // file with a template. Let's grab a list of files this directory - // URL points to, and pass that in to any possible template invocations, - // so that templates can customize the look and feel of a directory. - fdp, err := md.FileSys.Open(fpath) - switch { - case err == nil: // nop - case os.IsPermission(err): - return http.StatusForbidden, err - case os.IsExist(err): - return http.StatusNotFound, nil - default: // did we run out of FD? - return http.StatusInternalServerError, err - } - defer fdp.Close() - - // Grab a possible set of directory entries. Note, we do not check - // for errors here (unreadable directory, for example). It may - // still be useful to have a directory template file, without the - // directory contents being present. Note, the directory's last - // modification is also present here (entry "."). 
- dirents, _ = fdp.Readdir(-1) - for _, d := range dirents { - lastModTime = latest(lastModTime, d.ModTime()) - } - - // Set path to found index file - fpath = idx - } - - // If not supported extension, pass on it - if _, ok := cfg.Extensions[path.Ext(fpath)]; !ok { - return md.Next.ServeHTTP(w, r) - } - - // At this point we have a supported extension/markdown - f, err := md.FileSys.Open(fpath) - switch { - case err == nil: // nop - case os.IsPermission(err): - return http.StatusForbidden, err - case os.IsNotExist(err): - return http.StatusNotFound, nil - default: // did we run out of FD? - return http.StatusInternalServerError, err - } - defer f.Close() - - fs, err := f.Stat() - if err != nil { - return http.StatusGone, nil - } - lastModTime = latest(lastModTime, fs.ModTime()) - - ctx := httpserver.NewContextWithHeader(w.Header()) - ctx.Root = md.FileSys - ctx.Req = r - ctx.URL = r.URL - html, err := cfg.Markdown(title(fpath), f, dirents, ctx) - if err != nil { - return http.StatusInternalServerError, err - } - - w.Header().Set("Content-Type", "text/html; charset=utf-8") - w.Header().Set("Content-Length", strconv.Itoa(len(html))) - httpserver.SetLastModifiedHeader(w, lastModTime) - if r.Method == http.MethodGet { - if _, err := w.Write(html); err != nil { - log.Println("[ERROR] failed to write html response: ", err) - } - } - return http.StatusOK, nil -} - -// latest returns the latest time.Time -func latest(t ...time.Time) time.Time { - var last time.Time - - for _, tt := range t { - if tt.After(last) { - last = tt - } - } - - return last -} - -// title gives a backup generated title for a page -func title(p string) string { - return strings.TrimSuffix(path.Base(p), path.Ext(p)) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata.go b/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata.go deleted file mode 100644 index 22826bf15..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata.go 
+++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metadata - -import ( - "bufio" - "bytes" - "time" -) - -var ( - // Date format YYYY-MM-DD HH:MM:SS or YYYY-MM-DD - timeLayout = []string{ - `2006-01-02 15:04:05-0700`, - `2006-01-02 15:04:05`, - `2006-01-02`, - } -) - -// Metadata stores a page's metadata -type Metadata struct { - // Page title - Title string - - // Page template - Template string - - // Publish date - Date time.Time - - // Variables to be used with Template - Variables map[string]interface{} -} - -// NewMetadata returns a new Metadata struct, loaded with the given map -func NewMetadata(parsedMap map[string]interface{}) Metadata { - md := Metadata{ - Variables: make(map[string]interface{}), - } - md.load(parsedMap) - - return md -} - -// load loads parsed values in parsedMap into Metadata -func (m *Metadata) load(parsedMap map[string]interface{}) { - - // Pull top level things out - if title, ok := parsedMap["title"]; ok { - m.Title, _ = title.(string) - } - if template, ok := parsedMap["template"]; ok { - m.Template, _ = template.(string) - } - if date, ok := parsedMap["date"].(string); ok { - for _, layout := range timeLayout { - if t, err := time.Parse(layout, date); err == nil { - m.Date = t - break - } - } - } - - m.Variables = parsedMap -} - -// Parser is a an interface that must be satisfied by each parser -type Parser interface { - // Initialize a parser 
- Init(b *bytes.Buffer) bool - - // Type of metadata - Type() string - - // Parsed metadata. - Metadata() Metadata - - // Raw markdown. - Markdown() []byte -} - -// GetParser returns a parser for the given data -func GetParser(buf []byte) Parser { - for _, p := range parsers() { - b := bytes.NewBuffer(buf) - if p.Init(b) { - return p - } - } - - return nil -} - -// parsers returns all available parsers -func parsers() []Parser { - return []Parser{ - &TOMLParser{}, - &YAMLParser{}, - &JSONParser{}, - - // This one must be last - &NoneParser{}, - } -} - -// Split out prefixed/suffixed metadata with given delimiter -func splitBuffer(b *bytes.Buffer, delim string) (*bytes.Buffer, *bytes.Buffer) { - scanner := bufio.NewScanner(b) - - // Read and check first line - if !scanner.Scan() { - return nil, nil - } - if string(bytes.TrimSpace(scanner.Bytes())) != delim { - return nil, nil - } - - // Accumulate metadata, until delimiter - meta := bytes.NewBuffer(nil) - for scanner.Scan() { - if string(bytes.TrimSpace(scanner.Bytes())) == delim { - break - } - if _, err := meta.Write(scanner.Bytes()); err != nil { - return nil, nil - } - if _, err := meta.WriteRune('\n'); err != nil { - return nil, nil - } - } - // Make sure we saw closing delimiter - if string(bytes.TrimSpace(scanner.Bytes())) != delim { - return nil, nil - } - - // The rest is markdown - markdown := new(bytes.Buffer) - for scanner.Scan() { - if _, err := markdown.Write(scanner.Bytes()); err != nil { - return nil, nil - } - if _, err := markdown.WriteRune('\n'); err != nil { - return nil, nil - } - } - - return meta, markdown -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_json.go b/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_json.go deleted file mode 100644 index 285e70e1a..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_json.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under 
the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metadata - -import ( - "bytes" - "encoding/json" -) - -// JSONParser is the MetadataParser for JSON -type JSONParser struct { - metadata Metadata - markdown *bytes.Buffer -} - -// Type returns the kind of metadata parser implemented by this struct. -func (j *JSONParser) Type() string { - return "JSON" -} - -// Init prepares the metadata metadata/markdown file and parses it -func (j *JSONParser) Init(b *bytes.Buffer) bool { - m := make(map[string]interface{}) - - err := json.Unmarshal(b.Bytes(), &m) - if err != nil { - var offset int - - jerr, ok := err.(*json.SyntaxError) - if !ok { - return false - } - - offset = int(jerr.Offset) - - m = make(map[string]interface{}) - err = json.Unmarshal(b.Next(offset-1), &m) - if err != nil { - return false - } - } - - j.metadata = NewMetadata(m) - j.markdown = bytes.NewBuffer(b.Bytes()) - - return true -} - -// Metadata returns parsed metadata. It should be called -// only after a call to Parse returns without error. -func (j *JSONParser) Metadata() Metadata { - return j.metadata -} - -// Markdown returns the markdown text. It should be called only after a call to Parse returns without error. 
-func (j *JSONParser) Markdown() []byte { - return j.markdown.Bytes() -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_none.go b/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_none.go deleted file mode 100644 index e25e15dd0..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_none.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metadata - -import ( - "bytes" -) - -// NoneParser is the parser for plaintext markdown with no metadata. -type NoneParser struct { - metadata Metadata - markdown *bytes.Buffer -} - -// Type returns the kind of parser this struct is. -func (n *NoneParser) Type() string { - return "None" -} - -// Init preparses and parses the metadata and markdown file -func (n *NoneParser) Init(b *bytes.Buffer) bool { - m := make(map[string]interface{}) - n.metadata = NewMetadata(m) - n.markdown = bytes.NewBuffer(b.Bytes()) - - return true -} - -// Parse the metadata -func (n *NoneParser) Parse(b []byte) ([]byte, error) { - return nil, nil -} - -// Metadata returns parsed metadata. It should be called -// only after a call to Parse returns without error. -func (n *NoneParser) Metadata() Metadata { - return n.metadata -} - -// Markdown returns parsed markdown. It should be called -// only after a call to Parse returns without error. 
-func (n *NoneParser) Markdown() []byte { - return n.markdown.Bytes() -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_toml.go b/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_toml.go deleted file mode 100644 index 03298e653..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_toml.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metadata - -import ( - "bytes" - - "github.com/naoina/toml" -) - -// TOMLParser is the Parser for TOML -type TOMLParser struct { - metadata Metadata - markdown *bytes.Buffer -} - -// Type returns the kind of parser this struct is. -func (t *TOMLParser) Type() string { - return "TOML" -} - -// Init prepares and parses the metadata and markdown file itself -func (t *TOMLParser) Init(b *bytes.Buffer) bool { - meta, data := splitBuffer(b, "+++") - if meta == nil || data == nil { - return false - } - t.markdown = data - - m := make(map[string]interface{}) - if err := toml.Unmarshal(meta.Bytes(), &m); err != nil { - return false - } - t.metadata = NewMetadata(m) - - return true -} - -// Metadata returns parsed metadata. It should be called -// only after a call to Parse returns without error. -func (t *TOMLParser) Metadata() Metadata { - return t.metadata -} - -// Markdown returns parser markdown. It should be called only after a call to Parse returns without error. 
-func (t *TOMLParser) Markdown() []byte { - return t.markdown.Bytes() -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_yaml.go b/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_yaml.go deleted file mode 100644 index ddd90b600..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/markdown/metadata/metadata_yaml.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metadata - -import ( - "bytes" - - "gopkg.in/yaml.v2" -) - -// YAMLParser is the Parser for YAML -type YAMLParser struct { - metadata Metadata - markdown *bytes.Buffer -} - -// Type returns the kind of metadata parser. -func (y *YAMLParser) Type() string { - return "YAML" -} - -// Init prepares the metadata parser for parsing. -func (y *YAMLParser) Init(b *bytes.Buffer) bool { - meta, data := splitBuffer(b, "---") - if meta == nil || data == nil { - return false - } - y.markdown = data - - m := make(map[string]interface{}) - if err := yaml.Unmarshal(meta.Bytes(), &m); err != nil { - return false - } - y.metadata = NewMetadata(m) - - return true -} - -// Metadata returns parsed metadata. It should be called -// only after a call to Parse returns without error. 
-func (y *YAMLParser) Metadata() Metadata { - return y.metadata -} - -// Markdown renders the text as a byte array -func (y *YAMLParser) Markdown() []byte { - return y.markdown.Bytes() -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/markdown/process.go b/vendor/github.com/mholt/caddy/caddyhttp/markdown/process.go deleted file mode 100644 index e0594d991..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/markdown/process.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package markdown - -import ( - "io" - "io/ioutil" - "os" - - "github.com/mholt/caddy/caddyhttp/httpserver" - "github.com/mholt/caddy/caddyhttp/markdown/metadata" - "github.com/mholt/caddy/caddyhttp/markdown/summary" - "github.com/russross/blackfriday" -) - -// FileInfo represents a file in a particular server context. It wraps the os.FileInfo struct. -type FileInfo struct { - os.FileInfo - ctx httpserver.Context -} - -var recognizedMetaTags = []string{ - "author", - "copyright", - "description", - "subject", -} - -// Summarize returns an abbreviated string representation of the markdown stored in this file. -// wordcount is the number of words returned in the summary. 
-func (f FileInfo) Summarize(wordcount int) (string, error) { - fp, err := f.ctx.Root.Open(f.Name()) - if err != nil { - return "", err - } - defer fp.Close() - - buf, err := ioutil.ReadAll(fp) - if err != nil { - return "", err - } - - return string(summary.Markdown(buf, wordcount)), nil -} - -// Markdown processes the contents of a page in b. It parses the metadata -// (if any) and uses the template (if found). -func (c *Config) Markdown(title string, r io.Reader, dirents []os.FileInfo, ctx httpserver.Context) ([]byte, error) { - body, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - parser := metadata.GetParser(body) - markdown := parser.Markdown() - mdata := parser.Metadata() - - // process markdown - extns := 0 - extns |= blackfriday.EXTENSION_TABLES - extns |= blackfriday.EXTENSION_FENCED_CODE - extns |= blackfriday.EXTENSION_STRIKETHROUGH - extns |= blackfriday.EXTENSION_DEFINITION_LISTS - html := blackfriday.Markdown(markdown, c.Renderer, extns) - - // set it as body for template - mdata.Variables["body"] = string(html) - - // fixup title - mdata.Variables["title"] = mdata.Title - if mdata.Variables["title"] == "" { - mdata.Variables["title"] = title - } - - // move available and valid front matters to the meta values - meta := make(map[string]string) - for _, val := range recognizedMetaTags { - if mVal, ok := mdata.Variables[val]; ok { - meta[val] = mVal.(string) - } - } - - // massage possible files - files := []FileInfo{} - for _, ent := range dirents { - file := FileInfo{ - FileInfo: ent, - ctx: ctx, - } - files = append(files, file) - } - - return execTemplate(c, mdata, meta, files, ctx) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/markdown/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/markdown/setup.go deleted file mode 100644 index 45ca71330..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/markdown/setup.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the 
Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package markdown - -import ( - "net/http" - "path/filepath" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" - "github.com/russross/blackfriday" -) - -func init() { - caddy.RegisterPlugin("markdown", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new Markdown middleware instance. -func setup(c *caddy.Controller) error { - mdconfigs, err := markdownParse(c) - if err != nil { - return err - } - - cfg := httpserver.GetConfig(c) - - md := Markdown{ - Root: cfg.Root, - FileSys: http.Dir(cfg.Root), - Configs: mdconfigs, - } - - cfg.AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - md.Next = next - return md - }) - - return nil -} - -func markdownParse(c *caddy.Controller) ([]*Config, error) { - var mdconfigs []*Config - - for c.Next() { - md := &Config{ - Renderer: blackfriday.HtmlRenderer(0, "", ""), - Extensions: make(map[string]struct{}), - Template: GetDefaultTemplate(), - IndexFiles: []string{}, - TemplateFiles: make(map[string]*cachedFileInfo), - } - - // Get the path scope - args := c.RemainingArgs() - switch len(args) { - case 0: - md.PathScope = "/" - case 1: - md.PathScope = args[0] - default: - return mdconfigs, c.ArgErr() - } - - // Load any other configuration parameters - for c.NextBlock() { - if err := loadParams(c, md); err != nil { - return mdconfigs, err - } - } - - // If no extensions were specified, assume some defaults - if 
len(md.Extensions) == 0 { - md.Extensions[".md"] = struct{}{} - md.Extensions[".markdown"] = struct{}{} - md.Extensions[".mdown"] = struct{}{} - } - - // Make a list of index files to match extensions - for ext := range md.Extensions { - md.IndexFiles = append(md.IndexFiles, "index"+ext) - } - mdconfigs = append(mdconfigs, md) - } - - return mdconfigs, nil -} - -func loadParams(c *caddy.Controller, mdc *Config) error { - cfg := httpserver.GetConfig(c) - - switch c.Val() { - case "ext": - for _, ext := range c.RemainingArgs() { - mdc.Extensions[ext] = struct{}{} - } - return nil - case "css": - if !c.NextArg() { - return c.ArgErr() - } - mdc.Styles = append(mdc.Styles, c.Val()) - return nil - case "js": - if !c.NextArg() { - return c.ArgErr() - } - mdc.Scripts = append(mdc.Scripts, c.Val()) - return nil - case "template": - tArgs := c.RemainingArgs() - switch len(tArgs) { - default: - return c.ArgErr() - case 1: - fpath := filepath.ToSlash(filepath.Clean(cfg.Root + string(filepath.Separator) + tArgs[0])) - - if err := SetTemplate(mdc.Template, "", fpath); err != nil { - return c.Errf("default template parse error: %v", err) - } - - mdc.TemplateFiles[""] = &cachedFileInfo{ - path: fpath, - } - return nil - case 2: - fpath := filepath.ToSlash(filepath.Clean(cfg.Root + string(filepath.Separator) + tArgs[1])) - - if err := SetTemplate(mdc.Template, tArgs[0], fpath); err != nil { - return c.Errf("template parse error: %v", err) - } - - mdc.TemplateFiles[tArgs[0]] = &cachedFileInfo{ - path: fpath, - } - return nil - } - case "templatedir": - if !c.NextArg() { - return c.ArgErr() - } - - pattern := c.Val() - _, err := mdc.Template.ParseGlob(pattern) - if err != nil { - return c.Errf("template load error: %v", err) - } - if c.NextArg() { - return c.ArgErr() - } - - paths, err := filepath.Glob(pattern) - if err != nil { - return c.Errf("glob %q failed: %v", pattern, err) - } - for _, path := range paths { - mdc.TemplateFiles[filepath.Base(path)] = &cachedFileInfo{ - path: 
path, - } - } - return nil - default: - return c.Err("Expected valid markdown configuration property") - } -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/markdown/summary/render.go b/vendor/github.com/mholt/caddy/caddyhttp/markdown/summary/render.go deleted file mode 100644 index 95e375105..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/markdown/summary/render.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package summary - -import ( - "bytes" - - "github.com/russross/blackfriday" -) - -// Ensure we implement the Blackfriday Markdown Renderer interface -var _ blackfriday.Renderer = (*renderer)(nil) - -// renderer renders Markdown to plain-text meant for listings and excerpts, -// and implements the blackfriday.Renderer interface. -// -// Many of the methods are stubs with no output to prevent output of HTML markup. -type renderer struct{} - -// Blocklevel callbacks - -// BlockCode is the code tag callback. -func (r renderer) BlockCode(out *bytes.Buffer, text []byte, land string) {} - -// BlockQuote is the quote tag callback. -func (r renderer) BlockQuote(out *bytes.Buffer, text []byte) {} - -// BlockHtml is the HTML tag callback. -func (r renderer) BlockHtml(out *bytes.Buffer, text []byte) {} - -// Header is the header tag callback. 
-func (r renderer) Header(out *bytes.Buffer, text func() bool, level int, id string) {} - -// HRule is the horizontal rule tag callback. -func (r renderer) HRule(out *bytes.Buffer) {} - -// List is the list tag callback. -func (r renderer) List(out *bytes.Buffer, text func() bool, flags int) { - // TODO: This is not desired (we'd rather not write lists as part of summary), - // but see this issue: https://github.com/russross/blackfriday/issues/189 - marker := out.Len() - if !text() { - out.Truncate(marker) - } - out.Write([]byte{' '}) -} - -// ListItem is the list item tag callback. -func (r renderer) ListItem(out *bytes.Buffer, text []byte, flags int) {} - -// Paragraph is the paragraph tag callback. This renders simple paragraph text -// into plain text, such that summaries can be easily generated. -func (r renderer) Paragraph(out *bytes.Buffer, text func() bool) { - marker := out.Len() - if !text() { - out.Truncate(marker) - } - out.Write([]byte{' '}) -} - -// Table is the table tag callback. -func (r renderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {} - -// TableRow is the table row tag callback. -func (r renderer) TableRow(out *bytes.Buffer, text []byte) {} - -// TableHeaderCell is the table header cell tag callback. -func (r renderer) TableHeaderCell(out *bytes.Buffer, text []byte, flags int) {} - -// TableCell is the table cell tag callback. -func (r renderer) TableCell(out *bytes.Buffer, text []byte, flags int) {} - -// Footnotes is the foot notes tag callback. -func (r renderer) Footnotes(out *bytes.Buffer, text func() bool) {} - -// FootnoteItem is the footnote item tag callback. -func (r renderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {} - -// TitleBlock is the title tag callback. -func (r renderer) TitleBlock(out *bytes.Buffer, text []byte) {} - -// Spanlevel callbacks - -// AutoLink is the autolink tag callback. 
-func (r renderer) AutoLink(out *bytes.Buffer, link []byte, kind int) {} - -// CodeSpan is the code span tag callback. Outputs a simple Markdown version -// of the code span. -func (r renderer) CodeSpan(out *bytes.Buffer, text []byte) { - out.Write([]byte("`")) - out.Write(text) - out.Write([]byte("`")) -} - -// DoubleEmphasis is the double emphasis tag callback. Outputs a simple -// plain-text version of the input. -func (r renderer) DoubleEmphasis(out *bytes.Buffer, text []byte) { - out.Write(text) -} - -// Emphasis is the emphasis tag callback. Outputs a simple plain-text -// version of the input. -func (r renderer) Emphasis(out *bytes.Buffer, text []byte) { - out.Write(text) -} - -// Image is the image tag callback. -func (r renderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {} - -// LineBreak is the line break tag callback. -func (r renderer) LineBreak(out *bytes.Buffer) {} - -// Link is the link tag callback. Outputs a simple plain-text version -// of the input. -func (r renderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { - out.Write(content) -} - -// RawHtmlTag is the raw HTML tag callback. -func (r renderer) RawHtmlTag(out *bytes.Buffer, tag []byte) {} - -// TripleEmphasis is the triple emphasis tag callback. Outputs a simple plain-text -// version of the input. -func (r renderer) TripleEmphasis(out *bytes.Buffer, text []byte) { - out.Write(text) -} - -// StrikeThrough is the strikethrough tag callback. -func (r renderer) StrikeThrough(out *bytes.Buffer, text []byte) {} - -// FootnoteRef is the footnote ref tag callback. -func (r renderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {} - -// Lowlevel callbacks - -// Entity callback. Outputs a simple plain-text version of the input. -func (r renderer) Entity(out *bytes.Buffer, entity []byte) { - out.Write(entity) -} - -// NormalText callback. Outputs a simple plain-text version of the input. 
-func (r renderer) NormalText(out *bytes.Buffer, text []byte) { - out.Write(text) -} - -// Header and footer - -// DocumentHeader callback. -func (r renderer) DocumentHeader(out *bytes.Buffer) {} - -// DocumentFooter callback. -func (r renderer) DocumentFooter(out *bytes.Buffer) {} - -// GetFlags returns zero. -func (r renderer) GetFlags() int { return 0 } diff --git a/vendor/github.com/mholt/caddy/caddyhttp/markdown/summary/summary.go b/vendor/github.com/mholt/caddy/caddyhttp/markdown/summary/summary.go deleted file mode 100644 index 18d1db3b0..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/markdown/summary/summary.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package summary - -import ( - "bytes" - - "github.com/russross/blackfriday" -) - -// Markdown formats input using a plain-text renderer, and -// then returns up to the first `wordcount` words as a summary. 
-func Markdown(input []byte, wordcount int) []byte { - words := bytes.Fields(blackfriday.Markdown(input, renderer{}, 0)) - if wordcount > len(words) { - wordcount = len(words) - } - return bytes.Join(words[0:wordcount], []byte{' '}) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/markdown/template.go b/vendor/github.com/mholt/caddy/caddyhttp/markdown/template.go deleted file mode 100644 index ff8ad9b96..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/markdown/template.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package markdown - -import ( - "bytes" - "io/ioutil" - "os" - "sync" - "text/template" - - "github.com/mholt/caddy/caddyhttp/httpserver" - "github.com/mholt/caddy/caddyhttp/markdown/metadata" -) - -// Data represents a markdown document. -type Data struct { - httpserver.Context - Doc map[string]interface{} - Styles []string - Scripts []string - Meta map[string]string - Files []FileInfo -} - -// Include "overrides" the embedded httpserver.Context's Include() -// method so that included files have access to d's fields. -// Note: using {{template 'template-name' .}} instead might be better. 
-func (d Data) Include(filename string, args ...interface{}) (string, error) { - d.Args = args - return httpserver.ContextInclude(filename, d, d.Root) -} - -var templateUpdateMu sync.RWMutex - -// execTemplate executes a template given a requestPath, template, and metadata -func execTemplate(c *Config, mdata metadata.Metadata, meta map[string]string, files []FileInfo, ctx httpserver.Context) ([]byte, error) { - mdData := Data{ - Context: ctx, - Doc: mdata.Variables, - Styles: c.Styles, - Scripts: c.Scripts, - Meta: meta, - Files: files, - } - templateName := mdata.Template - - updateTemplate := func() error { - templateUpdateMu.Lock() - defer templateUpdateMu.Unlock() - - templateFile, ok := c.TemplateFiles[templateName] - if !ok { - return nil - } - - currentFileInfo, err := os.Lstat(templateFile.path) - if err != nil { - return err - } - - if !fileChanged(currentFileInfo, templateFile.fi) { - return nil - } - - // update template due to file changes - err = SetTemplate(c.Template, templateName, templateFile.path) - if err != nil { - return err - } - - templateFile.fi = currentFileInfo - return nil - } - - if err := updateTemplate(); err != nil { - return nil, err - } - - b := new(bytes.Buffer) - templateUpdateMu.RLock() - defer templateUpdateMu.RUnlock() - if err := c.Template.ExecuteTemplate(b, templateName, mdData); err != nil { - return nil, err - } - - return b.Bytes(), nil -} - -func fileChanged(new, old os.FileInfo) bool { - // never checked before - if old == nil { - return true - } - - if new.Size() != old.Size() || - new.Mode() != old.Mode() || - new.ModTime() != old.ModTime() { - return true - } - - return false -} - -// SetTemplate reads in the template with the filename provided. If the file does not exist or is not parsable, it will return an error. 
-func SetTemplate(t *template.Template, name, filename string) error { - - // Read template - buf, err := ioutil.ReadFile(filename) - if err != nil { - return err - } - - // Update if exists - if tt := t.Lookup(name); tt != nil { - _, err = tt.Parse(string(buf)) - return err - } - - // Allocate new name if not - _, err = t.New(name).Parse(string(buf)) - return err -} - -// GetDefaultTemplate returns the default template. -func GetDefaultTemplate() *template.Template { - return template.Must(template.New("").Parse(defaultTemplate)) -} - -const ( - defaultTemplate = ` - - - {{.Doc.title}} - - {{range $key, $val := .Meta}} - - {{end}} - {{- range .Styles}} - - {{- end}} - {{- range .Scripts}} - - {{- end}} - - - {{.Doc.body}} - -` -) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/mime/mime.go b/vendor/github.com/mholt/caddy/caddyhttp/mime/mime.go deleted file mode 100644 index 740a0ab05..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/mime/mime.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mime - -import ( - "net/http" - "path" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// Config represent a mime config. Map from extension to mime-type. -// Note, this should be safe with concurrent read access, as this is -// not modified concurrently. -type Config map[string]string - -// Mime sets Content-Type header of requests based on configurations. 
-type Mime struct { - Next httpserver.Handler - Configs Config -} - -// ServeHTTP implements the httpserver.Handler interface. -func (e Mime) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - // Get a clean /-path, grab the extension - ext := path.Ext(path.Clean(r.URL.Path)) - - if contentType, ok := e.Configs[ext]; ok { - w.Header().Set("Content-Type", contentType) - } - - return e.Next.ServeHTTP(w, r) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/mime/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/mime/setup.go deleted file mode 100644 index 7c9aee5f1..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/mime/setup.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mime - -import ( - "fmt" - "strings" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("mime", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new mime middleware instance. 
-func setup(c *caddy.Controller) error { - configs, err := mimeParse(c) - if err != nil { - return err - } - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Mime{Next: next, Configs: configs} - }) - - return nil -} - -func mimeParse(c *caddy.Controller) (Config, error) { - configs := Config{} - - for c.Next() { - // At least one extension is required - - args := c.RemainingArgs() - switch len(args) { - case 2: - if err := validateExt(configs, args[0]); err != nil { - return configs, err - } - configs[args[0]] = args[1] - case 1: - return configs, c.ArgErr() - case 0: - for c.NextBlock() { - ext := c.Val() - if err := validateExt(configs, ext); err != nil { - return configs, err - } - if !c.NextArg() { - return configs, c.ArgErr() - } - configs[ext] = c.Val() - } - } - - } - - return configs, nil -} - -// validateExt checks for valid file name extension. -func validateExt(configs Config, ext string) error { - if !strings.HasPrefix(ext, ".") { - return fmt.Errorf(`mime: invalid extension "%v" (must start with dot)`, ext) - } - if _, ok := configs[ext]; ok { - return fmt.Errorf(`mime: duplicate extension "%v" found`, ext) - } - return nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/pprof/pprof.go b/vendor/github.com/mholt/caddy/caddyhttp/pprof/pprof.go deleted file mode 100644 index 36938bae4..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/pprof/pprof.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package pprof - -import ( - "net/http" - pp "net/http/pprof" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// BasePath is the base path to match for all pprof requests. -const BasePath = "/debug/pprof" - -// Handler is a simple struct whose ServeHTTP will delegate pprof -// endpoints to their equivalent net/http/pprof handlers. -type Handler struct { - Next httpserver.Handler - Mux *http.ServeMux -} - -// ServeHTTP handles requests to BasePath with pprof, or passes -// all other requests up the chain. -func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - if httpserver.Path(r.URL.Path).Matches(BasePath) { - h.Mux.ServeHTTP(w, r) - return 0, nil - } - return h.Next.ServeHTTP(w, r) -} - -// NewMux returns a new http.ServeMux that routes pprof requests. -// It pretty much copies what the std lib pprof does on init: -// https://golang.org/src/net/http/pprof/pprof.go#L67 -func NewMux() *http.ServeMux { - mux := http.NewServeMux() - mux.HandleFunc(BasePath+"/", func(w http.ResponseWriter, r *http.Request) { - // this endpoint, as implemented in the standard library, doesn't set - // its Content-Type header, so using this can confuse clients, especially - // if gzipping... 
- w.Header().Set("Content-Type", "text/html; charset=utf-8") - pp.Index(w, r) - }) - mux.HandleFunc(BasePath+"/cmdline", pp.Cmdline) - mux.HandleFunc(BasePath+"/profile", pp.Profile) - mux.HandleFunc(BasePath+"/symbol", pp.Symbol) - mux.HandleFunc(BasePath+"/trace", pp.Trace) - return mux -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/pprof/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/pprof/setup.go deleted file mode 100644 index c829020e7..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/pprof/setup.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pprof - -import ( - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("pprof", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup returns a new instance of a pprof handler. It accepts no arguments or options. 
-func setup(c *caddy.Controller) error { - found := false - - for c.Next() { - if found { - return c.Err("pprof can only be specified once") - } - if len(c.RemainingArgs()) != 0 { - return c.ArgErr() - } - if c.NextBlock() { - return c.ArgErr() - } - found = true - } - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return &Handler{Next: next, Mux: NewMux()} - }) - - return nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/proxy/body.go b/vendor/github.com/mholt/caddy/caddyhttp/proxy/body.go deleted file mode 100644 index 45ab6a906..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/proxy/body.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package proxy - -import ( - "bytes" - "io" - "io/ioutil" -) - -type bufferedBody struct { - *bytes.Reader -} - -func (*bufferedBody) Close() error { - return nil -} - -// rewind allows bufferedBody to be read again. -func (b *bufferedBody) rewind() error { - if b == nil { - return nil - } - _, err := b.Seek(0, io.SeekStart) - return err -} - -// newBufferedBody returns *bufferedBody to use in place of src. Closes src -// and returns Read error on src. All content from src is buffered. 
-func newBufferedBody(src io.ReadCloser) (*bufferedBody, error) { - if src == nil { - return nil, nil - } - b, err := ioutil.ReadAll(src) - src.Close() - if err != nil { - return nil, err - } - return &bufferedBody{ - Reader: bytes.NewReader(b), - }, nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/proxy/policy.go b/vendor/github.com/mholt/caddy/caddyhttp/proxy/policy.go deleted file mode 100644 index 0f8fab44f..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/proxy/policy.go +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package proxy - -import ( - "hash/fnv" - "log" - "math" - "math/rand" - "net" - "net/http" - "sync" -) - -// HostPool is a collection of UpstreamHosts. -type HostPool []*UpstreamHost - -// Policy decides how a host will be selected from a pool. 
-type Policy interface { - Select(pool HostPool, r *http.Request) *UpstreamHost -} - -func init() { - RegisterPolicy("random", func(arg string) Policy { return &Random{} }) - RegisterPolicy("least_conn", func(arg string) Policy { return &LeastConn{} }) - RegisterPolicy("round_robin", func(arg string) Policy { return &RoundRobin{} }) - RegisterPolicy("ip_hash", func(arg string) Policy { return &IPHash{} }) - RegisterPolicy("first", func(arg string) Policy { return &First{} }) - RegisterPolicy("uri_hash", func(arg string) Policy { return &URIHash{} }) - RegisterPolicy("header", func(arg string) Policy { return &Header{arg} }) -} - -// Random is a policy that selects up hosts from a pool at random. -type Random struct{} - -// Select selects an up host at random from the specified pool. -func (r *Random) Select(pool HostPool, request *http.Request) *UpstreamHost { - - // Because the number of available hosts isn't known - // up front, the host is selected via reservoir sampling - // https://en.wikipedia.org/wiki/Reservoir_sampling - var randHost *UpstreamHost - count := 0 - for _, host := range pool { - if !host.Available() { - continue - } - - // (n % 1 == 0) holds for all n, therefore randHost - // will always get assigned a value if there is - // at least 1 available host - count++ - if (rand.Int() % count) == 0 { - randHost = host - } - } - return randHost -} - -// LeastConn is a policy that selects the host with the least connections. -type LeastConn struct{} - -// Select selects the up host with the least number of connections in the -// pool. If more than one host has the same least number of connections, -// one of the hosts is chosen at random. 
-func (r *LeastConn) Select(pool HostPool, request *http.Request) *UpstreamHost { - var bestHost *UpstreamHost - count := 0 - leastConn := int64(math.MaxInt64) - for _, host := range pool { - if !host.Available() { - continue - } - - if host.Conns < leastConn { - leastConn = host.Conns - count = 0 - } - - // Among hosts with same least connections, perform a reservoir - // sample: https://en.wikipedia.org/wiki/Reservoir_sampling - if host.Conns == leastConn { - count++ - if (rand.Int() % count) == 0 { - bestHost = host - } - } - } - return bestHost -} - -// RoundRobin is a policy that selects hosts based on round-robin ordering. -type RoundRobin struct { - robin uint32 - mutex sync.Mutex -} - -// Select selects an up host from the pool using a round-robin ordering scheme. -func (r *RoundRobin) Select(pool HostPool, request *http.Request) *UpstreamHost { - poolLen := uint32(len(pool)) - r.mutex.Lock() - defer r.mutex.Unlock() - // Return next available host - for i := uint32(0); i < poolLen; i++ { - r.robin++ - host := pool[r.robin%poolLen] - if host.Available() { - return host - } - } - return nil -} - -// hostByHashing returns an available host from pool based on a hashable string -func hostByHashing(pool HostPool, s string) *UpstreamHost { - poolLen := uint32(len(pool)) - index := hash(s) % poolLen - for i := uint32(0); i < poolLen; i++ { - index += i - host := pool[index%poolLen] - if host.Available() { - return host - } - } - return nil -} - -// hash calculates a hash based on string s -func hash(s string) uint32 { - h := fnv.New32a() - if _, err := h.Write([]byte(s)); err != nil { - log.Println("[ERROR] failed to write bytes: ", err) - } - return h.Sum32() -} - -// IPHash is a policy that selects hosts based on hashing the request IP -type IPHash struct{} - -// Select selects an up host from the pool based on hashing the request IP -func (r *IPHash) Select(pool HostPool, request *http.Request) *UpstreamHost { - clientIP, _, err := 
net.SplitHostPort(request.RemoteAddr) - if err != nil { - clientIP = request.RemoteAddr - } - return hostByHashing(pool, clientIP) -} - -// URIHash is a policy that selects the host based on hashing the request URI -type URIHash struct{} - -// Select selects the host based on hashing the URI -func (r *URIHash) Select(pool HostPool, request *http.Request) *UpstreamHost { - return hostByHashing(pool, request.RequestURI) -} - -// First is a policy that selects the first available host -type First struct{} - -// Select selects the first available host from the pool -func (r *First) Select(pool HostPool, request *http.Request) *UpstreamHost { - for _, host := range pool { - if host.Available() { - return host - } - } - return nil -} - -// Header is a policy that selects based on a hash of the given header -type Header struct { - // The name of the request header, the value of which will determine - // how the request is routed - Name string -} - -var roundRobinPolicier RoundRobin - -// Select selects the host based on hashing the header value -func (r *Header) Select(pool HostPool, request *http.Request) *UpstreamHost { - if r.Name == "" { - return nil - } - val := request.Header.Get(r.Name) - if val == "" { - // fallback to RoundRobin policy in case no Header in request - return roundRobinPolicier.Select(pool, request) - } - return hostByHashing(pool, val) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/proxy/proxy.go b/vendor/github.com/mholt/caddy/caddyhttp/proxy/proxy.go deleted file mode 100644 index fe10ea614..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/proxy/proxy.go +++ /dev/null @@ -1,420 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package proxy is middleware that proxies HTTP requests. -package proxy - -import ( - "context" - "errors" - "net" - "net/http" - "net/url" - "strings" - "sync/atomic" - "time" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// Proxy represents a middleware instance that can proxy requests. -type Proxy struct { - Next httpserver.Handler - Upstreams []Upstream -} - -// Upstream manages a pool of proxy upstream hosts. -type Upstream interface { - // The path this upstream host should be routed on - From() string - - // Selects an upstream host to be routed to. It - // should return a suitable upstream host, or nil - // if no such hosts are available. - Select(*http.Request) *UpstreamHost - - // Checks if subpath is not an ignored path - AllowedPath(string) bool - - // Gets the duration of the headstart the first - // connection is given in the Go standard library's - // implementation of "Happy Eyeballs" when DualStack - // is enabled in net.Dialer. - GetFallbackDelay() time.Duration - - // Gets how long to try selecting upstream hosts - // in the case of cascading failures. - GetTryDuration() time.Duration - - // Gets how long to wait between selecting upstream - // hosts in the case of cascading failures. - GetTryInterval() time.Duration - - // Gets the number of upstream hosts. - GetHostCount() int - - // Gets how long to wait before timing out - // the request - GetTimeout() time.Duration - - // Stops the upstream from proxying requests to shutdown goroutines cleanly. 
- Stop() error -} - -// UpstreamHostDownFunc can be used to customize how Down behaves. -type UpstreamHostDownFunc func(*UpstreamHost) bool - -// UpstreamHost represents a single proxy upstream -type UpstreamHost struct { - // This field is read & written to concurrently, so all access must use - // atomic operations. - Conns int64 // must be first field to be 64-bit aligned on 32-bit systems - MaxConns int64 - Name string // hostname of this upstream host - UpstreamHeaders http.Header - DownstreamHeaders http.Header - FailTimeout time.Duration - CheckDown UpstreamHostDownFunc - WithoutPathPrefix string - ReverseProxy *ReverseProxy - Fails int32 - // This is an int32 so that we can use atomic operations to do concurrent - // reads & writes to this value. The default value of 0 indicates that it - // is healthy and any non-zero value indicates unhealthy. - Unhealthy int32 - HealthCheckResult atomic.Value - UpstreamHeaderReplacements headerReplacements - DownstreamHeaderReplacements headerReplacements -} - -// Down checks whether the upstream host is down or not. -// Down will try to use uh.CheckDown first, and will fall -// back to some default criteria if necessary. -func (uh *UpstreamHost) Down() bool { - if uh.CheckDown == nil { - // Default settings - return atomic.LoadInt32(&uh.Unhealthy) != 0 || atomic.LoadInt32(&uh.Fails) > 0 - } - return uh.CheckDown(uh) -} - -// Full checks whether the upstream host has reached its maximum connections -func (uh *UpstreamHost) Full() bool { - return uh.MaxConns > 0 && atomic.LoadInt64(&uh.Conns) >= uh.MaxConns -} - -// Available checks whether the upstream host is available for proxying to -func (uh *UpstreamHost) Available() bool { - return !uh.Down() && !uh.Full() -} - -// ServeHTTP satisfies the httpserver.Handler interface. 
-func (p Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - // start by selecting most specific matching upstream config - upstream := p.match(r) - if upstream == nil { - return p.Next.ServeHTTP(w, r) - } - - // this replacer is used to fill in header field values - replacer := httpserver.NewReplacer(r, nil, "") - - // outreq is the request that makes a roundtrip to the backend - outreq, cancel := createUpstreamRequest(w, r) - defer cancel() - - // If we have more than one upstream host defined and if retrying is enabled - // by setting try_duration to a non-zero value, caddy will try to - // retry the request at a different host if the first one failed. - // - // This requires us to possibly rewind and replay the request body though, - // which in turn requires us to buffer the request body first. - // - // An unbuffered request is usually preferrable, because it reduces latency - // as well as memory usage. Furthermore it enables different kinds of - // HTTP streaming applications like gRPC for instance. - requiresBuffering := upstream.GetHostCount() > 1 && upstream.GetTryDuration() != 0 - - if requiresBuffering { - body, err := newBufferedBody(outreq.Body) - if err != nil { - return http.StatusBadRequest, errors.New("failed to read downstream request body") - } - if body != nil { - outreq.Body = body - } - } - - // The keepRetrying function will return true if we should - // loop and try to select another host, or false if we - // should break and stop retrying. 
- start := time.Now() - keepRetrying := func(backendErr error) bool { - // if downstream has canceled the request, break - if backendErr == context.Canceled { - return false - } - // if we've tried long enough, break - if time.Since(start) >= upstream.GetTryDuration() { - return false - } - // otherwise, wait and try the next available host - time.Sleep(upstream.GetTryInterval()) - return true - } - - var backendErr error - for { - // since Select() should give us "up" hosts, keep retrying - // hosts until timeout (or until we get a nil host). - host := upstream.Select(r) - if host == nil { - if backendErr == nil { - backendErr = errors.New("no hosts available upstream") - } - if !keepRetrying(backendErr) { - break - } - continue - } - if rr, ok := w.(*httpserver.ResponseRecorder); ok && rr.Replacer != nil { - rr.Replacer.Set("upstream", host.Name) - } - - proxy := host.ReverseProxy - - // a backend's name may contain more than just the host, - // so we parse it as a URL to try to isolate the host. 
- if nameURL, err := url.Parse(host.Name); err == nil { - outreq.Host = nameURL.Host - if proxy == nil { - proxy = NewSingleHostReverseProxy(nameURL, - host.WithoutPathPrefix, - http.DefaultMaxIdleConnsPerHost, - upstream.GetTimeout(), - upstream.GetFallbackDelay(), - ) - } - - // use upstream credentials by default - if outreq.Header.Get("Authorization") == "" && nameURL.User != nil { - pwd, _ := nameURL.User.Password() - outreq.SetBasicAuth(nameURL.User.Username(), pwd) - } - } else { - outreq.Host = host.Name - } - if proxy == nil { - return http.StatusInternalServerError, errors.New("proxy for host '" + host.Name + "' is nil") - } - - // set headers for request going upstream - if host.UpstreamHeaders != nil { - // modify headers for request that will be sent to the upstream host - mutateHeadersByRules(outreq.Header, host.UpstreamHeaders, replacer, host.UpstreamHeaderReplacements) - if hostHeaders, ok := outreq.Header["Host"]; ok && len(hostHeaders) > 0 { - outreq.Host = hostHeaders[len(hostHeaders)-1] - } - } - - // prepare a function that will update response - // headers coming back downstream - var downHeaderUpdateFn respUpdateFn - if host.DownstreamHeaders != nil { - downHeaderUpdateFn = createRespHeaderUpdateFn(host.DownstreamHeaders, replacer, host.DownstreamHeaderReplacements) - } - - // Before we retry the request we have to make sure - // that the body is rewound to it's beginning. - if bb, ok := outreq.Body.(*bufferedBody); ok { - if err := bb.rewind(); err != nil { - return http.StatusInternalServerError, errors.New("unable to rewind downstream request body") - } - } - - // tell the proxy to serve the request - // - // NOTE: - // The call to proxy.ServeHTTP can theoretically panic. - // To prevent host.Conns from getting out-of-sync we thus have to - // make sure that it's _always_ correctly decremented afterwards. 
- func() { - atomic.AddInt64(&host.Conns, 1) - defer atomic.AddInt64(&host.Conns, -1) - backendErr = proxy.ServeHTTP(w, outreq, downHeaderUpdateFn) - }() - - // if no errors, we're done here - if backendErr == nil { - return 0, nil - } - - if backendErr == httpserver.ErrMaxBytesExceeded { - return http.StatusRequestEntityTooLarge, backendErr - } - - if backendErr == context.Canceled { - return CustomStatusContextCancelled, backendErr - } - - // failover; remember this failure for some time if - // request failure counting is enabled - timeout := host.FailTimeout - if timeout > 0 { - atomic.AddInt32(&host.Fails, 1) - go func(host *UpstreamHost, timeout time.Duration) { - time.Sleep(timeout) - atomic.AddInt32(&host.Fails, -1) - }(host, timeout) - } - - // if we've tried long enough, break - if !keepRetrying(backendErr) { - break - } - } - - return http.StatusBadGateway, backendErr -} - -// match finds the best match for a proxy config based on r. -func (p Proxy) match(r *http.Request) Upstream { - var u Upstream - var longestMatch int - for _, upstream := range p.Upstreams { - basePath := upstream.From() - if !httpserver.Path(r.URL.Path).Matches(basePath) || !upstream.AllowedPath(r.URL.Path) { - continue - } - if len(basePath) > longestMatch { - longestMatch = len(basePath) - u = upstream - } - } - return u -} - -// createUpstreamRequest shallow-copies r into a new request -// that can be sent upstream. -// -// Derived from reverseproxy.go in the standard Go httputil package. -func createUpstreamRequest(rw http.ResponseWriter, r *http.Request) (*http.Request, context.CancelFunc) { - // Original incoming server request may be canceled by the - // user or by std lib(e.g. too many idle connections). 
- ctx, cancel := context.WithCancel(r.Context()) - if cn, ok := rw.(http.CloseNotifier); ok { - notifyChan := cn.CloseNotify() - go func() { - select { - case <-notifyChan: - cancel() - case <-ctx.Done(): - } - }() - } - - outreq := r.WithContext(ctx) // includes shallow copies of maps, but okay - - // We should set body to nil explicitly if request body is empty. - // For server requests the Request Body is always non-nil. - if r.ContentLength == 0 { - outreq.Body = nil - } - - // We are modifying the same underlying map from req (shallow - // copied above) so we only copy it if necessary. - copiedHeaders := false - - // Remove hop-by-hop headers listed in the "Connection" header. - // See RFC 2616, section 14.10. - if c := outreq.Header.Get("Connection"); c != "" { - for _, f := range strings.Split(c, ",") { - if f = strings.TrimSpace(f); f != "" { - if !copiedHeaders { - outreq.Header = make(http.Header) - copyHeader(outreq.Header, r.Header) - copiedHeaders = true - } - outreq.Header.Del(f) - } - } - } - - // Remove hop-by-hop headers to the backend. Especially - // important is "Connection" because we want a persistent - // connection, regardless of what the client sent to us. - for _, h := range hopHeaders { - if outreq.Header.Get(h) != "" { - if !copiedHeaders { - outreq.Header = make(http.Header) - copyHeader(outreq.Header, r.Header) - copiedHeaders = true - } - outreq.Header.Del(h) - } - } - - if clientIP, _, err := net.SplitHostPort(r.RemoteAddr); err == nil { - // If we aren't the first proxy, retain prior - // X-Forwarded-For information as a comma+space - // separated list and fold multiple headers into one. 
- if prior, ok := outreq.Header["X-Forwarded-For"]; ok { - clientIP = strings.Join(prior, ", ") + ", " + clientIP - } - outreq.Header.Set("X-Forwarded-For", clientIP) - } - - return outreq, cancel -} - -func createRespHeaderUpdateFn(rules http.Header, replacer httpserver.Replacer, replacements headerReplacements) respUpdateFn { - return func(resp *http.Response) { - mutateHeadersByRules(resp.Header, rules, replacer, replacements) - } -} - -func mutateHeadersByRules(headers, rules http.Header, repl httpserver.Replacer, replacements headerReplacements) { - for ruleField, ruleValues := range rules { - if strings.HasPrefix(ruleField, "+") { - for _, ruleValue := range ruleValues { - replacement := repl.Replace(ruleValue) - if len(replacement) > 0 { - headers.Add(strings.TrimPrefix(ruleField, "+"), replacement) - } - } - } else if strings.HasPrefix(ruleField, "-") { - headers.Del(strings.TrimPrefix(ruleField, "-")) - } else if len(ruleValues) > 0 { - replacement := repl.Replace(ruleValues[len(ruleValues)-1]) - if len(replacement) > 0 { - headers.Set(ruleField, replacement) - } - } - } - - for ruleField, ruleValues := range replacements { - for _, ruleValue := range ruleValues { - // Replace variables in replacement string - replacement := repl.Replace(ruleValue.to) - original := headers.Get(ruleField) - if len(replacement) > 0 && len(original) > 0 { - // Replace matches in original string with replacement string - replaced := ruleValue.regexp.ReplaceAllString(original, replacement) - headers.Set(ruleField, replaced) - } - } - } -} - -const CustomStatusContextCancelled = 499 diff --git a/vendor/github.com/mholt/caddy/caddyhttp/proxy/reverseproxy.go b/vendor/github.com/mholt/caddy/caddyhttp/proxy/reverseproxy.go deleted file mode 100644 index c20be0c1f..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/proxy/reverseproxy.go +++ /dev/null @@ -1,776 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); 
-// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file is adapted from code in the net/http/httputil -// package of the Go standard library, which is by the -// Go Authors, and bears this copyright and license info: -// -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// This file has been modified from the standard lib to -// meet the needs of the application. - -package proxy - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "log" - "net" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "golang.org/x/net/http2" - - "github.com/lucas-clemente/quic-go" - "github.com/lucas-clemente/quic-go/h2quic" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -var ( - defaultDialer = &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - - bufferPool = sync.Pool{New: createBuffer} - - defaultCryptoHandshakeTimeout = 10 * time.Second -) - -func createBuffer() interface{} { - return make([]byte, 0, 32*1024) -} - -func pooledIoCopy(dst io.Writer, src io.Reader) { - buf := bufferPool.Get().([]byte) - defer bufferPool.Put(buf) - - // CopyBuffer only uses buf up to its length and panics if it's 0. - // Due to that we extend buf's length to its capacity here and - // ensure it's always non-zero. 
- bufCap := cap(buf) - if _, err := io.CopyBuffer(dst, src, buf[0:bufCap:bufCap]); err != nil { - log.Println("[ERROR] failed to copy buffer: ", err) - } -} - -// onExitFlushLoop is a callback set by tests to detect the state of the -// flushLoop() goroutine. -var onExitFlushLoop func() - -// ReverseProxy is an HTTP Handler that takes an incoming request and -// sends it to another server, proxying the response back to the -// client. -type ReverseProxy struct { - // Director must be a function which modifies - // the request into a new request to be sent - // using Transport. Its response is then copied - // back to the original client unmodified. - Director func(*http.Request) - - // The transport used to perform proxy requests. - Transport http.RoundTripper - - // FlushInterval specifies the flush interval - // to flush to the client while copying the - // response body. - // If zero, no periodic flushing is done. - FlushInterval time.Duration - - // dialer is used when values from the - // defaultDialer need to be overridden per Proxy - dialer *net.Dialer - - srvResolver srvResolver -} - -// Though the relevant directive prefix is just "unix:", url.Parse -// will - assuming the regular URL scheme - add additional slashes -// as if "unix" was a request protocol. -// What we need is just the path, so if "unix:/var/run/www.socket" -// was the proxy directive, the parsed hostName would be -// "unix:///var/run/www.socket", hence the ambiguous trimming. 
-func socketDial(hostName string, timeout time.Duration) func(network, addr string) (conn net.Conn, err error) { - return func(network, addr string) (conn net.Conn, err error) { - return net.DialTimeout("unix", hostName[len("unix://"):], timeout) - } -} - -func (rp *ReverseProxy) srvDialerFunc(locator string, timeout time.Duration) func(network, addr string) (conn net.Conn, err error) { - service := locator - if strings.HasPrefix(locator, "srv://") { - service = locator[6:] - } else if strings.HasPrefix(locator, "srv+https://") { - service = locator[12:] - } - - return func(network, addr string) (conn net.Conn, err error) { - _, addrs, err := rp.srvResolver.LookupSRV(context.Background(), "", "", service) - if err != nil { - return nil, err - } - return net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port), timeout) - } -} - -func singleJoiningSlash(a, b string) string { - aSlash := strings.HasSuffix(a, "/") - bSlash := strings.HasPrefix(b, "/") - switch { - case aSlash && bSlash: - return a + b[1:] - case !aSlash && !bSlash && b != "": - return a + "/" + b - } - return a + b -} - -// NewSingleHostReverseProxy returns a new ReverseProxy that rewrites -// URLs to the scheme, host, and base path provided in target. If the -// target's path is "/base" and the incoming request was for "/dir", -// the target request will be for /base/dir. -// Without logic: target's path is "/", incoming is "/api/messages", -// without is "/api", then the target request will be for /messages. 
-func NewSingleHostReverseProxy(target *url.URL, without string, keepalive int, timeout, fallbackDelay time.Duration) *ReverseProxy { - targetQuery := target.RawQuery - director := func(req *http.Request) { - if target.Scheme == "unix" { - // to make Dial work with unix URL, - // scheme and host have to be faked - req.URL.Scheme = "http" - req.URL.Host = "socket" - } else if target.Scheme == "srv" { - req.URL.Scheme = "http" - req.URL.Host = target.Host - } else if target.Scheme == "srv+https" { - req.URL.Scheme = "https" - req.URL.Host = target.Host - } else { - req.URL.Scheme = target.Scheme - req.URL.Host = target.Host - } - - // remove the `without` prefix - if without != "" { - req.URL.Path = strings.TrimPrefix(req.URL.Path, without) - if req.URL.Opaque != "" { - req.URL.Opaque = strings.TrimPrefix(req.URL.Opaque, without) - } - if req.URL.RawPath != "" { - req.URL.RawPath = strings.TrimPrefix(req.URL.RawPath, without) - } - } - - // prefer returns val if it isn't empty, otherwise def - prefer := func(val, def string) string { - if val != "" { - return val - } - return def - } - - // Make up the final URL by concatenating the request and target URL. - // - // If there is encoded part in request or target URL, - // the final URL should also be in encoded format. - // Here, we concatenate their encoded parts which are stored - // in URL.Opaque and URL.RawPath, if it is empty use - // URL.Path instead. - if req.URL.Opaque != "" || target.Opaque != "" { - req.URL.Opaque = singleJoiningSlash( - prefer(target.Opaque, target.Path), - prefer(req.URL.Opaque, req.URL.Path)) - } - if req.URL.RawPath != "" || target.RawPath != "" { - req.URL.RawPath = singleJoiningSlash( - prefer(target.RawPath, target.Path), - prefer(req.URL.RawPath, req.URL.Path)) - } - req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path) - - // Trims the path of the socket from the URL path. 
- // This is done because req.URL passed to your proxied service - // will have the full path of the socket file prefixed to it. - // Calling /test on a server that proxies requests to - // unix:/var/run/www.socket will thus set the requested path - // to /var/run/www.socket/test, rendering paths useless. - if target.Scheme == "unix" { - // See comment on socketDial for the trim - socketPrefix := target.String()[len("unix://"):] - req.URL.Path = strings.TrimPrefix(req.URL.Path, socketPrefix) - if req.URL.Opaque != "" { - req.URL.Opaque = strings.TrimPrefix(req.URL.Opaque, socketPrefix) - } - if req.URL.RawPath != "" { - req.URL.RawPath = strings.TrimPrefix(req.URL.RawPath, socketPrefix) - } - } - - if targetQuery == "" || req.URL.RawQuery == "" { - req.URL.RawQuery = targetQuery + req.URL.RawQuery - } else { - req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery - } - } - - dialer := *defaultDialer - if timeout != defaultDialer.Timeout { - dialer.Timeout = timeout - } - if fallbackDelay != defaultDialer.FallbackDelay { - dialer.FallbackDelay = fallbackDelay - } - - rp := &ReverseProxy{ - Director: director, - FlushInterval: 250 * time.Millisecond, // flushing good for streaming & server-sent events - srvResolver: net.DefaultResolver, - dialer: &dialer, - } - - if target.Scheme == "unix" { - rp.Transport = &http.Transport{ - Dial: socketDial(target.String(), timeout), - } - } else if target.Scheme == "quic" { - rp.Transport = &h2quic.RoundTripper{ - QuicConfig: &quic.Config{ - HandshakeTimeout: defaultCryptoHandshakeTimeout, - KeepAlive: true, - }, - } - } else if keepalive != http.DefaultMaxIdleConnsPerHost || strings.HasPrefix(target.Scheme, "srv") { - dialFunc := rp.dialer.Dial - if strings.HasPrefix(target.Scheme, "srv") { - dialFunc = rp.srvDialerFunc(target.String(), timeout) - } - - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: dialFunc, - TLSHandshakeTimeout: defaultCryptoHandshakeTimeout, - ExpectContinueTimeout: 1 * 
time.Second, - } - if keepalive == 0 { - transport.DisableKeepAlives = true - } else { - transport.MaxIdleConnsPerHost = keepalive - } - if httpserver.HTTP2 { - if err := http2.ConfigureTransport(transport); err != nil { - log.Println("[ERROR] failed to configure transport to use HTTP/2: ", err) - } - } - rp.Transport = transport - } else { - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: rp.dialer.Dial, - } - if httpserver.HTTP2 { - if err := http2.ConfigureTransport(transport); err != nil { - log.Println("[ERROR] failed to configure transport to use HTTP/2: ", err) - } - } - rp.Transport = transport - } - return rp -} - -// UseInsecureTransport is used to facilitate HTTPS proxying -// when it is OK for upstream to be using a bad certificate, -// since this transport skips verification. -func (rp *ReverseProxy) UseInsecureTransport() { - if transport, ok := rp.Transport.(*http.Transport); ok { - if transport.TLSClientConfig == nil { - transport.TLSClientConfig = &tls.Config{} - } - transport.TLSClientConfig.InsecureSkipVerify = true - // No http2.ConfigureTransport() here. - // For now this is only added in places where - // an http.Transport is actually created. - } else if transport, ok := rp.Transport.(*h2quic.RoundTripper); ok { - if transport.TLSClientConfig == nil { - transport.TLSClientConfig = &tls.Config{} - } - transport.TLSClientConfig.InsecureSkipVerify = true - } -} - -// UseOwnCertificate is used to facilitate HTTPS proxying -// with locally provided certificate. -func (rp *ReverseProxy) UseOwnCACertificates(CaCertPool *x509.CertPool) { - if transport, ok := rp.Transport.(*http.Transport); ok { - if transport.TLSClientConfig == nil { - transport.TLSClientConfig = &tls.Config{} - } - transport.TLSClientConfig.RootCAs = CaCertPool - // No http2.ConfigureTransport() here. - // For now this is only added in places where - // an http.Transport is actually created. 
- } else if transport, ok := rp.Transport.(*h2quic.RoundTripper); ok { - if transport.TLSClientConfig == nil { - transport.TLSClientConfig = &tls.Config{} - } - transport.TLSClientConfig.RootCAs = CaCertPool - } -} - -// ServeHTTP serves the proxied request to the upstream by performing a roundtrip. -// It is designed to handle websocket connection upgrades as well. -func (rp *ReverseProxy) ServeHTTP(rw http.ResponseWriter, outreq *http.Request, respUpdateFn respUpdateFn) error { - transport := rp.Transport - if requestIsWebsocket(outreq) { - transport = newConnHijackerTransport(transport) - } - - rp.Director(outreq) - - if outreq.URL.Scheme == "quic" { - outreq.URL.Scheme = "https" // Change scheme back to https for QUIC RoundTripper - } - - res, err := transport.RoundTrip(outreq) - if err != nil { - return err - } - - isWebsocket := res.StatusCode == http.StatusSwitchingProtocols && strings.EqualFold(res.Header.Get("Upgrade"), "websocket") - - // Remove hop-by-hop headers listed in the - // "Connection" header of the response. 
- if c := res.Header.Get("Connection"); c != "" { - for _, f := range strings.Split(c, ",") { - if f = strings.TrimSpace(f); f != "" { - res.Header.Del(f) - } - } - } - - for _, h := range hopHeaders { - res.Header.Del(h) - } - - if respUpdateFn != nil { - respUpdateFn(res) - } - - if isWebsocket { - defer res.Body.Close() - hj, ok := rw.(http.Hijacker) - if !ok { - panic(httpserver.NonHijackerError{Underlying: rw}) - } - - conn, brw, err := hj.Hijack() - if err != nil { - return err - } - defer conn.Close() - - var backendConn net.Conn - if hj, ok := transport.(*connHijackerTransport); ok { - backendConn = hj.Conn - if _, err := conn.Write(hj.Replay); err != nil { - return err - } - bufferPool.Put(hj.Replay) - } else { - backendConn, err = net.DialTimeout("tcp", outreq.URL.Host, rp.dialer.Timeout) - if err != nil { - return err - } - if err := outreq.Write(backendConn); err != nil { - log.Println("[ERROR] failed to write: ", err) - } - } - defer backendConn.Close() - - proxyDone := make(chan struct{}, 2) - - // Proxy backend -> frontend. - go func() { - pooledIoCopy(conn, backendConn) - proxyDone <- struct{}{} - }() - - // Proxy frontend -> backend. - // - // NOTE: Hijack() sometimes returns buffered up bytes in brw which - // would be lost if we didn't read them out manually below. - if brw != nil { - if n := brw.Reader.Buffered(); n > 0 { - rbuf, err := brw.Reader.Peek(n) - if err != nil { - return err - } - if _, err := backendConn.Write(rbuf); err != nil { - log.Println("[ERROR] failed to write data to connection: ", err) - } - } - } - go func() { - pooledIoCopy(backendConn, conn) - proxyDone <- struct{}{} - }() - - // If one side is done, we are done. - <-proxyDone - } else { - // NOTE: - // Closing the Body involves acquiring a mutex, which is a - // unnecessarily heavy operation, considering that this defer will - // pretty much never be executed with the Body still unclosed. 
- bodyOpen := true - closeBody := func() { - if bodyOpen { - _ = res.Body.Close() - bodyOpen = false - } - } - defer closeBody() - - // Copy all headers over. - // res.Header does not include the "Trailer" header, - // which means we will have to do that manually below. - copyHeader(rw.Header(), res.Header) - - // The "Trailer" header isn't included in res' Header map, which - // is why we have to build one ourselves from res.Trailer. - // - // But res.Trailer does not necessarily contain all trailer keys at this - // point yet. The HTTP spec allows one to send "unannounced trailers" - // after a request and certain systems like gRPC make use of that. - announcedTrailerKeyCount := len(res.Trailer) - if announcedTrailerKeyCount > 0 { - vv := make([]string, 0, announcedTrailerKeyCount) - for k := range res.Trailer { - vv = append(vv, k) - } - rw.Header()["Trailer"] = vv - } - - // Now copy over the status code as well as the response body. - rw.WriteHeader(res.StatusCode) - if announcedTrailerKeyCount > 0 { - // Force chunking if we saw a response trailer. - // This prevents net/http from calculating the length - // for short bodies and adding a Content-Length. - if fl, ok := rw.(http.Flusher); ok { - fl.Flush() - } - } - rp.copyResponse(rw, res.Body) - - // Now close the body to fully populate res.Trailer. - closeBody() - - // Since Go does not remove keys from res.Trailer we - // can safely do a length comparison to check whether - // we received further, unannounced trailers. - // - // Most of the time forceSetTrailers should be false. 
- forceSetTrailers := len(res.Trailer) != announcedTrailerKeyCount - shallowCopyTrailers(rw.Header(), res.Trailer, forceSetTrailers) - } - - return nil -} - -func (rp *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) { - if rp.FlushInterval != 0 { - if wf, ok := dst.(writeFlusher); ok { - mlw := &maxLatencyWriter{ - dst: wf, - latency: rp.FlushInterval, - done: make(chan bool), - } - go mlw.flushLoop() - defer mlw.stop() - dst = mlw - } - } - pooledIoCopy(dst, src) -} - -// skip these headers if they already exist. -// see https://github.com/mholt/caddy/pull/1112#discussion_r80092582 -var skipHeaders = map[string]struct{}{ - "Content-Type": {}, - "Content-Disposition": {}, - "Accept-Ranges": {}, - "Set-Cookie": {}, - "Cache-Control": {}, - "Expires": {}, -} - -func copyHeader(dst, src http.Header) { - for k, vv := range src { - if _, ok := dst[k]; ok { - // skip some predefined headers - // see https://github.com/mholt/caddy/issues/1086 - if _, shouldSkip := skipHeaders[k]; shouldSkip { - continue - } - // otherwise, overwrite to avoid duplicated fields that can be - // problematic (see issue #1086) -- however, allow duplicate - // Server fields so we can see the reality of the proxying. - if k != "Server" { - dst.Del(k) - } - } - for _, v := range vv { - dst.Add(k, v) - } - } -} - -// shallowCopyTrailers copies all headers from srcTrailer to dstHeader. -// -// If forceSetTrailers is set to true, the http.TrailerPrefix will be added to -// all srcTrailer key names. Otherwise the Go stdlib will ignore all keys -// which weren't listed in the Trailer map before submitting the Response. -// -// WARNING: Only a shallow copy will be created! -func shallowCopyTrailers(dstHeader, srcTrailer http.Header, forceSetTrailers bool) { - for k, vv := range srcTrailer { - if forceSetTrailers { - k = http.TrailerPrefix + k - } - dstHeader[k] = vv - } -} - -// Hop-by-hop headers. These are removed when sent to the backend. 
-// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html -var hopHeaders = []string{ - "Alt-Svc", - "Alternate-Protocol", - "Connection", - "Keep-Alive", - "Proxy-Authenticate", - "Proxy-Authorization", - "Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google - "Te", // canonicalized version of "TE" - "Trailer", // not Trailers per URL above; http://www.rfc-editor.org/errata_search.php?eid=4522 - "Transfer-Encoding", - "Upgrade", -} - -type respUpdateFn func(resp *http.Response) - -type hijackedConn struct { - net.Conn - hj *connHijackerTransport -} - -func (c *hijackedConn) Read(b []byte) (n int, err error) { - n, err = c.Conn.Read(b) - c.hj.Replay = append(c.hj.Replay, b[:n]...) - return -} - -func (c *hijackedConn) Close() error { - return nil -} - -type connHijackerTransport struct { - *http.Transport - Conn net.Conn - Replay []byte -} - -func newConnHijackerTransport(base http.RoundTripper) *connHijackerTransport { - t := &http.Transport{ - MaxIdleConnsPerHost: -1, - } - if b, _ := base.(*http.Transport); b != nil { - tlsClientConfig := b.TLSClientConfig - if tlsClientConfig != nil && tlsClientConfig.NextProtos != nil { - tlsClientConfig = tlsClientConfig.Clone() - tlsClientConfig.NextProtos = nil - } - - t.Proxy = b.Proxy - t.TLSClientConfig = tlsClientConfig - t.TLSHandshakeTimeout = b.TLSHandshakeTimeout - t.Dial = b.Dial - t.DialTLS = b.DialTLS - } else { - t.Proxy = http.ProxyFromEnvironment - t.TLSHandshakeTimeout = 10 * time.Second - } - hj := &connHijackerTransport{t, nil, bufferPool.Get().([]byte)[:0]} - - dial := getTransportDial(t) - dialTLS := getTransportDialTLS(t) - t.Dial = func(network, addr string) (net.Conn, error) { - c, err := dial(network, addr) - hj.Conn = c - return &hijackedConn{c, hj}, err - } - t.DialTLS = func(network, addr string) (net.Conn, error) { - c, err := dialTLS(network, addr) - hj.Conn = c - return &hijackedConn{c, hj}, err - } - - return hj -} - -// getTransportDial always returns a 
plain Dialer -// and defaults to the existing t.Dial. -func getTransportDial(t *http.Transport) func(network, addr string) (net.Conn, error) { - if t.Dial != nil { - return t.Dial - } - return defaultDialer.Dial -} - -// getTransportDial always returns a TLS Dialer -// and defaults to the existing t.DialTLS. -func getTransportDialTLS(t *http.Transport) func(network, addr string) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS - } - - // newConnHijackerTransport will modify t.Dial after calling this method - // => Create a backup reference. - plainDial := getTransportDial(t) - - // The following DialTLS implementation stems from the Go stdlib and - // is identical to what happens if DialTLS is not provided. - // Source: https://github.com/golang/go/blob/230a376b5a67f0e9341e1fa47e670ff762213c83/src/net/http/transport.go#L1018-L1051 - return func(network, addr string) (net.Conn, error) { - plainConn, err := plainDial(network, addr) - if err != nil { - return nil, err - } - - tlsClientConfig := t.TLSClientConfig - if tlsClientConfig == nil { - tlsClientConfig = &tls.Config{} - } - if !tlsClientConfig.InsecureSkipVerify && tlsClientConfig.ServerName == "" { - tlsClientConfig.ServerName = stripPort(addr) - } - - tlsConn := tls.Client(plainConn, tlsClientConfig) - errc := make(chan error, 2) - var timer *time.Timer - if d := t.TLSHandshakeTimeout; d != 0 { - timer = time.AfterFunc(d, func() { - errc <- tlsHandshakeTimeoutError{} - }) - } - go func() { - err := tlsConn.Handshake() - if timer != nil { - timer.Stop() - } - errc <- err - }() - if err := <-errc; err != nil { - _ = plainConn.Close() - return nil, err - } - if !tlsClientConfig.InsecureSkipVerify { - hostname := tlsClientConfig.ServerName - if hostname == "" { - hostname = stripPort(addr) - } - if err := tlsConn.VerifyHostname(hostname); err != nil { - _ = plainConn.Close() - return nil, err - } - } - - return tlsConn, nil - } -} - -// stripPort returns address without its port if it has one and 
-// works with IP addresses as well as hostnames formatted as host:port. -// -// IPv6 addresses (excluding the port) must be enclosed in -// square brackets similar to the requirements of Go's stdlib. -func stripPort(address string) string { - // Keep in mind that the address might be a IPv6 address - // and thus contain a colon, but not have a port. - portIdx := strings.LastIndex(address, ":") - ipv6Idx := strings.LastIndex(address, "]") - if portIdx > ipv6Idx { - address = address[:portIdx] - } - return address -} - -type tlsHandshakeTimeoutError struct{} - -func (tlsHandshakeTimeoutError) Timeout() bool { return true } -func (tlsHandshakeTimeoutError) Temporary() bool { return true } -func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" } - -func requestIsWebsocket(req *http.Request) bool { - return strings.EqualFold(req.Header.Get("Upgrade"), "websocket") && strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") -} - -type writeFlusher interface { - io.Writer - http.Flusher -} - -type maxLatencyWriter struct { - dst writeFlusher - latency time.Duration - - lk sync.Mutex // protects Write + Flush - done chan bool -} - -func (m *maxLatencyWriter) Write(p []byte) (int, error) { - m.lk.Lock() - defer m.lk.Unlock() - return m.dst.Write(p) -} - -func (m *maxLatencyWriter) flushLoop() { - t := time.NewTicker(m.latency) - defer t.Stop() - for { - select { - case <-m.done: - if onExitFlushLoop != nil { - onExitFlushLoop() - } - return - case <-t.C: - m.lk.Lock() - m.dst.Flush() - m.lk.Unlock() - } - } -} - -func (m *maxLatencyWriter) stop() { m.done <- true } diff --git a/vendor/github.com/mholt/caddy/caddyhttp/proxy/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/proxy/setup.go deleted file mode 100644 index 8db25b660..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/proxy/setup.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 
2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package proxy - -import ( - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("proxy", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new Proxy middleware instance. -func setup(c *caddy.Controller) error { - upstreams, err := NewStaticUpstreams(c.Dispenser, httpserver.GetConfig(c).Host()) - if err != nil { - return err - } - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Proxy{Next: next, Upstreams: upstreams} - }) - - // Register shutdown handlers. - for _, upstream := range upstreams { - c.OnShutdown(upstream.Stop) - } - - return nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/proxy/upstream.go b/vendor/github.com/mholt/caddy/caddyhttp/proxy/upstream.go deleted file mode 100644 index 882a0aeb0..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/proxy/upstream.go +++ /dev/null @@ -1,782 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package proxy - -import ( - "bytes" - "context" - "crypto/x509" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/http" - "net/textproto" - "net/url" - "path" - "regexp" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "crypto/tls" - - "github.com/mholt/caddy/caddyfile" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -var ( - supportedPolicies = make(map[string]func(string) Policy) -) - -type staticUpstream struct { - from string - upstreamHeaders http.Header - downstreamHeaders http.Header - stop chan struct{} // Signals running goroutines to stop. - wg sync.WaitGroup // Used to wait for running goroutines to stop. - Hosts HostPool - Policy Policy - KeepAlive int - FallbackDelay time.Duration - Timeout time.Duration - FailTimeout time.Duration - TryDuration time.Duration - TryInterval time.Duration - MaxConns int64 - HealthCheck struct { - Client http.Client - Path string - Interval time.Duration - Timeout time.Duration - Host string - Port string - ContentString string - } - WithoutPathPrefix string - IgnoredSubPaths []string - insecureSkipVerify bool - MaxFails int32 - resolver srvResolver - CaCertPool *x509.CertPool - upstreamHeaderReplacements headerReplacements - downstreamHeaderReplacements headerReplacements -} - -type srvResolver interface { - LookupSRV(context.Context, string, string, string) (string, []*net.SRV, error) -} - -// headerReplacement stores a compiled regex matcher and a string replacer, for replacement rules -type headerReplacement struct { - regexp *regexp.Regexp - to string -} - -// headerReplacements stores a mapping of canonical MIME header to headerReplacement -// Implements a subset of http.Header functions, to allow convenient addition and deletion of rules -type headerReplacements map[string][]headerReplacement - -func (h headerReplacements) Add(key string, value headerReplacement) { - key = 
textproto.CanonicalMIMEHeaderKey(key) - h[key] = append(h[key], value) -} - -func (h headerReplacements) Del(key string) { - delete(h, textproto.CanonicalMIMEHeaderKey(key)) -} - -// NewStaticUpstreams parses the configuration input and sets up -// static upstreams for the proxy middleware. The host string parameter, -// if not empty, is used for setting the upstream Host header for the -// health checks if the upstream header config requires it. -func NewStaticUpstreams(c caddyfile.Dispenser, host string) ([]Upstream, error) { - var upstreams []Upstream - for c.Next() { - - upstream := &staticUpstream{ - from: "", - stop: make(chan struct{}), - upstreamHeaders: make(http.Header), - downstreamHeaders: make(http.Header), - Hosts: nil, - Policy: &Random{}, - MaxFails: 1, - TryInterval: 250 * time.Millisecond, - MaxConns: 0, - KeepAlive: http.DefaultMaxIdleConnsPerHost, - Timeout: 30 * time.Second, - resolver: net.DefaultResolver, - upstreamHeaderReplacements: make(headerReplacements), - downstreamHeaderReplacements: make(headerReplacements), - } - - if !c.Args(&upstream.from) { - return upstreams, c.ArgErr() - } - - var to []string - hasSrv := false - - for _, t := range c.RemainingArgs() { - if len(to) > 0 && hasSrv { - return upstreams, c.Err("only one upstream is supported when using SRV locator") - } - - if strings.HasPrefix(t, "srv://") || strings.HasPrefix(t, "srv+https://") { - if len(to) > 0 { - return upstreams, c.Err("service locator upstreams can not be mixed with host names") - } - - hasSrv = true - } - - parsed, err := parseUpstream(t) - if err != nil { - return upstreams, err - } - to = append(to, parsed...) 
- } - - for c.NextBlock() { - switch c.Val() { - case "upstream": - if !c.NextArg() { - return upstreams, c.ArgErr() - } - - if hasSrv { - return upstreams, c.Err("upstream directive is not supported when backend is service locator") - } - - parsed, err := parseUpstream(c.Val()) - if err != nil { - return upstreams, err - } - to = append(to, parsed...) - default: - if err := parseBlock(&c, upstream, hasSrv); err != nil { - return upstreams, err - } - } - } - - if len(to) == 0 { - return upstreams, c.ArgErr() - } - - upstream.Hosts = make([]*UpstreamHost, len(to)) - for i, host := range to { - uh, err := upstream.NewHost(host) - if err != nil { - return upstreams, err - } - upstream.Hosts[i] = uh - } - - if upstream.HealthCheck.Path != "" { - upstream.HealthCheck.Client = http.Client{ - Timeout: upstream.HealthCheck.Timeout, - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: upstream.insecureSkipVerify}, - }, - } - - // set up health check upstream host if we have one - if host != "" { - hostHeader := upstream.upstreamHeaders.Get("Host") - if strings.Contains(hostHeader, "{host}") { - upstream.HealthCheck.Host = strings.Replace(hostHeader, "{host}", host, -1) - } - } - upstream.wg.Add(1) - go func() { - defer upstream.wg.Done() - upstream.HealthCheckWorker(upstream.stop) - }() - } - upstreams = append(upstreams, upstream) - } - return upstreams, nil -} - -func (u *staticUpstream) From() string { - return u.from -} - -func (u *staticUpstream) NewHost(host string) (*UpstreamHost, error) { - if !strings.HasPrefix(host, "http") && - !strings.HasPrefix(host, "unix:") && - !strings.HasPrefix(host, "quic:") && - !strings.HasPrefix(host, "srv://") && - !strings.HasPrefix(host, "srv+https://") { - host = "http://" + host - } - uh := &UpstreamHost{ - Name: host, - Conns: 0, - Fails: 0, - FailTimeout: u.FailTimeout, - Unhealthy: 0, - UpstreamHeaders: u.upstreamHeaders, - DownstreamHeaders: u.downstreamHeaders, - CheckDown: func(u *staticUpstream) 
UpstreamHostDownFunc { - return func(uh *UpstreamHost) bool { - if atomic.LoadInt32(&uh.Unhealthy) != 0 { - return true - } - if atomic.LoadInt32(&uh.Fails) >= u.MaxFails { - return true - } - return false - } - }(u), - WithoutPathPrefix: u.WithoutPathPrefix, - MaxConns: u.MaxConns, - HealthCheckResult: atomic.Value{}, - UpstreamHeaderReplacements: u.upstreamHeaderReplacements, - DownstreamHeaderReplacements: u.downstreamHeaderReplacements, - } - - baseURL, err := url.Parse(uh.Name) - if err != nil { - return nil, err - } - - uh.ReverseProxy = NewSingleHostReverseProxy(baseURL, uh.WithoutPathPrefix, u.KeepAlive, u.Timeout, u.FallbackDelay) - if u.insecureSkipVerify { - uh.ReverseProxy.UseInsecureTransport() - } - - if u.CaCertPool != nil { - uh.ReverseProxy.UseOwnCACertificates(u.CaCertPool) - } - - return uh, nil -} - -func parseUpstream(u string) ([]string, error) { - if strings.HasPrefix(u, "unix:") { - return []string{u}, nil - } - - isSrv := strings.HasPrefix(u, "srv://") || strings.HasPrefix(u, "srv+https://") - colonIdx := strings.LastIndex(u, ":") - protoIdx := strings.Index(u, "://") - - if colonIdx == -1 || colonIdx == protoIdx { - return []string{u}, nil - } - - if isSrv { - return nil, fmt.Errorf("service locator %s can not have port specified", u) - } - - us := u[:colonIdx] - ue := "" - portsEnd := len(u) - if nextSlash := strings.Index(u[colonIdx:], "/"); nextSlash != -1 { - portsEnd = colonIdx + nextSlash - ue = u[portsEnd:] - } - - ports := u[len(us)+1 : portsEnd] - separators := strings.Count(ports, "-") - - if separators == 0 { - return []string{u}, nil - } - - if separators > 1 { - return nil, fmt.Errorf("port range [%s] has %d separators", ports, separators) - } - - portsStr := strings.Split(ports, "-") - pIni, err := strconv.Atoi(portsStr[0]) - if err != nil { - return nil, err - } - - pEnd, err := strconv.Atoi(portsStr[1]) - if err != nil { - return nil, err - } - - if pEnd <= pIni { - return nil, fmt.Errorf("port range [%s] is invalid", 
ports) - } - - hosts := []string{} - for p := pIni; p <= pEnd; p++ { - hosts = append(hosts, fmt.Sprintf("%s:%d%s", us, p, ue)) - } - - return hosts, nil -} - -func parseBlock(c *caddyfile.Dispenser, u *staticUpstream, hasSrv bool) error { - var isUpstream bool - - switch c.Val() { - case "policy": - if !c.NextArg() { - return c.ArgErr() - } - policyCreateFunc, ok := supportedPolicies[c.Val()] - if !ok { - return c.ArgErr() - } - arg := "" - if c.NextArg() { - arg = c.Val() - } - u.Policy = policyCreateFunc(arg) - case "fallback_delay": - if !c.NextArg() { - return c.ArgErr() - } - dur, err := time.ParseDuration(c.Val()) - if err != nil { - return err - } - u.FallbackDelay = dur - case "fail_timeout": - if !c.NextArg() { - return c.ArgErr() - } - dur, err := time.ParseDuration(c.Val()) - if err != nil { - return err - } - u.FailTimeout = dur - case "max_fails": - if !c.NextArg() { - return c.ArgErr() - } - n, err := strconv.Atoi(c.Val()) - if err != nil { - return err - } - if n < 1 { - return c.Err("max_fails must be at least 1") - } - u.MaxFails = int32(n) - case "try_duration": - if !c.NextArg() { - return c.ArgErr() - } - dur, err := time.ParseDuration(c.Val()) - if err != nil { - return err - } - u.TryDuration = dur - case "try_interval": - if !c.NextArg() { - return c.ArgErr() - } - interval, err := time.ParseDuration(c.Val()) - if err != nil { - return err - } - u.TryInterval = interval - case "max_conns": - if !c.NextArg() { - return c.ArgErr() - } - n, err := strconv.ParseInt(c.Val(), 10, 64) - if err != nil { - return err - } - u.MaxConns = n - case "health_check": - if !c.NextArg() { - return c.ArgErr() - } - u.HealthCheck.Path = c.Val() - - // Set defaults - if u.HealthCheck.Interval == 0 { - u.HealthCheck.Interval = 30 * time.Second - } - if u.HealthCheck.Timeout == 0 { - u.HealthCheck.Timeout = 60 * time.Second - } - case "health_check_interval": - var interval string - if !c.Args(&interval) { - return c.ArgErr() - } - dur, err := 
time.ParseDuration(interval) - if err != nil { - return err - } - u.HealthCheck.Interval = dur - case "health_check_timeout": - var interval string - if !c.Args(&interval) { - return c.ArgErr() - } - dur, err := time.ParseDuration(interval) - if err != nil { - return err - } - u.HealthCheck.Timeout = dur - case "health_check_port": - if !c.NextArg() { - return c.ArgErr() - } - - if hasSrv { - return c.Err("health_check_port directive is not allowed when upstream is SRV locator") - } - - port := c.Val() - n, err := strconv.Atoi(port) - if err != nil { - return err - } - - if n < 0 { - return c.Errf("invalid health_check_port '%s'", port) - } - u.HealthCheck.Port = port - case "health_check_contains": - if !c.NextArg() { - return c.ArgErr() - } - u.HealthCheck.ContentString = c.Val() - case "header_upstream": - isUpstream = true - fallthrough - case "header_downstream": - var header, value, replaced string - if c.Args(&header, &value, &replaced) { - // Don't allow - or + in replacements - if strings.HasPrefix(header, "-") || strings.HasPrefix(header, "+") { - return c.ArgErr() - } - r, err := regexp.Compile(value) - if err != nil { - return err - } - if isUpstream { - u.upstreamHeaderReplacements.Add(header, headerReplacement{r, replaced}) - } else { - u.downstreamHeaderReplacements.Add(header, headerReplacement{r, replaced}) - } - } else { - if len(value) == 0 { - // When removing a header, the value can be optional. 
- if !strings.HasPrefix(header, "-") { - return c.ArgErr() - } - } - if isUpstream { - u.upstreamHeaders.Add(header, value) - } else { - u.downstreamHeaders.Add(header, value) - } - } - case "transparent": - // Note: X-Forwarded-For header is always being appended for proxy connections - // See implementation of createUpstreamRequest in proxy.go - u.upstreamHeaders.Add("Host", "{host}") - u.upstreamHeaders.Add("X-Real-IP", "{remote}") - u.upstreamHeaders.Add("X-Forwarded-Proto", "{scheme}") - u.upstreamHeaders.Add("X-Forwarded-Port", "{server_port}") - case "websocket": - u.upstreamHeaders.Add("Connection", "{>Connection}") - u.upstreamHeaders.Add("Upgrade", "{>Upgrade}") - case "without": - if !c.NextArg() { - return c.ArgErr() - } - u.WithoutPathPrefix = c.Val() - case "except": - ignoredPaths := c.RemainingArgs() - if len(ignoredPaths) == 0 { - return c.ArgErr() - } - u.IgnoredSubPaths = ignoredPaths - case "insecure_skip_verify": - u.insecureSkipVerify = true - case "ca_certificates": - caCertificates := c.RemainingArgs() - if len(caCertificates) == 0 { - return c.ArgErr() - } - - pool := x509.NewCertPool() - caCertificatesAdded := make(map[string]struct{}) - for _, caFile := range caCertificates { - // don't add cert to pool more than once - if _, ok := caCertificatesAdded[caFile]; ok { - continue - } - caCertificatesAdded[caFile] = struct{}{} - - // any client with a certificate from this CA will be allowed to connect - caCrt, err := ioutil.ReadFile(caFile) - if err != nil { - return c.Err(err.Error()) - } - - // attempt to parse pem and append to cert pool - if ok := pool.AppendCertsFromPEM(caCrt); !ok { - return c.Errf("loading CA certificate '%s': no certificates were successfully parsed", caFile) - } - } - - u.CaCertPool = pool - case "keepalive": - if !c.NextArg() { - return c.ArgErr() - } - n, err := strconv.Atoi(c.Val()) - if err != nil { - return err - } - if n < 0 { - return c.ArgErr() - } - u.KeepAlive = n - case "timeout": - if !c.NextArg() { - 
return c.ArgErr() - } - dur, err := time.ParseDuration(c.Val()) - if err != nil { - return c.Errf("unable to parse timeout duration '%s'", c.Val()) - } - u.Timeout = dur - default: - return c.Errf("unknown property '%s'", c.Val()) - } - - // these settings are at odds with one another. insecure_skip_verify disables security features over HTTPS - // which is what we are trying to achieve with ca_certificates - if u.insecureSkipVerify && u.CaCertPool != nil { - return c.Errf("both insecure_skip_verify and ca_certificates cannot be set in the proxy directive") - } - - return nil -} - -func (u *staticUpstream) resolveHost(h string) ([]string, bool, error) { - names := []string{} - proto := "http" - if !strings.HasPrefix(h, "srv://") && !strings.HasPrefix(h, "srv+https://") { - return []string{h}, false, nil - } - - if strings.HasPrefix(h, "srv+https://") { - proto = "https" - } - - _, addrs, err := u.resolver.LookupSRV(context.Background(), "", "", h) - if err != nil { - return names, true, err - } - - for _, addr := range addrs { - names = append(names, fmt.Sprintf("%s://%s:%d", proto, addr.Target, addr.Port)) - } - - return names, true, nil -} - -func (u *staticUpstream) healthCheck() { - for _, host := range u.Hosts { - candidates, isSrv, err := u.resolveHost(host.Name) - if err != nil { - host.HealthCheckResult.Store(err.Error()) - atomic.StoreInt32(&host.Unhealthy, 1) - continue - } - - unhealthyCount := 0 - for _, addr := range candidates { - hostURL := addr - if !isSrv && u.HealthCheck.Port != "" { - hostURL = replacePort(hostURL, u.HealthCheck.Port) - } - hostURL += u.HealthCheck.Path - - unhealthy := func() bool { - // set up request, needed to be able to modify headers - // possible errors are bad HTTP methods or un-parsable urls - req, err := http.NewRequest("GET", hostURL, nil) - if err != nil { - return true - } - // set host for request going upstream - if u.HealthCheck.Host != "" { - req.Host = u.HealthCheck.Host - } - r, err := 
u.HealthCheck.Client.Do(req) - if err != nil { - return true - } - defer func() { - if _, err := io.Copy(ioutil.Discard, r.Body); err != nil { - log.Println("[ERROR] failed to copy: ", err) - } - _ = r.Body.Close() - }() - if r.StatusCode < 200 || r.StatusCode >= 400 { - return true - } - if u.HealthCheck.ContentString == "" { // don't check for content string - return false - } - // TODO ReadAll will be replaced if deemed necessary - // See https://github.com/mholt/caddy/pull/1691 - buf, err := ioutil.ReadAll(r.Body) - if err != nil { - return true - } - if bytes.Contains(buf, []byte(u.HealthCheck.ContentString)) { - return false - } - return true - }() - - if unhealthy { - unhealthyCount++ - } - } - - if unhealthyCount == len(candidates) { - atomic.StoreInt32(&host.Unhealthy, 1) - host.HealthCheckResult.Store("Failed") - } else { - atomic.StoreInt32(&host.Unhealthy, 0) - host.HealthCheckResult.Store("OK") - } - } -} - -func (u *staticUpstream) HealthCheckWorker(stop chan struct{}) { - ticker := time.NewTicker(u.HealthCheck.Interval) - u.healthCheck() - for { - select { - case <-ticker.C: - u.healthCheck() - case <-stop: - ticker.Stop() - return - } - } -} - -func (u *staticUpstream) Select(r *http.Request) *UpstreamHost { - pool := u.Hosts - if len(pool) == 1 { - if !pool[0].Available() { - return nil - } - return pool[0] - } - allUnavailable := true - for _, host := range pool { - if host.Available() { - allUnavailable = false - break - } - } - if allUnavailable { - return nil - } - if u.Policy == nil { - return (&Random{}).Select(pool, r) - } - return u.Policy.Select(pool, r) -} - -func (u *staticUpstream) AllowedPath(requestPath string) bool { - for _, ignoredSubPath := range u.IgnoredSubPaths { - p := path.Clean(requestPath) - e := path.Join(u.From(), ignoredSubPath) - // Re-add a trailing slashes if the original - // paths had one and the cleaned paths don't - if strings.HasSuffix(requestPath, "/") && !strings.HasSuffix(p, "/") { - p = p + "/" - } - if 
strings.HasSuffix(ignoredSubPath, "/") && !strings.HasSuffix(e, "/") { - e = e + "/" - } - if httpserver.Path(p).Matches(e) { - return false - } - } - return true -} - -// GetFallbackDelay returns u.FallbackDelay. -func (u *staticUpstream) GetFallbackDelay() time.Duration { - return u.FallbackDelay -} - -// GetTryDuration returns u.TryDuration. -func (u *staticUpstream) GetTryDuration() time.Duration { - return u.TryDuration -} - -// GetTryInterval returns u.TryInterval. -func (u *staticUpstream) GetTryInterval() time.Duration { - return u.TryInterval -} - -// GetTimeout returns u.Timeout. -func (u *staticUpstream) GetTimeout() time.Duration { - return u.Timeout -} - -func (u *staticUpstream) GetHostCount() int { - return len(u.Hosts) -} - -// Stop sends a signal to all goroutines started by this staticUpstream to exit -// and waits for them to finish before returning. -func (u *staticUpstream) Stop() error { - close(u.stop) - u.wg.Wait() - return nil -} - -// RegisterPolicy adds a custom policy to the proxy. -func RegisterPolicy(name string, policy func(string) Policy) { - supportedPolicies[name] = policy -} - -func replacePort(originalURL string, newPort string) string { - parsedURL, err := url.Parse(originalURL) - if err != nil { - return originalURL - } - - // handles 'localhost' and 'localhost:8080' - parsedHost, _, err := net.SplitHostPort(parsedURL.Host) - if err != nil { - parsedHost = parsedURL.Host - } - - parsedURL.Host = net.JoinHostPort(parsedHost, newPort) - return parsedURL.String() -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/push/handler.go b/vendor/github.com/mholt/caddy/caddyhttp/push/handler.go deleted file mode 100644 index 4a676c272..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/push/handler.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package push - -import ( - "net/http" - "strings" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func (h Middleware) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - pusher, hasPusher := w.(http.Pusher) - - // no push possible, carry on - if !hasPusher { - return h.Next.ServeHTTP(w, r) - } - - // check if this is a request for the pushed resource (avoid recursion) - if _, exists := r.Header[pushHeader]; exists { - return h.Next.ServeHTTP(w, r) - } - - headers := h.filterProxiedHeaders(r.Header) - - // push first -outer: - for _, rule := range h.Rules { - urlPath := r.URL.Path - matches := httpserver.Path(urlPath).Matches(rule.Path) - // Also check IndexPages when requesting a directory - if !matches { - indexFile, isIndexFile := httpserver.IndexFile(h.Root, urlPath, h.indexPages) - if isIndexFile { - matches = httpserver.Path(indexFile).Matches(rule.Path) - } - } - if matches { - for _, resource := range rule.Resources { - pushErr := pusher.Push(resource.Path, &http.PushOptions{ - Method: resource.Method, - Header: h.mergeHeaders(headers, resource.Header), - }) - if pushErr != nil { - // if we cannot push (either not supported or concurrent streams are full - break) - break outer - } - } - } - } - - // serve later - code, err := h.Next.ServeHTTP(w, r) - - // push resources returned in Link headers from upstream middlewares or proxied apps - if links, exists := w.Header()["Link"]; exists { - h.servePreloadLinks(pusher, headers, links) - } - - return code, err -} - -// servePreloadLinks parses Link headers from backend 
and pushes resources found in them. -// For accepted header formats check parseLinkHeader function. -// -// If resource has 'nopush' attribute then it will be omitted. -func (h Middleware) servePreloadLinks(pusher http.Pusher, headers http.Header, resources []string) { -outer: - for _, resource := range resources { - for _, resource := range parseLinkHeader(resource) { - if _, exists := resource.params["nopush"]; exists { - continue - } - - if h.isRemoteResource(resource.uri) { - continue - } - - err := pusher.Push(resource.uri, &http.PushOptions{ - Method: http.MethodGet, - Header: headers, - }) - - if err != nil { - break outer - } - } - } -} - -func (h Middleware) isRemoteResource(resource string) bool { - return strings.HasPrefix(resource, "//") || - strings.HasPrefix(resource, "http://") || - strings.HasPrefix(resource, "https://") -} - -func (h Middleware) mergeHeaders(l, r http.Header) http.Header { - out := http.Header{} - - for k, v := range l { - out[k] = v - } - - for k, vv := range r { - for _, v := range vv { - out.Add(k, v) - } - } - - return out -} - -func (h Middleware) filterProxiedHeaders(headers http.Header) http.Header { - filter := http.Header{} - - for _, header := range proxiedHeaders { - if val, ok := headers[header]; ok { - filter[header] = val - } - } - - return filter -} - -var proxiedHeaders = []string{ - "Accept-Encoding", - "Accept-Language", - "Cache-Control", - "Host", - "User-Agent", -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/push/link_parser.go b/vendor/github.com/mholt/caddy/caddyhttp/push/link_parser.go deleted file mode 100644 index de7d078e3..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/push/link_parser.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package push - -import ( - "strings" -) - -const ( - commaSeparator = "," - semicolonSeparator = ";" - equalSeparator = "=" -) - -type linkResource struct { - uri string - params map[string]string -} - -// parseLinkHeader is responsible for parsing Link header and returning list of found resources. -// -// Accepted formats are: -// Link: ; as=script -// Link: ; as=script,; as=style -// Link: ; -func parseLinkHeader(header string) []linkResource { - resources := []linkResource{} - - if header == "" { - return resources - } - - for _, link := range strings.Split(header, commaSeparator) { - l := linkResource{params: make(map[string]string)} - - li, ri := strings.Index(link, "<"), strings.Index(link, ">") - - if li == -1 || ri == -1 { - continue - } - - l.uri = strings.TrimSpace(link[li+1 : ri]) - - for _, param := range strings.Split(strings.TrimSpace(link[ri+1:]), semicolonSeparator) { - parts := strings.SplitN(strings.TrimSpace(param), equalSeparator, 2) - key := strings.TrimSpace(parts[0]) - - if key == "" { - continue - } - - if len(parts) == 1 { - l.params[key] = key - } - - if len(parts) == 2 { - l.params[key] = strings.TrimSpace(parts[1]) - } - } - - resources = append(resources, l) - } - - return resources -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/push/push.go b/vendor/github.com/mholt/caddy/caddyhttp/push/push.go deleted file mode 100644 index 5dc75c113..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/push/push.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache 
License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package push - -import ( - "net/http" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -type ( - // Rule describes conditions on which resources will be pushed - Rule struct { - Path string - Resources []Resource - } - - // Resource describes resource to be pushed - Resource struct { - Path string - Method string - Header http.Header - } - - // Middleware supports pushing resources to clients - Middleware struct { - Next httpserver.Handler - Rules []Rule - Root http.FileSystem - indexPages []string // will be injected from SiteConfig on setup - } - - ruleOp func([]Resource) -) diff --git a/vendor/github.com/mholt/caddy/caddyhttp/push/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/push/setup.go deleted file mode 100644 index a5037df3f..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/push/setup.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package push - -import ( - "errors" - "fmt" - "net/http" - "strings" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("push", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -var errInvalidHeader = errors.New("header directive requires [name] [value]") - -var errHeaderStartsWithColon = errors.New("header cannot start with colon") -var errMethodNotSupported = errors.New("push supports only GET and HEAD methods") - -const pushHeader = "X-Push" - -var emptyRules = []Rule{} - -// setup configures a new Push middleware -func setup(c *caddy.Controller) error { - rules, err := parsePushRules(c) - - if err != nil { - return err - } - - cfg := httpserver.GetConfig(c) - cfg.AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Middleware{Next: next, Rules: rules, Root: http.Dir(cfg.Root), indexPages: cfg.IndexPages} - }) - - return nil -} - -func parsePushRules(c *caddy.Controller) ([]Rule, error) { - var rules = make(map[string]*Rule) - - for c.NextLine() { - var rule *Rule - var resources []Resource - var ops []ruleOp - - parseBlock := func() error { - for c.NextBlock() { - val := c.Val() - - switch val { - case "method": - if !c.NextArg() { - return c.ArgErr() - } - - method := c.Val() - - if err := validateMethod(method); err != nil { - return errMethodNotSupported - } - - ops = append(ops, setMethodOp(method)) - - case "header": - args := c.RemainingArgs() - - if len(args) != 2 { - return errInvalidHeader - } - - if err := validateHeader(args[0]); err != nil { - return err - } - - ops = append(ops, setHeaderOp(args[0], args[1])) - default: - resources = append(resources, Resource{ - Path: val, - Method: http.MethodGet, - Header: http.Header{pushHeader: []string{}}, - }) - } - } - return nil - } - - args := c.RemainingArgs() - - if len(args) == 0 { - rule = new(Rule) - rule.Path = "/" - rules["/"] = rule - err := parseBlock() - if err != nil { - return emptyRules, 
err - } - } else { - path := args[0] - - if existingRule, ok := rules[path]; ok { - rule = existingRule - } else { - rule = new(Rule) - rule.Path = path - rules[rule.Path] = rule - } - - for i := 1; i < len(args); i++ { - resources = append(resources, Resource{ - Path: args[i], - Method: http.MethodGet, - Header: http.Header{pushHeader: []string{}}, - }) - } - - err := parseBlock() - if err != nil { - return emptyRules, err - } - } - - for _, op := range ops { - op(resources) - } - rule.Resources = append(rule.Resources, resources...) - } - - var returnRules []Rule - for _, rule := range rules { - returnRules = append(returnRules, *rule) - } - - return returnRules, nil -} - -func setHeaderOp(key, value string) func(resources []Resource) { - return func(resources []Resource) { - for index := range resources { - resources[index].Header.Set(key, value) - } - } -} - -func setMethodOp(method string) func(resources []Resource) { - return func(resources []Resource) { - for index := range resources { - resources[index].Method = method - } - } -} - -func validateHeader(header string) error { - if strings.HasPrefix(header, ":") { - return errHeaderStartsWithColon - } - - switch strings.ToLower(header) { - case "content-length", "content-encoding", "trailer", "te", "expect", "host": - return fmt.Errorf("push headers cannot include %s", header) - } - - return nil -} - -// rules based on https://go-review.googlesource.com/#/c/29439/4/http2/go18.go#94 -func validateMethod(method string) error { - if method != http.MethodGet && method != http.MethodHead { - return errMethodNotSupported - } - - return nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/redirect/redirect.go b/vendor/github.com/mholt/caddy/caddyhttp/redirect/redirect.go deleted file mode 100644 index 9ae102fe7..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/redirect/redirect.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 
2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package redirect is middleware for redirecting certain requests -// to other locations. -package redirect - -import ( - "fmt" - "html" - "net/http" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// Redirect is middleware to respond with HTTP redirects -type Redirect struct { - Next httpserver.Handler - Rules []Rule -} - -// ServeHTTP implements the httpserver.Handler interface. -func (rd Redirect) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - for _, rule := range rd.Rules { - if (rule.FromPath == "/" || r.URL.Path == rule.FromPath) && schemeMatches(rule, r) && rule.Match(r) { - to := httpserver.NewReplacer(r, nil, "").Replace(rule.To) - if rule.Meta { - safeTo := html.EscapeString(to) - fmt.Fprintf(w, metaRedir, safeTo, safeTo) - } else { - http.Redirect(w, r, to, rule.Code) - } - return 0, nil - } - } - return rd.Next.ServeHTTP(w, r) -} - -func schemeMatches(rule Rule, req *http.Request) bool { - return (rule.FromScheme() == "https" && req.TLS != nil) || - (rule.FromScheme() != "https" && req.TLS == nil) -} - -// Rule describes an HTTP redirect rule. -type Rule struct { - FromScheme func() string - FromPath, To string - Code int - Meta bool - httpserver.RequestMatcher -} - -// Script tag comes first since that will better imitate a redirect in the browser's -// history, but the meta tag is a fallback for most non-JS clients. -const metaRedir = ` - - - - - - Redirecting... 
- -` diff --git a/vendor/github.com/mholt/caddy/caddyhttp/redirect/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/redirect/setup.go deleted file mode 100644 index 7813fe24a..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/redirect/setup.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package redirect - -import ( - "net/http" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("redir", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new Redirect middleware instance. -func setup(c *caddy.Controller) error { - rules, err := redirParse(c) - if err != nil { - return err - } - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Redirect{Next: next, Rules: rules} - }) - - return nil -} - -func redirParse(c *caddy.Controller) ([]Rule, error) { - var redirects []Rule - - cfg := httpserver.GetConfig(c) - - initRule := func(rule *Rule, defaultCode string, args []string) error { - rule.FromScheme = func() string { - if cfg.TLS.Enabled { - return "https" - } - return "http" - } - - var ( - from = "/" - to string - code = defaultCode - ) - switch len(args) { - case 1: - // To specified (catch-all redirect) - // Not sure why user is doing this in a table, as it causes all other redirects to be ignored. 
- // As such, this feature remains undocumented. - to = args[0] - case 2: - // From and To specified - from = args[0] - to = args[1] - case 3: - // From, To, and Code specified - from = args[0] - to = args[1] - code = args[2] - default: - return c.ArgErr() - } - - rule.FromPath = from - rule.To = to - if code == "meta" { - rule.Meta = true - code = defaultCode - } - if codeNumber, ok := httpRedirs[code]; ok { - rule.Code = codeNumber - } else { - return c.Errf("Invalid redirect code '%v'", code) - } - - return nil - } - - // checkAndSaveRule checks the rule for validity (except the redir code) - // and saves it if it's valid, or returns an error. - checkAndSaveRule := func(rule Rule) error { - if rule.FromPath == rule.To { - return c.Err("'from' and 'to' values of redirect rule cannot be the same") - } - - // prevent obvious duplicates (rules with if statements exempt) - if ifm, ok := rule.RequestMatcher.(httpserver.IfMatcher); !ok || !ifm.Enabled { - for _, otherRule := range redirects { - if otherRule.FromPath == rule.FromPath { - return c.Errf("rule with duplicate 'from' value: %s -> %s", otherRule.FromPath, otherRule.To) - } - } - } - - redirects = append(redirects, rule) - return nil - } - - const initDefaultCode = "301" - - for c.Next() { - args := c.RemainingArgs() - matcher, err := httpserver.SetupIfMatcher(c) - if err != nil { - return nil, err - } - - var hadOptionalBlock bool - for c.NextBlock() { - if httpserver.IfMatcherKeyword(c) { - continue - } - - hadOptionalBlock = true - - rule := Rule{ - RequestMatcher: matcher, - } - - defaultCode := initDefaultCode - // Set initial redirect code - if len(args) == 1 { - defaultCode = args[0] - } - - // RemainingArgs only gets the values after the current token, but in our - // case we want to include the current token to get an accurate count. - insideArgs := append([]string{c.Val()}, c.RemainingArgs()...) 
- err := initRule(&rule, defaultCode, insideArgs) - if err != nil { - return redirects, err - } - - err = checkAndSaveRule(rule) - if err != nil { - return redirects, err - } - } - - if !hadOptionalBlock { - rule := Rule{ - RequestMatcher: matcher, - } - err := initRule(&rule, initDefaultCode, args) - if err != nil { - return redirects, err - } - - err = checkAndSaveRule(rule) - if err != nil { - return redirects, err - } - } - } - - return redirects, nil -} - -// httpRedirs is a list of supported HTTP redirect codes. -var httpRedirs = map[string]int{ - "300": http.StatusMultipleChoices, - "301": http.StatusMovedPermanently, - "302": http.StatusFound, // (NOT CORRECT for "Temporary Redirect", see 307) - "303": http.StatusSeeOther, - "304": http.StatusNotModified, - "305": http.StatusUseProxy, - "307": http.StatusTemporaryRedirect, - "308": http.StatusPermanentRedirect, // Permanent Redirect (RFC 7238) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/requestid/requestid.go b/vendor/github.com/mholt/caddy/caddyhttp/requestid/requestid.go deleted file mode 100644 index b03c449f6..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/requestid/requestid.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package requestid - -import ( - "context" - "log" - "net/http" - - "github.com/google/uuid" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// Handler is a middleware handler -type Handler struct { - Next httpserver.Handler - HeaderName string // (optional) header from which to read an existing ID -} - -func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - var reqid uuid.UUID - - uuidFromHeader := r.Header.Get(h.HeaderName) - if h.HeaderName != "" && uuidFromHeader != "" { - // use the ID in the header field if it exists - var err error - reqid, err = uuid.Parse(uuidFromHeader) - if err != nil { - log.Printf("[NOTICE] Parsing request ID from %s header: %v", h.HeaderName, err) - reqid = uuid.New() - } - } else { - // otherwise, create a new one - reqid = uuid.New() - } - - // set the request ID on the context - c := context.WithValue(r.Context(), httpserver.RequestIDCtxKey, reqid.String()) - r = r.WithContext(c) - - return h.Next.ServeHTTP(w, r) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/requestid/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/requestid/setup.go deleted file mode 100644 index 689f99e33..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/requestid/setup.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package requestid - -import ( - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("request_id", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -func setup(c *caddy.Controller) error { - var headerName string - - for c.Next() { - if c.NextArg() { - headerName = c.Val() - } - if c.NextArg() { - return c.ArgErr() - } - } - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Handler{Next: next, HeaderName: headerName} - }) - - return nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/rewrite/rewrite.go b/vendor/github.com/mholt/caddy/caddyhttp/rewrite/rewrite.go deleted file mode 100644 index f7f56cd39..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/rewrite/rewrite.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package rewrite is middleware for rewriting requests internally to -// a different path. -package rewrite - -import ( - "fmt" - "net/http" - "net/url" - "path" - "path/filepath" - "regexp" - "strings" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// Result is the result of a rewrite -type Result int - -const ( - // RewriteIgnored is returned when rewrite is not done on request. - RewriteIgnored Result = iota - // RewriteDone is returned when rewrite is done on request. 
- RewriteDone -) - -// Rewrite is middleware to rewrite request locations internally before being handled. -type Rewrite struct { - Next httpserver.Handler - FileSys http.FileSystem - Rules []httpserver.HandlerConfig -} - -// ServeHTTP implements the httpserver.Handler interface. -func (rw Rewrite) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - if rule := httpserver.ConfigSelector(rw.Rules).Select(r); rule != nil { - rule.(Rule).Rewrite(rw.FileSys, r) - } - - return rw.Next.ServeHTTP(w, r) -} - -// Rule describes an internal location rewrite rule. -type Rule interface { - httpserver.HandlerConfig - // Rewrite rewrites the internal location of the current request. - Rewrite(http.FileSystem, *http.Request) Result -} - -// SimpleRule is a simple rewrite rule. -type SimpleRule struct { - Regexp *regexp.Regexp - To string - Negate bool -} - -// NewSimpleRule creates a new Simple Rule -func NewSimpleRule(from, to string, negate bool) (*SimpleRule, error) { - r, err := regexp.Compile(from) - if err != nil { - return nil, err - } - return &SimpleRule{ - Regexp: r, - To: to, - Negate: negate, - }, nil -} - -// BasePath satisfies httpserver.Config -func (s SimpleRule) BasePath() string { return "/" } - -// Match satisfies httpserver.Config -func (s *SimpleRule) Match(r *http.Request) bool { - matches := regexpMatches(s.Regexp, "/", r.URL.Path) - if s.Negate { - return len(matches) == 0 - } - return len(matches) > 0 -} - -// Rewrite rewrites the internal location of the current request. -func (s *SimpleRule) Rewrite(fs http.FileSystem, r *http.Request) Result { - - // attempt rewrite - return To(fs, r, s.To, newReplacer(r)) -} - -// ComplexRule is a rewrite rule based on a regular expression -type ComplexRule struct { - // Path base. 
Request to this path and subpaths will be rewritten - Base string - - // Path to rewrite to - To string - - // Extensions to filter by - Exts []string - - // Request matcher - httpserver.RequestMatcher - - Regexp *regexp.Regexp -} - -// NewComplexRule creates a new RegexpRule. It returns an error if regexp -// pattern (pattern) or extensions (ext) are invalid. -func NewComplexRule(base, pattern, to string, ext []string, matcher httpserver.RequestMatcher) (ComplexRule, error) { - // validate regexp if present - var r *regexp.Regexp - if pattern != "" { - var err error - r, err = regexp.Compile(pattern) - if err != nil { - return ComplexRule{}, err - } - } - - // validate extensions if present - for _, v := range ext { - if len(v) < 2 || (len(v) < 3 && v[0] == '!') { - // check if no extension is specified - if v != "/" && v != "!/" { - return ComplexRule{}, fmt.Errorf("invalid extension %v", v) - } - } - } - - // use both IfMatcher and PathMatcher - matcher = httpserver.MergeRequestMatchers( - // If condition matcher - matcher, - // Base path matcher - httpserver.PathMatcher(base), - ) - - return ComplexRule{ - Base: base, - To: to, - Exts: ext, - RequestMatcher: matcher, - Regexp: r, - }, nil -} - -// BasePath satisfies httpserver.Config -func (r ComplexRule) BasePath() string { return r.Base } - -// Match satisfies httpserver.Config. -// -// Though ComplexRule embeds a RequestMatcher, additional -// checks are needed which requires a custom implementation. -func (r ComplexRule) Match(req *http.Request) bool { - // validate RequestMatcher - // includes if and path - if !r.RequestMatcher.Match(req) { - return false - } - - // validate extensions - if !r.matchExt(req.URL.Path) { - return false - } - - // if regex is nil, ignore - if r.Regexp == nil { - return true - } - // otherwise validate regex - return regexpMatches(r.Regexp, r.Base, req.URL.Path) != nil -} - -// Rewrite rewrites the internal location of the current request. 
-func (r ComplexRule) Rewrite(fs http.FileSystem, req *http.Request) (re Result) { - replacer := newReplacer(req) - - // validate regexp if present - if r.Regexp != nil { - matches := regexpMatches(r.Regexp, r.Base, req.URL.Path) - switch len(matches) { - case 0: - // no match - return - default: - // set regexp match variables {1}, {2} ... - - // url escaped values of ? and #. - q, f := url.QueryEscape("?"), url.QueryEscape("#") - - for i := 1; i < len(matches); i++ { - // Special case of unescaped # and ? by stdlib regexp. - // Reverse the unescape. - if strings.ContainsAny(matches[i], "?#") { - matches[i] = strings.NewReplacer("?", q, "#", f).Replace(matches[i]) - } - - replacer.Set(fmt.Sprint(i), matches[i]) - } - } - } - - // attempt rewrite - return To(fs, req, r.To, replacer) -} - -// matchExt matches rPath against registered file extensions. -// Returns true if a match is found and false otherwise. -func (r ComplexRule) matchExt(rPath string) bool { - f := filepath.Base(rPath) - ext := path.Ext(f) - if ext == "" { - ext = "/" - } - - mustUse := false - for _, v := range r.Exts { - use := true - if v[0] == '!' 
{ - use = false - v = v[1:] - } - - if use { - mustUse = true - } - - if ext == v { - return use - } - } - - return !mustUse -} - -func regexpMatches(regexp *regexp.Regexp, base, rPath string) []string { - if regexp != nil { - // include trailing slash in regexp if present - start := len(base) - if strings.HasSuffix(base, "/") { - start-- - } - return regexp.FindStringSubmatch(rPath[start:]) - } - return nil -} - -func newReplacer(r *http.Request) httpserver.Replacer { - return httpserver.NewReplacer(r, nil, "") -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/rewrite/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/rewrite/setup.go deleted file mode 100644 index f73d76a70..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/rewrite/setup.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rewrite - -import ( - "net/http" - "strings" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("rewrite", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new Rewrite middleware instance. 
-func setup(c *caddy.Controller) error { - rewrites, err := rewriteParse(c) - if err != nil { - return err - } - - cfg := httpserver.GetConfig(c) - - cfg.AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return Rewrite{ - Next: next, - FileSys: http.Dir(cfg.Root), - Rules: rewrites, - } - }) - - return nil -} - -func rewriteParse(c *caddy.Controller) ([]httpserver.HandlerConfig, error) { - var rules []httpserver.HandlerConfig - - for c.Next() { - var rule Rule - var err error - var base = "/" - var pattern, to string - var ext []string - var negate bool - - args := c.RemainingArgs() - - var matcher httpserver.RequestMatcher - - switch len(args) { - case 1: - base = args[0] - fallthrough - case 0: - // Integrate request matcher for 'if' conditions. - matcher, err = httpserver.SetupIfMatcher(c) - if err != nil { - return nil, err - } - - for c.NextBlock() { - if httpserver.IfMatcherKeyword(c) { - continue - } - switch c.Val() { - case "r", "regexp": - if !c.NextArg() { - return nil, c.ArgErr() - } - pattern = c.Val() - case "to": - args1 := c.RemainingArgs() - if len(args1) == 0 { - return nil, c.ArgErr() - } - to = strings.Join(args1, " ") - case "ext": - args1 := c.RemainingArgs() - if len(args1) == 0 { - return nil, c.ArgErr() - } - ext = args1 - default: - return nil, c.ArgErr() - } - } - // ensure to is specified - if to == "" { - return nil, c.ArgErr() - } - if rule, err = NewComplexRule(base, pattern, to, ext, matcher); err != nil { - return nil, err - } - rules = append(rules, rule) - - // the only unhandled case is 2 and above - default: - if args[0] == "not" { - negate = true - args = args[1:] - } - rule, err = NewSimpleRule(args[0], strings.Join(args[1:], " "), negate) - if err != nil { - return nil, err - } - rules = append(rules, rule) - } - - } - - return rules, nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/rewrite/to.go b/vendor/github.com/mholt/caddy/caddyhttp/rewrite/to.go deleted file mode 100644 index 
1b0856f70..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/rewrite/to.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rewrite - -import ( - "log" - "net/http" - "net/url" - "path" - "strings" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// To attempts rewrite. It attempts to rewrite to first valid path -// or the last path if none of the paths are valid. -func To(fs http.FileSystem, r *http.Request, to string, replacer httpserver.Replacer) Result { - tos := strings.Fields(to) - - // try each rewrite paths - t := "" - query := "" - for _, v := range tos { - t = replacer.Replace(v) - tparts := strings.SplitN(t, "?", 2) - t = path.Clean(tparts[0]) - - if len(tparts) > 1 { - query = tparts[1] - } - - // add trailing slash for directories, if present - if strings.HasSuffix(tparts[0], "/") && !strings.HasSuffix(t, "/") { - t += "/" - } - - // validate file - if validFile(fs, t) { - break - } - } - - // validate resulting path - u, err := url.Parse(t) - if err != nil { - // Let the user know we got here. Rewrite is expected but - // the resulting url is invalid. - log.Printf("[ERROR] rewrite: resulting path '%v' is invalid. 
error: %v", t, err) - return RewriteIgnored - } - - // perform rewrite - r.URL.Path = u.Path - if query != "" { - // overwrite query string if present - r.URL.RawQuery = query - } - if u.Fragment != "" { - // overwrite fragment if present - r.URL.Fragment = u.Fragment - } - - return RewriteDone -} - -// validFile checks if file exists on the filesystem. -// if file ends with `/`, it is validated as a directory. -func validFile(fs http.FileSystem, file string) bool { - if fs == nil { - return false - } - - f, err := fs.Open(file) - if err != nil { - return false - } - defer f.Close() - - stat, err := f.Stat() - if err != nil { - return false - } - - // directory - if strings.HasSuffix(file, "/") { - return stat.IsDir() - } - - // file - return !stat.IsDir() -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/root/root.go b/vendor/github.com/mholt/caddy/caddyhttp/root/root.go deleted file mode 100644 index 47df53db8..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/root/root.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package root - -import ( - "log" - "os" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("root", caddy.Plugin{ - ServerType: "http", - Action: setupRoot, - }) -} - -func setupRoot(c *caddy.Controller) error { - config := httpserver.GetConfig(c) - - for c.Next() { - if !c.NextArg() { - return c.ArgErr() - } - config.Root = c.Val() - if c.NextArg() { - // only one argument allowed - return c.ArgErr() - } - } - //first check that the path is not a symlink, os.Stat panics when this is true - info, _ := os.Lstat(config.Root) - if info != nil && info.Mode()&os.ModeSymlink == os.ModeSymlink { - //just print out info, delegate responsibility for symlink validity to - //underlying Go framework, no need to test / verify twice - log.Printf("[INFO] Root path is symlink: %s", config.Root) - } else { - // Check if root path exists - _, err := os.Stat(config.Root) - if err != nil { - if os.IsNotExist(err) { - // Allow this, because the folder might appear later. - // But make sure the user knows! - log.Printf("[WARNING] Root path does not exist: %s", config.Root) - } else { - return c.Errf("Unable to access root path '%s': %v", config.Root, err) - } - } - } - - return nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/staticfiles/fileserver.go b/vendor/github.com/mholt/caddy/caddyhttp/staticfiles/fileserver.go deleted file mode 100644 index 0863ebe58..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/staticfiles/fileserver.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package staticfiles provides middleware for serving static files from disk. -// Its handler is the default HTTP handler for the HTTP server. -// -// TODO: Should this package be rolled into the httpserver package? -package staticfiles - -import ( - "math/rand" - "net/http" - "os" - "path" - "path/filepath" - "runtime" - "strconv" - "strings" - - "github.com/mholt/caddy" -) - -// FileServer implements a production-ready file server -// and is the 'default' handler for all requests to Caddy. -// It simply loads and serves the URI requested. FileServer -// is adapted from the one in net/http by the Go authors. -// Significant modifications have been made. -// -// Original license: -// -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -type FileServer struct { - Root http.FileSystem // jailed access to the file system - Hide []string // list of files for which to respond with "Not Found" - - // A list of pages that may be understood as the "index" files to directories. - // Injected from *SiteConfig. - IndexPages []string -} - -// ServeHTTP serves static files for r according to fs's configuration. -func (fs FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - if r.Method != "GET" && r.Method != "HEAD" { - return http.StatusMethodNotAllowed, nil - } - return fs.serveFile(w, r) -} - -// serveFile writes the specified file to the HTTP response. -// name is '/'-separated, not filepath.Separator. 
-func (fs FileServer) serveFile(w http.ResponseWriter, r *http.Request) (int, error) { - reqPath := r.URL.Path - - // Prevent absolute path access on Windows. - // TODO remove when stdlib http.Dir fixes this. - if runtime.GOOS == "windows" && len(reqPath) > 0 && filepath.IsAbs(reqPath[1:]) { - return http.StatusNotFound, nil - } - - // open the requested file - f, err := fs.Root.Open(reqPath) - if err != nil { - if os.IsNotExist(err) { - return http.StatusNotFound, nil - } else if os.IsPermission(err) { - return http.StatusForbidden, err - } - // otherwise, maybe the server is under load and ran out of file descriptors? - backoff := int(3 + rand.Int31()%3) // 3–5 seconds to prevent a stampede - w.Header().Set("Retry-After", strconv.Itoa(backoff)) - return http.StatusServiceUnavailable, err - } - defer f.Close() - - // get information about the file - d, err := f.Stat() - if err != nil { - if os.IsNotExist(err) { - return http.StatusNotFound, nil - } else if os.IsPermission(err) { - return http.StatusForbidden, err - } - // return a different status code than above to distinguish these cases - return http.StatusInternalServerError, err - } - - // redirect to canonical path (being careful to preserve other parts of URL and - // considering cases where a site is defined with a path prefix that gets stripped) - urlCopy := *r.URL - pathPrefix, _ := r.Context().Value(caddy.CtxKey("path_prefix")).(string) - if pathPrefix != "/" { - urlCopy.Path = pathPrefix + urlCopy.Path - } - if urlCopy.Path == "" { - urlCopy.Path = "/" - } - if d.IsDir() { - // ensure there is a trailing slash - if urlCopy.Path[len(urlCopy.Path)-1] != '/' { - for strings.HasPrefix(urlCopy.Path, "//") { - // prevent path-based open redirects - urlCopy.Path = strings.TrimPrefix(urlCopy.Path, "/") - } - urlCopy.Path += "/" - http.Redirect(w, r, urlCopy.String(), http.StatusMovedPermanently) - return http.StatusMovedPermanently, nil - } - } else { - // ensure no trailing slash - redir := false - if 
urlCopy.Path[len(urlCopy.Path)-1] == '/' { - urlCopy.Path = urlCopy.Path[:len(urlCopy.Path)-1] - redir = true - } - - // if an index file was explicitly requested, strip file name from the request - // ("/foo/index.html" -> "/foo/") - var requestPage = path.Base(urlCopy.Path) - for _, indexPage := range fs.IndexPages { - if requestPage == indexPage { - urlCopy.Path = urlCopy.Path[:len(urlCopy.Path)-len(indexPage)] - redir = true - break - } - } - - if redir { - for strings.HasPrefix(urlCopy.Path, "//") { - // prevent path-based open redirects - urlCopy.Path = strings.TrimPrefix(urlCopy.Path, "/") - } - http.Redirect(w, r, urlCopy.String(), http.StatusMovedPermanently) - return http.StatusMovedPermanently, nil - } - } - - // use contents of an index file, if present, for directory requests - if d.IsDir() { - for _, indexPage := range fs.IndexPages { - indexPath := path.Join(reqPath, indexPage) - indexFile, err := fs.Root.Open(indexPath) - if err != nil { - continue - } - - indexInfo, err := indexFile.Stat() - if err != nil { - indexFile.Close() - continue - } - - // this defer does not leak fds even though we are in a loop, - // because previous iterations of the loop must have had an - // err, so there's nothing to close from earlier iterations. 
- defer indexFile.Close() - - // close previously-opened file immediately to release fd - f.Close() - - // switch to using the index file, and we're done here - d = indexInfo - f = indexFile - reqPath = indexPath - break - } - } - - // return Not Found if we either did not find an index file (and thus are - // still a directory) or if this file is supposed to be hidden - if d.IsDir() || fs.IsHidden(d) { - return http.StatusNotFound, nil - } - - etag := calculateEtag(d) - - // look for compressed versions of the file on disk, if the client supports that encoding - for _, encoding := range staticEncodingPriority { - // see if the client accepts a compressed encoding we offer - acceptEncoding := strings.Split(r.Header.Get("Accept-Encoding"), ",") - accepted := false - for _, acc := range acceptEncoding { - if strings.TrimSpace(acc) == encoding { - accepted = true - break - } - } - - // if client doesn't support this encoding, don't even bother; try next one - if !accepted { - continue - } - - // see if the compressed version of this file exists - encodedFile, err := fs.Root.Open(reqPath + staticEncoding[encoding]) - if err != nil { - continue - } - - encodedFileInfo, err := encodedFile.Stat() - if err != nil { - encodedFile.Close() - continue - } - - // close the encoded file when we're done, and close the - // previously-opened file immediately to release the fd - defer encodedFile.Close() - f.Close() - - // the encoded file is now what we're serving - f = encodedFile - etag = calculateEtag(encodedFileInfo) - w.Header().Add("Vary", "Accept-Encoding") - w.Header().Set("Content-Encoding", encoding) - w.Header().Set("Content-Length", strconv.FormatInt(encodedFileInfo.Size(), 10)) - break - } - - // Set the ETag returned to the user-agent. Note that a conditional If-None-Match - // request is handled in http.ServeContent below, which checks against this ETag value. 
- w.Header().Set("ETag", etag) - - // Note: Errors generated by ServeContent are written immediately - // to the response. This usually only happens if seeking fails (rare). - // Its signature does not bubble the error up to us, so we cannot - // return it for any logging middleware to record. Oh well. - http.ServeContent(w, r, d.Name(), d.ModTime(), f) - - return http.StatusOK, nil -} - -// IsHidden checks if file with FileInfo d is on hide list. -func (fs FileServer) IsHidden(d os.FileInfo) bool { - for _, hiddenPath := range fs.Hide { - // TODO: Could these FileInfos be stored instead of their paths, to avoid opening them all the time? - if hFile, err := fs.Root.Open(hiddenPath); err == nil { - fs, _ := hFile.Stat() - hFile.Close() - if os.SameFile(d, fs) { - return true - } - } - } - return false -} - -// calculateEtag produces a strong etag by default, although, for -// efficiency reasons, it does not actually consume the contents -// of the file to make a hash of all the bytes. ¯\_(ツ)_/¯ -// Prefix the etag with "W/" to convert it into a weak etag. -// See: https://tools.ietf.org/html/rfc7232#section-2.3 -func calculateEtag(d os.FileInfo) string { - t := strconv.FormatInt(d.ModTime().Unix(), 36) - s := strconv.FormatInt(d.Size(), 36) - return `"` + t + s + `"` -} - -// DefaultIndexPages is a list of pages that may be understood as -// the "index" files to directories. -var DefaultIndexPages = []string{ - "index.html", - "index.htm", - "index.txt", - "default.html", - "default.htm", - "default.txt", -} - -// staticEncoding is a map of content-encoding to a file extension. -// If client accepts given encoding (via Accept-Encoding header) and compressed file with given extensions exists -// it will be served to the client instead of original one. -var staticEncoding = map[string]string{ - "gzip": ".gz", - "br": ".br", -} - -// staticEncodingPriority is a list of preferred static encodings (most efficient compression to least one). 
-var staticEncodingPriority = []string{ - "br", - "gzip", -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/status/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/status/setup.go deleted file mode 100644 index 5c2943142..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/status/setup.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package status - -import ( - "strconv" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// init registers Status plugin -func init() { - caddy.RegisterPlugin("status", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures new Status middleware instance. 
-func setup(c *caddy.Controller) error { - rules, err := statusParse(c) - if err != nil { - return err - } - - cfg := httpserver.GetConfig(c) - mid := func(next httpserver.Handler) httpserver.Handler { - return Status{Rules: rules, Next: next} - } - cfg.AddMiddleware(mid) - - return nil -} - -// statusParse parses status directive -func statusParse(c *caddy.Controller) ([]httpserver.HandlerConfig, error) { - var rules []httpserver.HandlerConfig - - for c.Next() { - hadBlock := false - args := c.RemainingArgs() - - switch len(args) { - case 1: - status, err := strconv.Atoi(args[0]) - if err != nil { - return rules, c.Errf("Expecting a numeric status code, got '%s'", args[0]) - } - - for c.NextBlock() { - hadBlock = true - basePath := c.Val() - - for _, cfg := range rules { - rule := cfg.(*Rule) - if rule.Base == basePath { - return rules, c.Errf("Duplicate path: '%s'", basePath) - } - } - - rule := NewRule(basePath, status) - rules = append(rules, rule) - - if c.NextArg() { - return rules, c.ArgErr() - } - } - - if !hadBlock { - return rules, c.ArgErr() - } - case 2: - status, err := strconv.Atoi(args[0]) - if err != nil { - return rules, c.Errf("Expecting a numeric status code, got '%s'", args[0]) - } - - basePath := args[1] - for _, cfg := range rules { - rule := cfg.(*Rule) - if rule.Base == basePath { - return rules, c.Errf("Duplicate path: '%s'", basePath) - } - } - - rule := NewRule(basePath, status) - rules = append(rules, rule) - default: - return rules, c.ArgErr() - } - } - - return rules, nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/status/status.go b/vendor/github.com/mholt/caddy/caddyhttp/status/status.go deleted file mode 100644 index a00366e6a..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/status/status.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package status is middleware for returning status code for requests -package status - -import ( - "net/http" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// Rule describes status rewriting rule -type Rule struct { - // Base path. Request to this path and sub-paths will be answered with StatusCode - Base string - - // Status code to return - StatusCode int - - // Request matcher - httpserver.RequestMatcher -} - -// NewRule creates new Rule. -func NewRule(basePath string, status int) *Rule { - return &Rule{ - Base: basePath, - StatusCode: status, - RequestMatcher: httpserver.PathMatcher(basePath), - } -} - -// BasePath implements httpserver.HandlerConfig interface -func (rule *Rule) BasePath() string { - return rule.Base -} - -// Status is a middleware to return status code for request -type Status struct { - Rules []httpserver.HandlerConfig - Next httpserver.Handler -} - -// ServeHTTP implements the httpserver.Handler interface -func (status Status) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - if cfg := httpserver.ConfigSelector(status.Rules).Select(r); cfg != nil { - rule := cfg.(*Rule) - - if rule.StatusCode < 400 { - // There's no ability to return response body -- - // write the response status code in header and signal - // to other handlers that response is already handled - w.WriteHeader(rule.StatusCode) - return 0, nil - } - - return rule.StatusCode, nil - } - - return status.Next.ServeHTTP(w, r) -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/templates/setup.go 
b/vendor/github.com/mholt/caddy/caddyhttp/templates/setup.go deleted file mode 100644 index e9fe105ff..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/templates/setup.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package templates - -import ( - "bytes" - "net/http" - "sync" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("templates", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new Templates middleware instance. 
-func setup(c *caddy.Controller) error { - rules, err := templatesParse(c) - if err != nil { - return err - } - - cfg := httpserver.GetConfig(c) - - tmpls := Templates{ - Rules: rules, - Root: cfg.Root, - FileSys: http.Dir(cfg.Root), - BufPool: &sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, - }, - } - - cfg.AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - tmpls.Next = next - return tmpls - }) - - return nil -} - -func templatesParse(c *caddy.Controller) ([]Rule, error) { - var rules []Rule - - for c.Next() { - var rule Rule - - rule.Path = defaultTemplatePath - rule.Extensions = defaultTemplateExtensions - - args := c.RemainingArgs() - - switch len(args) { - case 0: - // Optional block - for c.NextBlock() { - switch c.Val() { - case "path": - args := c.RemainingArgs() - if len(args) != 1 { - return nil, c.ArgErr() - } - rule.Path = args[0] - - case "ext": - args := c.RemainingArgs() - if len(args) == 0 { - return nil, c.ArgErr() - } - rule.Extensions = args - - case "between": - args := c.RemainingArgs() - if len(args) != 2 { - return nil, c.ArgErr() - } - rule.Delims[0] = args[0] - rule.Delims[1] = args[1] - } - } - default: - // First argument would be the path - rule.Path = args[0] - - // Any remaining arguments are extensions - rule.Extensions = args[1:] - if len(rule.Extensions) == 0 { - rule.Extensions = defaultTemplateExtensions - } - } - - for _, ext := range rule.Extensions { - rule.IndexFiles = append(rule.IndexFiles, "index"+ext) - } - - rules = append(rules, rule) - } - return rules, nil -} - -const defaultTemplatePath = "/" - -var defaultTemplateExtensions = []string{".html", ".htm", ".tmpl", ".tpl", ".txt"} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/templates/templates.go b/vendor/github.com/mholt/caddy/caddyhttp/templates/templates.go deleted file mode 100644 index 9a1ae4ba1..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/templates/templates.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 
2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package templates implements template execution for files to be -// dynamically rendered for the client. -package templates - -import ( - "bytes" - "mime" - "net/http" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "text/template" - "time" - - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -// ServeHTTP implements the httpserver.Handler interface. -func (t Templates) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - // iterate rules, to find first one that matches the request path - for _, rule := range t.Rules { - if !httpserver.Path(r.URL.Path).Matches(rule.Path) { - continue - } - - fpath := r.URL.Path - - // get a buffer from the pool and make a response recorder - buf := t.BufPool.Get().(*bytes.Buffer) - buf.Reset() - defer t.BufPool.Put(buf) - - // only buffer the response when we want to execute a template - shouldBuf := func(status int, header http.Header) bool { - // see if this request matches a template extension - reqExt := path.Ext(fpath) - for _, ext := range rule.Extensions { - if reqExt == "" { - // request has no extension, so check response Content-Type - ct := mime.TypeByExtension(ext) - if ct != "" && strings.Contains(header.Get("Content-Type"), ct) { - return true - } - } else if reqExt == ext { - return true - } - } - return false - } - - // prepare a buffer to hold the response, if applicable - rb := 
httpserver.NewResponseBuffer(buf, w, shouldBuf) - - // pass request up the chain to let another middleware provide us the template - code, err := t.Next.ServeHTTP(rb, r) - if !rb.Buffered() || code >= 300 || err != nil { - return code, err - } - - // create a new template - templateName := filepath.Base(fpath) - tpl := template.New(templateName) - - // set delimiters - if rule.Delims != [2]string{} { - tpl.Delims(rule.Delims[0], rule.Delims[1]) - } - - // add custom functions - tpl.Funcs(httpserver.TemplateFuncs) - - // parse the template - parsedTpl, err := tpl.Parse(rb.Buffer.String()) - if err != nil { - return http.StatusInternalServerError, err - } - - // create execution context for the template template - ctx := httpserver.NewContextWithHeader(w.Header()) - ctx.Root = t.FileSys - ctx.Req = r - ctx.URL = r.URL - - // execute the template - buf.Reset() - err = parsedTpl.Execute(buf, ctx) - if err != nil { - return http.StatusInternalServerError, err - } - - // copy the buffered header into the real ResponseWriter - rb.CopyHeader() - - // set the actual content length now that the template was executed - w.Header().Set("Content-Length", strconv.Itoa(buf.Len())) - - // delete the headers related to cache - w.Header().Del("ETag") - w.Header().Del("Last-Modified") - - // get the modification time in preparation for http.ServeContent - modTime, _ := time.Parse(http.TimeFormat, w.Header().Get("Last-Modified")) - - // at last, write the rendered template to the response; make sure to use - // use the proper status code, since ServeContent hard-codes 2xx codes... - http.ServeContent(rb.StatusCodeWriter(w), r, templateName, modTime, bytes.NewReader(buf.Bytes())) - - return 0, nil - } - - return t.Next.ServeHTTP(w, r) -} - -// Templates is middleware to render templated files as the HTTP response. 
-type Templates struct { - Next httpserver.Handler - Rules []Rule - Root string - FileSys http.FileSystem - BufPool *sync.Pool // docs: "A Pool must not be copied after first use." -} - -// Rule represents a template rule. A template will only execute -// with this rule if the request path matches the Path specified -// and requests a resource with one of the extensions specified. -type Rule struct { - Path string - Extensions []string - IndexFiles []string - Delims [2]string -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/timeouts/timeouts.go b/vendor/github.com/mholt/caddy/caddyhttp/timeouts/timeouts.go deleted file mode 100644 index 749c03b02..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/timeouts/timeouts.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package timeouts - -import ( - "time" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("timeouts", caddy.Plugin{ - ServerType: "http", - Action: setupTimeouts, - }) -} - -func setupTimeouts(c *caddy.Controller) error { - config := httpserver.GetConfig(c) - - for c.Next() { - var hasOptionalBlock bool - for c.NextBlock() { - hasOptionalBlock = true - - // ensure the kind of timeout is recognized - kind := c.Val() - if kind != "read" && kind != "header" && kind != "write" && kind != "idle" { - return c.Errf("unknown timeout '%s': must be read, header, write, or idle", kind) - } - - // parse the timeout duration - if !c.NextArg() { - return c.ArgErr() - } - if c.NextArg() { - // only one value permitted - return c.ArgErr() - } - var dur time.Duration - if c.Val() != "none" { - var err error - dur, err = time.ParseDuration(c.Val()) - if err != nil { - return c.Errf("%v", err) - } - if dur < 0 { - return c.Err("non-negative duration required for timeout value") - } - } - - // set this timeout's duration - switch kind { - case "read": - config.Timeouts.ReadTimeout = dur - config.Timeouts.ReadTimeoutSet = true - case "header": - config.Timeouts.ReadHeaderTimeout = dur - config.Timeouts.ReadHeaderTimeoutSet = true - case "write": - config.Timeouts.WriteTimeout = dur - config.Timeouts.WriteTimeoutSet = true - case "idle": - config.Timeouts.IdleTimeout = dur - config.Timeouts.IdleTimeoutSet = true - } - } - if !hasOptionalBlock { - // set all timeouts to the same value - - if !c.NextArg() { - return c.ArgErr() - } - if c.NextArg() { - // only one value permitted - return c.ArgErr() - } - val := c.Val() - - config.Timeouts.ReadTimeoutSet = true - config.Timeouts.ReadHeaderTimeoutSet = true - config.Timeouts.WriteTimeoutSet = true - config.Timeouts.IdleTimeoutSet = true - - if val == "none" { - config.Timeouts.ReadTimeout = 0 - config.Timeouts.ReadHeaderTimeout = 0 - config.Timeouts.WriteTimeout = 0 - 
config.Timeouts.IdleTimeout = 0 - } else { - dur, err := time.ParseDuration(val) - if err != nil { - return c.Errf("unknown timeout duration: %v", err) - } - if dur < 0 { - return c.Err("non-negative duration required for timeout value") - } - config.Timeouts.ReadTimeout = dur - config.Timeouts.ReadHeaderTimeout = dur - config.Timeouts.WriteTimeout = dur - config.Timeouts.IdleTimeout = dur - } - } - } - - return nil -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/websocket/setup.go b/vendor/github.com/mholt/caddy/caddyhttp/websocket/setup.go deleted file mode 100644 index 822ccd155..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/websocket/setup.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package websocket - -import ( - "github.com/mholt/caddy" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -func init() { - caddy.RegisterPlugin("websocket", caddy.Plugin{ - ServerType: "http", - Action: setup, - }) -} - -// setup configures a new WebSocket middleware instance. 
-func setup(c *caddy.Controller) error { - websocks, err := webSocketParse(c) - if err != nil { - return err - } - - GatewayInterface = caddy.AppName + "-CGI/1.1" - ServerSoftware = caddy.AppName + "/" + caddy.AppVersion - - httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler { - return WebSocket{Next: next, Sockets: websocks} - }) - - return nil -} - -func webSocketParse(c *caddy.Controller) ([]Config, error) { - var websocks []Config - var respawn bool - - optionalBlock := func() (hadBlock bool, err error) { - for c.NextBlock() { - hadBlock = true - if c.Val() == "respawn" { - respawn = true - } else { - return true, c.Err("Expected websocket configuration parameter in block") - } - } - return - } - - for c.Next() { - var val, path, command string - - // Path or command; not sure which yet - if !c.NextArg() { - return nil, c.ArgErr() - } - val = c.Val() - - // Extra configuration may be in a block - hadBlock, err := optionalBlock() - if err != nil { - return nil, err - } - - if !hadBlock { - // The next argument on this line will be the command or an open curly brace - if c.NextArg() { - path = val - command = c.Val() - } else { - path = "/" - command = val - } - - // Okay, check again for optional block - _, err = optionalBlock() - if err != nil { - return nil, err - } - } - - // Split command into the actual command and its arguments - cmd, args, err := caddy.SplitCommandAndArgs(command) - if err != nil { - return nil, err - } - - websocks = append(websocks, Config{ - Path: path, - Command: cmd, - Arguments: args, - Respawn: respawn, // TODO: This isn't used currently - }) - } - - return websocks, nil - -} diff --git a/vendor/github.com/mholt/caddy/caddyhttp/websocket/websocket.go b/vendor/github.com/mholt/caddy/caddyhttp/websocket/websocket.go deleted file mode 100644 index a1f659bff..000000000 --- a/vendor/github.com/mholt/caddy/caddyhttp/websocket/websocket.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2015 Light Code 
Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package websocket implements a WebSocket server by executing -// a command and piping its input and output through the WebSocket -// connection. -package websocket - -import ( - "bufio" - "bytes" - "io" - "log" - "net" - "net/http" - "os" - "os/exec" - "strings" - "time" - - "github.com/gorilla/websocket" - "github.com/mholt/caddy/caddyhttp/httpserver" -) - -const ( - // Time allowed to write a message to the peer. - writeWait = 10 * time.Second - - // Time allowed to read the next pong message from the peer. - pongWait = 60 * time.Second - - // Send pings to peer with this period. Must be less than pongWait. - pingPeriod = (pongWait * 9) / 10 - - // Maximum message size allowed from peer. - maxMessageSize = 1024 * 1024 * 10 // 10 MB default. -) - -var ( - // GatewayInterface is the dialect of CGI being used by the server - // to communicate with the script. See CGI spec, 4.1.4 - GatewayInterface string - - // ServerSoftware is the name and version of the information server - // software making the CGI request. See CGI spec, 4.1.17 - ServerSoftware string -) - -type ( - // WebSocket is a type that holds configuration for the - // websocket middleware generally, like a list of all the - // websocket endpoints. 
- WebSocket struct { - // Next is the next HTTP handler in the chain for when the path doesn't match - Next httpserver.Handler - - // Sockets holds all the web socket endpoint configurations - Sockets []Config - } - - // Config holds the configuration for a single websocket - // endpoint which may serve multiple websocket connections. - Config struct { - Path string - Command string - Arguments []string - Respawn bool // TODO: Not used, but parser supports it until we decide on it - } -) - -// ServeHTTP converts the HTTP request to a WebSocket connection and serves it up. -func (ws WebSocket) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { - for _, sockConfig := range ws.Sockets { - if httpserver.Path(r.URL.Path).Matches(sockConfig.Path) { - return serveWS(w, r, &sockConfig) - } - } - - // Didn't match a websocket path, so pass-through - return ws.Next.ServeHTTP(w, r) -} - -// serveWS is used for setting and upgrading the HTTP connection to a websocket connection. -// It also spawns the child process that is associated with matched HTTP path/url. -func serveWS(w http.ResponseWriter, r *http.Request, config *Config) (int, error) { - upgrader := websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, - CheckOrigin: func(r *http.Request) bool { return true }, - } - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - // the connection has been "handled" -- WriteHeader was called with Upgrade, - // so don't return an error status code; just return an error - return 0, err - } - defer conn.Close() - - cmd := exec.Command(config.Command, config.Arguments...) 
- - stdout, err := cmd.StdoutPipe() - if err != nil { - return http.StatusBadGateway, err - } - defer stdout.Close() - - stdin, err := cmd.StdinPipe() - if err != nil { - return http.StatusBadGateway, err - } - defer stdin.Close() - - metavars, err := buildEnv(cmd.Path, r) - if err != nil { - return http.StatusBadGateway, err - } - - cmd.Env = metavars - - if err := cmd.Start(); err != nil { - return http.StatusBadGateway, err - } - - done := make(chan struct{}) - go pumpStdout(conn, stdout, done) - pumpStdin(conn, stdin) - - _ = stdin.Close() // close stdin to end the process - - if err := cmd.Process.Signal(os.Interrupt); err != nil { // signal an interrupt to kill the process - return http.StatusInternalServerError, err - } - - select { - case <-done: - case <-time.After(time.Second): - // terminate with extreme prejudice. - if err := cmd.Process.Signal(os.Kill); err != nil { - return http.StatusInternalServerError, err - } - <-done - } - - // not sure what we want to do here. - // status for an "exited" process is greater - // than 0, but isn't really an error per se. - // just going to ignore it for now. - if err := cmd.Wait(); err != nil { - log.Println("[ERROR] failed to release resources: ", err) - } - - return 0, nil -} - -// buildEnv creates the meta-variables for the child process according -// to the CGI 1.1 specification: http://tools.ietf.org/html/rfc3875#section-4.1 -// cmdPath should be the path of the command being run. -// The returned string slice can be set to the command's Env property. 
-func buildEnv(cmdPath string, r *http.Request) (metavars []string, err error) { - if !strings.Contains(r.RemoteAddr, ":") { - r.RemoteAddr += ":" - } - remoteHost, remotePort, err := net.SplitHostPort(r.RemoteAddr) - if err != nil { - return - } - - if !strings.Contains(r.Host, ":") { - r.Host += ":" - } - serverHost, serverPort, err := net.SplitHostPort(r.Host) - if err != nil { - return - } - - metavars = []string{ - `AUTH_TYPE=`, // Not used - `CONTENT_LENGTH=`, // Not used - `CONTENT_TYPE=`, // Not used - `GATEWAY_INTERFACE=` + GatewayInterface, - `PATH_INFO=`, // TODO - `PATH_TRANSLATED=`, // TODO - `QUERY_STRING=` + r.URL.RawQuery, - `REMOTE_ADDR=` + remoteHost, - `REMOTE_HOST=` + remoteHost, // Host lookups are slow - don't do them - `REMOTE_IDENT=`, // Not used - `REMOTE_PORT=` + remotePort, - `REMOTE_USER=`, // Not used, - `REQUEST_METHOD=` + r.Method, - `REQUEST_URI=` + r.RequestURI, - `SCRIPT_NAME=` + cmdPath, // path of the program being executed - `SERVER_NAME=` + serverHost, - `SERVER_PORT=` + serverPort, - `SERVER_PROTOCOL=` + r.Proto, - `SERVER_SOFTWARE=` + ServerSoftware, - } - - // Add each HTTP header to the environment as well - for header, values := range r.Header { - value := strings.Join(values, ", ") - header = strings.ToUpper(header) - header = strings.Replace(header, "-", "_", -1) - value = strings.Replace(value, "\n", " ", -1) - metavars = append(metavars, "HTTP_"+header+"="+value) - } - - return -} - -// pumpStdin handles reading data from the websocket connection and writing -// it to stdin of the process. -func pumpStdin(conn *websocket.Conn, stdin io.WriteCloser) { - // Setup our connection's websocket ping/pong handlers from our const values. 
- defer conn.Close() - conn.SetReadLimit(maxMessageSize) - if err := conn.SetReadDeadline(time.Now().Add(pongWait)); err != nil { - log.Println("[ERROR] failed to set read deadline: ", err) - } - conn.SetPongHandler(func(string) error { - if err := conn.SetReadDeadline(time.Now().Add(pongWait)); err != nil { - log.Println("[ERROR] failed to set read deadline: ", err) - } - return nil - }) - for { - _, message, err := conn.ReadMessage() - if err != nil { - break - } - message = append(message, '\n') - if _, err := stdin.Write(message); err != nil { - break - } - } -} - -// pumpStdout handles reading data from stdout of the process and writing -// it to websocket connection. -func pumpStdout(conn *websocket.Conn, stdout io.Reader, done chan struct{}) { - go pinger(conn, done) - defer func() { - _ = conn.Close() - close(done) // make sure to close the pinger when we are done. - }() - - s := bufio.NewScanner(stdout) - for s.Scan() { - if err := conn.SetWriteDeadline(time.Now().Add(writeWait)); err != nil { - log.Println("[ERROR] failed to set write deadline: ", err) - } - if err := conn.WriteMessage(websocket.TextMessage, bytes.TrimSpace(s.Bytes())); err != nil { - break - } - } - if s.Err() != nil { - err := conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseGoingAway, s.Err().Error()), time.Time{}) - if err != nil { - log.Println("[ERROR] WriteControl failed: ", err) - } - } -} - -// pinger simulates the websocket to keep it alive with ping messages. -func pinger(conn *websocket.Conn, done chan struct{}) { - ticker := time.NewTicker(pingPeriod) - defer ticker.Stop() - - for { // blocking loop with select to wait for stimulation. 
- select { - case <-ticker.C: - if err := conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(writeWait)); err != nil { - err := conn.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseGoingAway, err.Error()), time.Time{}) - if err != nil { - log.Println("[ERROR] WriteControl failed: ", err) - } - return - } - case <-done: - return // clean up this routine. - } - } -} diff --git a/vendor/github.com/mholt/caddy/caddytls/config.go b/vendor/github.com/mholt/caddy/caddytls/config.go deleted file mode 100644 index 9fca894f5..000000000 --- a/vendor/github.com/mholt/caddy/caddytls/config.go +++ /dev/null @@ -1,567 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "os" - "sync/atomic" - "time" - - "github.com/go-acme/lego/challenge/tlsalpn01" - - "github.com/go-acme/lego/certcrypto" - "github.com/klauspost/cpuid" - "github.com/mholt/caddy" - "github.com/mholt/certmagic" -) - -// Config describes how TLS should be configured and used. 
-type Config struct { - // The hostname or class of hostnames this config is - // designated for; can contain wildcard characters - // according to RFC 6125 §6.4.3 - this field MUST - // be set in order for things to work as expected, - // must be normalized, and if an IP address, must - // be normalized - Hostname string - - // Whether TLS is enabled - Enabled bool - - // Minimum and maximum protocol versions to allow - ProtocolMinVersion uint16 - ProtocolMaxVersion uint16 - - // The list of cipher suites; first should be - // TLS_FALLBACK_SCSV to prevent degrade attacks - Ciphers []uint16 - - // Whether to prefer server cipher suites - PreferServerCipherSuites bool - - // The list of preferred curves - CurvePreferences []tls.CurveID - - // Client authentication policy - ClientAuth tls.ClientAuthType - - // List of client CA certificates to allow, if - // client authentication is enabled - ClientCerts []string - - // Manual means user provides own certs and keys - Manual bool - - // Managed means this config should be managed - // by the CertMagic Config (Manager field) - Managed bool - - // Manager is how certificates are managed - Manager *certmagic.Config - - // SelfSigned means that this hostname is - // served with a self-signed certificate - // that we generated in memory for convenience - SelfSigned bool - - // The email address to use when creating or - // using an ACME account (fun fact: if this - // is set to "off" then this config will not - // qualify for managed TLS) - ACMEEmail string - - // The list of protocols to choose from for Application Layer - // Protocol Negotiation (ALPN). - ALPN []string - - // The final tls.Config created with - // buildStandardTLSConfig() - tlsConfig *tls.Config -} - -// NewConfig returns a new Config with a pointer to the instance's -// certificate cache. You will usually need to set other fields on -// the returned Config for successful practical use. 
-func NewConfig(inst *caddy.Instance) (*Config, error) { - inst.StorageMu.RLock() - certCache, ok := inst.Storage[CertCacheInstStorageKey].(*certmagic.Cache) - inst.StorageMu.RUnlock() - if !ok || certCache == nil { - // set up the clustering plugin, if there is one (and there should always - // be one since this tls plugin requires it) -- this should be done exactly - // once, but we can't do it during init while plugins are still registering, - // so do it as soon as we run a setup) - if atomic.CompareAndSwapInt32(&clusterPluginSetup, 0, 1) { - clusterPluginName := os.Getenv("CADDY_CLUSTERING") - if clusterPluginName == "" { - clusterPluginName = "file" // name of default storage plugin - } - clusterFn, ok := clusterProviders[clusterPluginName] - if ok { - storage, err := clusterFn() - if err != nil { - return nil, fmt.Errorf("constructing cluster plugin %s: %v", clusterPluginName, err) - } - certmagic.Default.Storage = storage - } else { - return nil, fmt.Errorf("unrecognized cluster plugin (was it included in the Caddy build?): %s", clusterPluginName) - } - } - certCache = certmagic.NewCache(certmagic.CacheOptions{ - GetConfigForCert: func(cert certmagic.Certificate) (certmagic.Config, error) { - inst.StorageMu.Lock() - cfgMap, ok := inst.Storage[configMapKey].(map[string]*Config) - inst.StorageMu.Unlock() - if ok { - for hostname, cfg := range cfgMap { - if cfg.Manager != nil && hostname == cert.Names[0] { - return *cfg.Manager, nil - } - } - } - // returning Default not strictly necessary, since Default is used as template - // anyway; but this makes it clear that that's what we fall back to - return certmagic.Default, nil - }, - }) - storageCleaningTicker := time.NewTicker(12 * time.Hour) - go func() { - for range storageCleaningTicker.C { - certmagic.CleanStorage(certmagic.Default.Storage, certmagic.CleanStorageOptions{ - OCSPStaples: true, - }) - } - }() - inst.OnShutdown = append(inst.OnShutdown, func() error { - certCache.Stop() - 
storageCleaningTicker.Stop() - return nil - }) - - inst.StorageMu.Lock() - inst.Storage[CertCacheInstStorageKey] = certCache - inst.StorageMu.Unlock() - } - return &Config{ - Manager: certmagic.New(certCache, certmagic.Config{}), - }, nil -} - -// buildStandardTLSConfig converts cfg (*caddytls.Config) to a *tls.Config -// and stores it in cfg so it can be used in servers. If TLS is disabled, -// no tls.Config is created. -func (c *Config) buildStandardTLSConfig() error { - if !c.Enabled { - return nil - } - - config := new(tls.Config) - - ciphersAdded := make(map[uint16]struct{}) - curvesAdded := make(map[tls.CurveID]struct{}) - - // add cipher suites - for _, ciph := range c.Ciphers { - if _, ok := ciphersAdded[ciph]; !ok { - ciphersAdded[ciph] = struct{}{} - config.CipherSuites = append(config.CipherSuites, ciph) - } - } - - config.PreferServerCipherSuites = c.PreferServerCipherSuites - - // add curve preferences - for _, curv := range c.CurvePreferences { - if _, ok := curvesAdded[curv]; !ok { - curvesAdded[curv] = struct{}{} - config.CurvePreferences = append(config.CurvePreferences, curv) - } - } - - // ensure ALPN includes the ACME TLS-ALPN protocol - var alpnFound bool - for _, a := range c.ALPN { - if a == tlsalpn01.ACMETLS1Protocol { - alpnFound = true - break - } - } - if !alpnFound { - c.ALPN = append(c.ALPN, tlsalpn01.ACMETLS1Protocol) - } - - config.MinVersion = c.ProtocolMinVersion - config.MaxVersion = c.ProtocolMaxVersion - config.ClientAuth = c.ClientAuth - config.NextProtos = c.ALPN - config.GetCertificate = c.Manager.GetCertificate - - // set up client authentication if enabled - if config.ClientAuth != tls.NoClientCert { - pool := x509.NewCertPool() - clientCertsAdded := make(map[string]struct{}) - - for _, caFile := range c.ClientCerts { - // don't add cert to pool more than once - if _, ok := clientCertsAdded[caFile]; ok { - continue - } - clientCertsAdded[caFile] = struct{}{} - - // Any client with a certificate from this CA will be allowed 
to connect - caCrt, err := ioutil.ReadFile(caFile) - if err != nil { - return err - } - - if !pool.AppendCertsFromPEM(caCrt) { - return fmt.Errorf("error loading client certificate '%s': no certificates were successfully parsed", caFile) - } - } - - config.ClientCAs = pool - } - - // default cipher suites - if len(config.CipherSuites) == 0 { - config.CipherSuites = getPreferredDefaultCiphers() - } - - // for security, ensure TLS_FALLBACK_SCSV is always included first - if len(config.CipherSuites) == 0 || config.CipherSuites[0] != tls.TLS_FALLBACK_SCSV { - config.CipherSuites = append([]uint16{tls.TLS_FALLBACK_SCSV}, config.CipherSuites...) - } - - // store the resulting new tls.Config - c.tlsConfig = config - - return nil -} - -// MakeTLSConfig makes a tls.Config from configs. The returned -// tls.Config is programmed to load the matching caddytls.Config -// based on the hostname in SNI, but that's all. This is used -// to create a single TLS configuration for a listener (a group -// of sites). 
-func MakeTLSConfig(configs []*Config) (*tls.Config, error) { - if len(configs) == 0 { - return nil, nil - } - - configMap := make(configGroup) - - for i, cfg := range configs { - if cfg == nil { - // avoid nil pointer dereference below this loop - configs[i] = new(Config) - continue - } - - // can't serve TLS and non-TLS on same port - if i > 0 && cfg.Enabled != configs[i-1].Enabled { - thisConfProto, lastConfProto := "not TLS", "not TLS" - if cfg.Enabled { - thisConfProto = "TLS" - } - if configs[i-1].Enabled { - lastConfProto = "TLS" - } - return nil, fmt.Errorf("cannot multiplex %s (%s) and %s (%s) on same listener", - configs[i-1].Hostname, lastConfProto, cfg.Hostname, thisConfProto) - } - - // convert this caddytls.Config into a tls.Config - if err := cfg.buildStandardTLSConfig(); err != nil { - return nil, err - } - - // if an existing config with this hostname was already - // configured, then they must be identical (or at least - // compatible), otherwise that is a configuration error - if otherConfig, ok := configMap[cfg.Hostname]; ok { - if err := assertConfigsCompatible(cfg, otherConfig); err != nil { - return nil, fmt.Errorf("incompatible TLS configurations for the same SNI "+ - "name (%s) on the same listener: %v", - cfg.Hostname, err) - } - } - - // key this config by its hostname (overwrites - // configs with the same hostname pattern; should - // be OK since we already asserted they are roughly - // the same); during TLS handshakes, configs are - // loaded based on the hostname pattern according - // to client's ServerName (SNI) value - if cfg.Hostname == "0.0.0.0" || cfg.Hostname == "::" { - configMap[""] = cfg - } else { - configMap[cfg.Hostname] = cfg - } - } - - // Is TLS disabled? By now, we know that all - // configs agree whether it is or not, so we - // can just look at the first one. If so, - // we're done here. 
- if len(configs) == 0 || !configs[0].Enabled { - return nil, nil - } - - return &tls.Config{ - // A tls.Config must have Certificates or GetCertificate - // set, in order to be accepted by tls.Listen and quic.Listen. - // TODO: remove this once the standard library allows a tls.Config with - // only GetConfigForClient set. https://github.com/mholt/caddy/pull/2404 - GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) { - return nil, fmt.Errorf("all certificates configured via GetConfigForClient") - }, - GetConfigForClient: configMap.GetConfigForClient, - }, nil -} - -// assertConfigsCompatible returns an error if the two Configs -// do not have the same (or roughly compatible) configurations. -// If one of the tlsConfig pointers on either Config is nil, -// an error will be returned. If both are nil, no error. -func assertConfigsCompatible(cfg1, cfg2 *Config) error { - c1, c2 := cfg1.tlsConfig, cfg2.tlsConfig - - if (c1 == nil && c2 != nil) || (c1 != nil && c2 == nil) { - return fmt.Errorf("one config is not made") - } - if c1 == nil && c2 == nil { - return nil - } - - if len(c1.CipherSuites) != len(c2.CipherSuites) { - return fmt.Errorf("different number of allowed cipher suites") - } - for i, ciph := range c1.CipherSuites { - if c2.CipherSuites[i] != ciph { - return fmt.Errorf("different cipher suites or different order") - } - } - - if len(c1.CurvePreferences) != len(c2.CurvePreferences) { - return fmt.Errorf("different number of allowed cipher suites") - } - for i, curve := range c1.CurvePreferences { - if c2.CurvePreferences[i] != curve { - return fmt.Errorf("different curve preferences or different order") - } - } - - if len(c1.NextProtos) != len(c2.NextProtos) { - return fmt.Errorf("different number of ALPN (NextProtos) values") - } - for i, proto := range c1.NextProtos { - if c2.NextProtos[i] != proto { - return fmt.Errorf("different ALPN (NextProtos) values or different order") - } - } - - if c1.PreferServerCipherSuites != 
c2.PreferServerCipherSuites { - return fmt.Errorf("one prefers server cipher suites, the other does not") - } - if c1.MinVersion != c2.MinVersion { - return fmt.Errorf("minimum TLS version mismatch") - } - if c1.MaxVersion != c2.MaxVersion { - return fmt.Errorf("maximum TLS version mismatch") - } - if c1.ClientAuth != c2.ClientAuth { - return fmt.Errorf("client authentication policy mismatch") - } - if c1.ClientAuth != tls.NoClientCert && c2.ClientAuth != tls.NoClientCert && c1.ClientCAs != c2.ClientCAs { - // Two hosts defined on the same listener are not compatible if they - // have ClientAuth enabled, because there's no guarantee beyond the - // hostname which config will be used (because SNI only has server name). - // To prevent clients from bypassing authentication, require that - // ClientAuth be configured in an unambiguous manner. - return fmt.Errorf("multiple hosts requiring client authentication ambiguously configured") - } - - return nil -} - -// ConfigGetter gets a Config keyed by key. -type ConfigGetter func(c *caddy.Controller) *Config - -var configGetters = make(map[string]ConfigGetter) - -// RegisterConfigGetter registers fn as the way to get a -// Config for server type serverType. -func RegisterConfigGetter(serverType string, fn ConfigGetter) { - configGetters[serverType] = fn -} - -// SetDefaultTLSParams sets the default TLS cipher suites, protocol versions, -// and server preferences of a server.Config if they were not previously set -// (it does not overwrite; only fills in missing values). -func SetDefaultTLSParams(config *Config) { - // If no ciphers provided, use default list - if len(config.Ciphers) == 0 { - config.Ciphers = getPreferredDefaultCiphers() - } - - // Not a cipher suite, but still important for mitigating protocol downgrade attacks - // (prepend since having it at end breaks http2 due to non-h2-approved suites before it) - config.Ciphers = append([]uint16{tls.TLS_FALLBACK_SCSV}, config.Ciphers...) 
- - // If no curves provided, use default list - if len(config.CurvePreferences) == 0 { - config.CurvePreferences = defaultCurves - } - - // Set default protocol min and max versions - must balance compatibility and security - if config.ProtocolMinVersion == 0 { - config.ProtocolMinVersion = tls.VersionTLS12 - } - if config.ProtocolMaxVersion == 0 { - config.ProtocolMaxVersion = tls.VersionTLS13 - } - - // Prefer server cipher suites - config.PreferServerCipherSuites = true -} - -// Map of supported key types -var supportedKeyTypes = map[string]certcrypto.KeyType{ - "P384": certcrypto.EC384, - "P256": certcrypto.EC256, - "RSA4096": certcrypto.RSA4096, - "RSA2048": certcrypto.RSA2048, -} - -// SupportedProtocols is a map of supported protocols. -// HTTP/2 only supports TLS 1.2 and higher. -// If updating this map, also update tlsProtocolStringToMap in caddyhttp/fastcgi/fastcgi.go -var SupportedProtocols = map[string]uint16{ - "tls1.0": tls.VersionTLS10, - "tls1.1": tls.VersionTLS11, - "tls1.2": tls.VersionTLS12, - "tls1.3": tls.VersionTLS13, -} - -// GetSupportedProtocolName returns the protocol name -func GetSupportedProtocolName(protocol uint16) (string, error) { - for k, v := range SupportedProtocols { - if v == protocol { - return k, nil - } - } - - return "", fmt.Errorf("name: unsupported protocol") -} - -// SupportedCiphersMap has supported ciphers, used only for parsing config. -// -// Note that, at time of writing, HTTP/2 blacklists 276 cipher suites, -// including all but four of the suites below (the four GCM suites). -// See https://http2.github.io/http2-spec/#BadCipherSuites -// -// TLS_FALLBACK_SCSV is not in this list because we manually ensure -// it is always added (even though it is not technically a cipher suite). -// -// This map, like any map, is NOT ORDERED. Do not range over this map. 
-var SupportedCiphersMap = map[string]uint16{ - "ECDHE-ECDSA-AES256-GCM-SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - "ECDHE-RSA-AES256-GCM-SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - "ECDHE-ECDSA-AES128-GCM-SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - "ECDHE-RSA-AES128-GCM-SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - "ECDHE-ECDSA-WITH-CHACHA20-POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - "ECDHE-RSA-WITH-CHACHA20-POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - "ECDHE-RSA-AES256-CBC-SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - "ECDHE-RSA-AES128-CBC-SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - "ECDHE-ECDSA-AES256-CBC-SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - "ECDHE-ECDSA-AES128-CBC-SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - "RSA-AES256-CBC-SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, - "RSA-AES128-CBC-SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, - "ECDHE-RSA-3DES-EDE-CBC-SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - "RSA-3DES-EDE-CBC-SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, -} - -// GetSupportedCipherName returns the cipher name -func GetSupportedCipherName(cipher uint16) (string, error) { - for k, v := range SupportedCiphersMap { - if v == cipher { - return k, nil - } - } - - return "", fmt.Errorf("name: unsupported cipher") -} - -// List of all the ciphers we want to use by default -var defaultCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, -} - -// List of ciphers we should prefer if native AESNI support is missing -var defaultCiphersNonAESNI = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - 
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} - -// getPreferredDefaultCiphers returns an appropriate cipher suite to use, depending on -// the hardware support available for AES-NI. -// -// See https://github.com/mholt/caddy/issues/1674 -func getPreferredDefaultCiphers() []uint16 { - if cpuid.CPU.AesNi() { - return defaultCiphers - } - - // Return a cipher suite that prefers ChaCha20 - return defaultCiphersNonAESNI -} - -// Map of supported curves -// https://golang.org/pkg/crypto/tls/#CurveID -var supportedCurvesMap = map[string]tls.CurveID{ - "X25519": tls.X25519, - "P256": tls.CurveP256, - "P384": tls.CurveP384, - "P521": tls.CurveP521, -} - -// List of all the curves we want to use by default. -// -// This list should only include curves which are fast by design (e.g. X25519) -// and those for which an optimized assembly implementation exists (e.g. P256). -// The latter ones can be found here: https://github.com/golang/go/tree/master/src/crypto/elliptic -var defaultCurves = []tls.CurveID{ - tls.X25519, - tls.CurveP256, -} - -var clusterPluginSetup int32 // access atomically - -// CertCacheInstStorageKey is the name of the key for -// accessing the certificate storage on the *caddy.Instance. -const CertCacheInstStorageKey = "tls_cert_cache" diff --git a/vendor/github.com/mholt/caddy/caddytls/crypto.go b/vendor/github.com/mholt/caddy/caddytls/crypto.go deleted file mode 100644 index ff2e9f912..000000000 --- a/vendor/github.com/mholt/caddy/caddytls/crypto.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "crypto/rand" - "crypto/tls" - "io" - "sync" - "time" -) - -// RotateSessionTicketKeys rotates the TLS session ticket keys -// on cfg every TicketRotateInterval. It spawns a new goroutine so -// this function does NOT block. It returns a channel you should -// close when you are ready to stop the key rotation, like when the -// server using cfg is no longer running. -// -// TODO: See about moving this into CertMagic and using its Storage -func RotateSessionTicketKeys(cfg *tls.Config) chan struct{} { - ch := make(chan struct{}) - ticker := time.NewTicker(TicketRotateInterval) - go runTLSTicketKeyRotation(cfg, ticker, ch) - return ch -} - -// Functions that may be swapped out for testing -var ( - runTLSTicketKeyRotation = standaloneTLSTicketKeyRotation - setSessionTicketKeysTestHook = func(keys [][32]byte) [][32]byte { return keys } - setSessionTicketKeysTestHookMu sync.Mutex -) - -// standaloneTLSTicketKeyRotation governs over the array of TLS ticket keys used to de/crypt TLS tickets. -// It periodically sets a new ticket key as the first one, used to encrypt (and decrypt), -// pushing any old ticket keys to the back, where they are considered for decryption only. -// -// Lack of entropy for the very first ticket key results in the feature being disabled (as does Go), -// later lack of entropy temporarily disables ticket key rotation. -// Old ticket keys are still phased out, though. -// -// Stops the ticker when returning. 
-func standaloneTLSTicketKeyRotation(c *tls.Config, ticker *time.Ticker, exitChan chan struct{}) { - defer ticker.Stop() - - // The entire page should be marked as sticky, but Go cannot do that - // without resorting to syscall#Mlock. And, we don't have madvise (for NODUMP), too. ☹ - keys := make([][32]byte, 1, NumTickets) - - rng := c.Rand - if rng == nil { - rng = rand.Reader - } - if _, err := io.ReadFull(rng, keys[0][:]); err != nil { - c.SessionTicketsDisabled = true // bail if we don't have the entropy for the first one - return - } - setSessionTicketKeysTestHookMu.Lock() - setSessionTicketKeysHook := setSessionTicketKeysTestHook - setSessionTicketKeysTestHookMu.Unlock() - c.SetSessionTicketKeys(setSessionTicketKeysHook(keys)) - - for { - select { - case _, isOpen := <-exitChan: - if !isOpen { - return - } - case <-ticker.C: - rng = c.Rand // could've changed since the start - if rng == nil { - rng = rand.Reader - } - var newTicketKey [32]byte - _, err := io.ReadFull(rng, newTicketKey[:]) - - if len(keys) < NumTickets { - keys = append(keys, keys[0]) // manipulates the internal length - } - for idx := len(keys) - 1; idx >= 1; idx-- { - keys[idx] = keys[idx-1] // yes, this makes copies - } - - if err == nil { - keys[0] = newTicketKey - } - // pushes the last key out, doesn't matter that we don't have a new one - c.SetSessionTicketKeys(setSessionTicketKeysHook(keys)) - } - } -} - -const ( - // NumTickets is how many tickets to hold and consider - // to decrypt TLS sessions. 
- NumTickets = 4 - - // TicketRotateInterval is how often to generate - // new ticket for TLS PFS encryption - TicketRotateInterval = 10 * time.Hour -) diff --git a/vendor/github.com/mholt/caddy/caddytls/handshake.go b/vendor/github.com/mholt/caddy/caddytls/handshake.go deleted file mode 100644 index 0cd8a15ad..000000000 --- a/vendor/github.com/mholt/caddy/caddytls/handshake.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "crypto/tls" - "fmt" - "log" - "net" - "strings" - - "github.com/mholt/caddy/telemetry" - "github.com/mholt/certmagic" -) - -// configGroup is a type that keys configs by their hostname -// (hostnames can have wildcard characters; use the getConfig -// method to get a config by matching its hostname). -type configGroup map[string]*Config - -// getConfig gets the config by the first key match for hello. -// In other words, "sub.foo.bar" will get the config for "*.foo.bar" -// if that is the closest match. If no match is found, the first -// (random) config will be loaded, which will defer any TLS alerts -// to the certificate validation (this may or may not be ideal; -// let's talk about it if this becomes problematic). -// -// This function follows nearly the same logic to lookup -// a hostname as the getCertificate function uses. 
-func (cg configGroup) getConfig(hello *tls.ClientHelloInfo) *Config { - name := certmagic.NormalizedName(hello.ServerName) - if name == "" { - name = certmagic.NormalizedName(certmagic.Default.DefaultServerName) - } - - // if SNI is empty, prefer matching IP address (it is - // more specific than a "catch-all" configuration) - if name == "" && hello.Conn != nil { - addr := hello.Conn.LocalAddr().String() - ip, _, err := net.SplitHostPort(addr) - if err == nil { - addr = ip - } - if config, ok := cg[addr]; ok { - return config - } - } - - // otherwise, try an exact match - if config, ok := cg[name]; ok { - return config - } - - // then try replacing labels in the name with - // wildcards until we get a match - labels := strings.Split(name, ".") - for i := range labels { - labels[i] = "*" - candidate := strings.Join(labels, ".") - if config, ok := cg[candidate]; ok { - return config - } - } - - // try a config that matches all names - this - // is needed to match configs defined without - // a specific host, like ":443", when SNI is - // a non-empty value - if config, ok := cg[""]; ok { - return config - } - - // failover with a random config: this is necessary - // because we might be needing to solve a TLS-ALPN - // ACME challenge for a name that we don't have a - // TLS configuration for; any config will do for - // this purpose - for _, config := range cg { - return config - } - - log.Printf("[ERROR] No TLS configuration available for ClientHello with ServerName: %s", hello.ServerName) - - return nil -} - -// GetConfigForClient gets a TLS configuration satisfying clientHello. -// In getting the configuration, it abides the rules and settings -// defined in the Config that matches clientHello.ServerName. If no -// tls.Config is set on the matching Config, a nil value is returned. -// -// This method is safe for use as a tls.Config.GetConfigForClient callback. 
-func (cg configGroup) GetConfigForClient(clientHello *tls.ClientHelloInfo) (*tls.Config, error) { - config := cg.getConfig(clientHello) - if config != nil { - return config.tlsConfig, nil - } - return nil, nil -} - -// ClientHelloInfo is our own version of the standard lib's -// tls.ClientHelloInfo. As of May 2018, any fields populated -// by the Go standard library are not guaranteed to have their -// values in the original order as on the wire. -type ClientHelloInfo struct { - Version uint16 `json:"version,omitempty"` - CipherSuites []uint16 `json:"cipher_suites,omitempty"` - Extensions []uint16 `json:"extensions,omitempty"` - CompressionMethods []byte `json:"compression,omitempty"` - Curves []tls.CurveID `json:"curves,omitempty"` - Points []uint8 `json:"points,omitempty"` - - // Whether a couple of fields are unknown; if not, the key will encode - // differently to reflect that, as opposed to being known empty values. - // (some fields may be unknown depending on what package is being used; - // i.e. the Go standard lib doesn't expose some things) - // (very important to NOT encode these to JSON) - ExtensionsUnknown bool `json:"-"` - CompressionMethodsUnknown bool `json:"-"` -} - -// Key returns a standardized string form of the data in info, -// useful for identifying duplicates. -func (info ClientHelloInfo) Key() string { - extensions, compressionMethods := "?", "?" - if !info.ExtensionsUnknown { - extensions = fmt.Sprintf("%x", info.Extensions) - } - if !info.CompressionMethodsUnknown { - compressionMethods = fmt.Sprintf("%x", info.CompressionMethods) - } - return telemetry.FastHash([]byte(fmt.Sprintf("%x-%x-%s-%s-%x-%x", - info.Version, info.CipherSuites, extensions, - compressionMethods, info.Curves, info.Points))) -} - -// ClientHelloTelemetry determines whether to report -// TLS ClientHellos to telemetry. Disable if doing -// it from a different package. 
-var ClientHelloTelemetry = true diff --git a/vendor/github.com/mholt/caddy/caddytls/selfsigned.go b/vendor/github.com/mholt/caddy/caddytls/selfsigned.go deleted file mode 100644 index 639edc0c3..000000000 --- a/vendor/github.com/mholt/caddy/caddytls/selfsigned.go +++ /dev/null @@ -1,103 +0,0 @@ -package caddytls - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "fmt" - "math/big" - "net" - "strings" - "time" - - "github.com/go-acme/lego/certcrypto" -) - -// newSelfSignedCertificate returns a new self-signed certificate. -func newSelfSignedCertificate(ssconfig selfSignedConfig) (tls.Certificate, error) { - // start by generating private key - var privKey interface{} - var err error - switch ssconfig.KeyType { - case "", certcrypto.EC256: - privKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case certcrypto.EC384: - privKey, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - case certcrypto.RSA2048: - privKey, err = rsa.GenerateKey(rand.Reader, 2048) - case certcrypto.RSA4096: - privKey, err = rsa.GenerateKey(rand.Reader, 4096) - case certcrypto.RSA8192: - privKey, err = rsa.GenerateKey(rand.Reader, 8192) - default: - return tls.Certificate{}, fmt.Errorf("cannot generate private key; unknown key type %v", ssconfig.KeyType) - } - if err != nil { - return tls.Certificate{}, fmt.Errorf("failed to generate private key: %v", err) - } - - // create certificate structure with proper values - notBefore := time.Now() - notAfter := ssconfig.Expire - if notAfter.IsZero() || notAfter.Before(notBefore) { - notAfter = notBefore.Add(24 * time.Hour * 7) - } - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return tls.Certificate{}, fmt.Errorf("failed to generate serial number: %v", err) - } - cert := &x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{Organization: 
[]string{"Caddy Self-Signed"}}, - NotBefore: notBefore, - NotAfter: notAfter, - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - } - if len(ssconfig.SAN) == 0 { - ssconfig.SAN = []string{""} - } - for _, san := range ssconfig.SAN { - if ip := net.ParseIP(san); ip != nil { - cert.IPAddresses = append(cert.IPAddresses, ip) - } else { - cert.DNSNames = append(cert.DNSNames, strings.ToLower(san)) - } - } - - // generate the associated public key - publicKey := func(privKey interface{}) interface{} { - switch k := privKey.(type) { - case *rsa.PrivateKey: - return &k.PublicKey - case *ecdsa.PrivateKey: - return &k.PublicKey - default: - return fmt.Errorf("unknown key type") - } - } - derBytes, err := x509.CreateCertificate(rand.Reader, cert, cert, publicKey(privKey), privKey) - if err != nil { - return tls.Certificate{}, fmt.Errorf("could not create certificate: %v", err) - } - - chain := [][]byte{derBytes} - - return tls.Certificate{ - Certificate: chain, - PrivateKey: privKey, - Leaf: cert, - }, nil -} - -// selfSignedConfig configures a self-signed certificate. -type selfSignedConfig struct { - SAN []string - KeyType certcrypto.KeyType - Expire time.Time -} diff --git a/vendor/github.com/mholt/caddy/caddytls/setup.go b/vendor/github.com/mholt/caddy/caddytls/setup.go deleted file mode 100644 index 3f0546904..000000000 --- a/vendor/github.com/mholt/caddy/caddytls/setup.go +++ /dev/null @@ -1,471 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddytls - -import ( - "bytes" - "crypto/tls" - "encoding/pem" - "fmt" - "io/ioutil" - "log" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "sync/atomic" - - "github.com/mholt/caddy" - "github.com/mholt/caddy/telemetry" - "github.com/mholt/certmagic" -) - -func init() { - // opt-in TLS 1.3 for Go1.12 - // TODO: remove this line when Go1.13 is released. - if err := os.Setenv("GODEBUG", os.Getenv("GODEBUG")+",tls13=1"); err != nil { - log.Println("[ERROR] failed to set environment variable: ", err) - } - - caddy.RegisterPlugin("tls", caddy.Plugin{Action: setupTLS}) - - // ensure the default Storage implementation is plugged in - RegisterClusterPlugin("file", constructDefaultClusterPlugin) -} - -// setupTLS sets up the TLS configuration and installs certificates that -// are specified by the user in the config file. All the automatic HTTPS -// stuff comes later outside of this function. 
-func setupTLS(c *caddy.Controller) error { - // set up the clustering plugin, if there is one (and there should always - // be one since this tls plugin requires it) -- this should be done exactly - // once, but we can't do it during init while plugins are still registering, - // so do it as soon as we run a setup) - if atomic.CompareAndSwapInt32(&clusterPluginSetup, 0, 1) { - clusterPluginName := os.Getenv("CADDY_CLUSTERING") - if clusterPluginName == "" { - clusterPluginName = "file" // name of default storage plugin - } - clusterFn, ok := clusterProviders[clusterPluginName] - if ok { - storage, err := clusterFn() - if err != nil { - return fmt.Errorf("constructing cluster plugin %s: %v", clusterPluginName, err) - } - certmagic.Default.Storage = storage - } else { - return fmt.Errorf("unrecognized cluster plugin (was it included in the Caddy build?): %s", clusterPluginName) - } - } - - configGetter, ok := configGetters[c.ServerType()] - if !ok { - return fmt.Errorf("no caddytls.ConfigGetter for %s server type; must call RegisterConfigGetter", c.ServerType()) - } - config := configGetter(c) - if config == nil { - return fmt.Errorf("no caddytls.Config to set up for %s", c.Key) - } - - config.Enabled = true - - // we use certmagic events to collect metrics for telemetry - config.Manager.OnEvent = func(event string, data interface{}) { - switch event { - case "tls_handshake_started": - clientHello := data.(*tls.ClientHelloInfo) - if ClientHelloTelemetry && len(clientHello.SupportedVersions) > 0 { - // If no other plugin (such as the HTTP server type) is implementing ClientHello telemetry, we do it. - // NOTE: The values in the Go standard lib's ClientHelloInfo aren't guaranteed to be in order. - info := ClientHelloInfo{ - Version: clientHello.SupportedVersions[0], // report the highest - CipherSuites: clientHello.CipherSuites, - ExtensionsUnknown: true, // no extension info... :( - CompressionMethodsUnknown: true, // no compression methods... 
:( - Curves: clientHello.SupportedCurves, - Points: clientHello.SupportedPoints, - // We also have, but do not yet use: SignatureSchemes, ServerName, and SupportedProtos (ALPN) - // because the standard lib parses some extensions, but our MITM detector generally doesn't. - } - go telemetry.SetNested("tls_client_hello", info.Key(), info) - } - - case "tls_handshake_completed": - // TODO: This is a "best guess" for now - at this point, we only gave a - // certificate to the client; we need something listener-level to be sure - go telemetry.Increment("tls_handshake_count") - - case "acme_cert_obtained": - go telemetry.Increment("tls_acme_certs_obtained") - - case "acme_cert_renewed": - name := data.(string) - caddy.EmitEvent(caddy.CertRenewEvent, name) - go telemetry.Increment("tls_acme_certs_renewed") - - case "acme_cert_revoked": - telemetry.Increment("acme_certs_revoked") - - case "cached_managed_cert": - telemetry.Increment("tls_managed_cert_count") - - case "cached_unmanaged_cert": - telemetry.Increment("tls_unmanaged_cert_count") - } - } - - for c.Next() { - var certificateFile, keyFile, loadDir, maxCerts, askURL string - var onDemand bool - - args := c.RemainingArgs() - switch len(args) { - case 1: - // even if the email is one of the special values below, - // it is still necessary for future analysis that we store - // that value in the ACMEEmail field. 
- config.ACMEEmail = args[0] - - switch args[0] { - // user can force-disable managed TLS this way - case "off": - config.Enabled = false - return nil - // user might want a temporary, in-memory, self-signed cert - case "self_signed": - config.SelfSigned = true - default: - config.Manager.Email = args[0] - } - case 2: - certificateFile = args[0] - keyFile = args[1] - config.Manual = true - } - - // Optional block with extra parameters - var hadBlock bool - for c.NextBlock() { - hadBlock = true - switch c.Val() { - case "ca": - arg := c.RemainingArgs() - if len(arg) != 1 { - return c.ArgErr() - } - config.Manager.CA = arg[0] - case "key_type": - arg := c.RemainingArgs() - value, ok := supportedKeyTypes[strings.ToUpper(arg[0])] - if !ok { - return c.Errf("Wrong key type name or key type not supported: '%s'", c.Val()) - } - config.Manager.KeyType = value - case "protocols": - args := c.RemainingArgs() - if len(args) == 1 { - value, ok := SupportedProtocols[strings.ToLower(args[0])] - if !ok { - return c.Errf("Wrong protocol name or protocol not supported: '%s'", args[0]) - } - config.ProtocolMinVersion, config.ProtocolMaxVersion = value, value - } else { - value, ok := SupportedProtocols[strings.ToLower(args[0])] - if !ok { - return c.Errf("Wrong protocol name or protocol not supported: '%s'", args[0]) - } - config.ProtocolMinVersion = value - value, ok = SupportedProtocols[strings.ToLower(args[1])] - if !ok { - return c.Errf("Wrong protocol name or protocol not supported: '%s'", args[1]) - } - config.ProtocolMaxVersion = value - if config.ProtocolMinVersion > config.ProtocolMaxVersion { - return c.Errf("Minimum protocol version cannot be higher than maximum (reverse the order)") - } - } - case "ciphers": - for c.NextArg() { - value, ok := SupportedCiphersMap[strings.ToUpper(c.Val())] - if !ok { - return c.Errf("Wrong cipher name or cipher not supported: '%s'", c.Val()) - } - config.Ciphers = append(config.Ciphers, value) - } - case "curves": - for c.NextArg() { - 
value, ok := supportedCurvesMap[strings.ToUpper(c.Val())] - if !ok { - return c.Errf("Wrong curve name or curve not supported: '%s'", c.Val()) - } - config.CurvePreferences = append(config.CurvePreferences, value) - } - case "clients": - clientCertList := c.RemainingArgs() - if len(clientCertList) == 0 { - return c.ArgErr() - } - - listStart, mustProvideCA := 1, true - switch clientCertList[0] { - case "request": - config.ClientAuth = tls.RequestClientCert - mustProvideCA = false - case "require": - config.ClientAuth = tls.RequireAnyClientCert - mustProvideCA = false - case "verify_if_given": - config.ClientAuth = tls.VerifyClientCertIfGiven - default: - config.ClientAuth = tls.RequireAndVerifyClientCert - listStart = 0 - } - if mustProvideCA && len(clientCertList) <= listStart { - return c.ArgErr() - } - - config.ClientCerts = clientCertList[listStart:] - case "load": - c.Args(&loadDir) - config.Manual = true - case "max_certs": - c.Args(&maxCerts) - onDemand = true - case "ask": - c.Args(&askURL) - onDemand = true - case "dns": - args := c.RemainingArgs() - if len(args) != 1 { - return c.ArgErr() - } - // TODO: we can get rid of DNS provider plugins with this one line - // of code; however, currently (Dec. 2018) this adds about 20 MB - // of bloat to the Caddy binary, doubling its size to ~40 MB...! 
- // dnsProv, err := dns.NewDNSChallengeProviderByName(args[0]) - // if err != nil { - // return c.Errf("Configuring DNS provider '%s': %v", args[0], err) - // } - dnsProvName := args[0] - dnsProvConstructor, ok := dnsProviders[dnsProvName] - if !ok { - return c.Errf("Unknown DNS provider by name '%s'", dnsProvName) - } - dnsProv, err := dnsProvConstructor() - if err != nil { - return c.Errf("Setting up DNS provider '%s': %v", dnsProvName, err) - } - config.Manager.DNSProvider = dnsProv - case "alpn": - args := c.RemainingArgs() - if len(args) == 0 { - return c.ArgErr() - } - for _, arg := range args { - config.ALPN = append(config.ALPN, arg) - } - case "must_staple": - config.Manager.MustStaple = true - case "wildcard": - if !certmagic.HostQualifies(config.Hostname) { - return c.Errf("Hostname '%s' does not qualify for managed TLS, so cannot manage wildcard certificate for it", config.Hostname) - } - if strings.Contains(config.Hostname, "*") { - return c.Errf("Cannot convert domain name '%s' to a valid wildcard: already has a wildcard label", config.Hostname) - } - parts := strings.Split(config.Hostname, ".") - if len(parts) < 3 { - return c.Errf("Cannot convert domain name '%s' to a valid wildcard: too few labels", config.Hostname) - } - parts[0] = "*" - config.Hostname = strings.Join(parts, ".") - default: - return c.Errf("Unknown subdirective '%s'", c.Val()) - } - } - - // tls requires at least one argument if a block is not opened - if len(args) == 0 && !hadBlock { - return c.ArgErr() - } - - // configure on-demand TLS, if enabled - if onDemand { - config.Manager.OnDemand = new(certmagic.OnDemandConfig) - if maxCerts != "" { - maxCertsNum, err := strconv.Atoi(maxCerts) - if err != nil || maxCertsNum < 1 { - return c.Err("max_certs must be a positive integer") - } - config.Manager.OnDemand.MaxObtain = int32(maxCertsNum) - } - if askURL != "" { - parsedURL, err := url.Parse(askURL) - if err != nil { - return c.Err("ask must be a valid url") - } - if 
parsedURL.Scheme != "http" && parsedURL.Scheme != "https" { - return c.Err("ask URL must use http or https") - } - config.Manager.OnDemand.AskURL = parsedURL - } - } - - // don't try to load certificates unless we're supposed to - if !config.Enabled || !config.Manual { - continue - } - - // load a single certificate and key, if specified - if certificateFile != "" && keyFile != "" { - err := config.Manager.CacheUnmanagedCertificatePEMFile(certificateFile, keyFile) - if err != nil { - return c.Errf("Unable to load certificate and key files for '%s': %v", c.Key, err) - } - log.Printf("[INFO] Successfully loaded TLS assets from %s and %s", certificateFile, keyFile) - } - - // load a directory of certificates, if specified - if loadDir != "" { - err := loadCertsInDir(config, c, loadDir) - if err != nil { - return err - } - } - } - - SetDefaultTLSParams(config) - - // generate self-signed cert if needed - if config.SelfSigned { - ssCert, err := newSelfSignedCertificate(selfSignedConfig{ - SAN: []string{config.Hostname}, - KeyType: config.Manager.KeyType, - }) - if err != nil { - return fmt.Errorf("self-signed certificate generation: %v", err) - } - err = config.Manager.CacheUnmanagedTLSCertificate(ssCert) - if err != nil { - return fmt.Errorf("self-signed: %v", err) - } - telemetry.Increment("tls_self_signed_count") - } - - // store this as a custom config - cfgMap, ok := c.Get(configMapKey).(map[string]*Config) - if !ok || cfgMap == nil { - cfgMap = make(map[string]*Config) - } - cfgMap[config.Hostname] = config - c.Set(configMapKey, cfgMap) - - return nil -} - -// loadCertsInDir loads all the certificates/keys in dir, as long as -// the file ends with .pem. This method of loading certificates is -// modeled after haproxy, which expects the certificate and key to -// be bundled into the same file: -// https://cbonte.github.io/haproxy-dconv/configuration-1.5.html#5.1-crt -// -// This function may write to the log as it walks the directory tree. 
-func loadCertsInDir(cfg *Config, c *caddy.Controller, dir string) error { - return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - log.Printf("[WARNING] Unable to traverse into %s; skipping", path) - return nil - } - if info.IsDir() { - return nil - } - if strings.HasSuffix(strings.ToLower(info.Name()), ".pem") { - certBuilder, keyBuilder := new(bytes.Buffer), new(bytes.Buffer) - var foundKey bool // use only the first key in the file - - bundle, err := ioutil.ReadFile(path) - if err != nil { - return err - } - - for { - // Decode next block so we can see what type it is - var derBlock *pem.Block - derBlock, bundle = pem.Decode(bundle) - if derBlock == nil { - break - } - - if derBlock.Type == "CERTIFICATE" { - // Re-encode certificate as PEM, appending to certificate chain - if err := pem.Encode(certBuilder, derBlock); err != nil { - log.Println("[ERROR] failed to write PEM encoding: ", err) - } - } else if derBlock.Type == "EC PARAMETERS" { - // EC keys generated from openssl can be composed of two blocks: - // parameters and key (parameter block should come first) - if !foundKey { - // Encode parameters - if err := pem.Encode(keyBuilder, derBlock); err != nil { - log.Println("[ERROR] failed to write PEM encoding: ", err) - } - - // Key must immediately follow - derBlock, bundle = pem.Decode(bundle) - if derBlock == nil || derBlock.Type != "EC PRIVATE KEY" { - return c.Errf("%s: expected elliptic private key to immediately follow EC parameters", path) - } - if err := pem.Encode(keyBuilder, derBlock); err != nil { - log.Println("[ERROR] failed to write PEM encoding: ", err) - } - foundKey = true - } - } else if derBlock.Type == "PRIVATE KEY" || strings.HasSuffix(derBlock.Type, " PRIVATE KEY") { - // RSA key - if !foundKey { - if err := pem.Encode(keyBuilder, derBlock); err != nil { - log.Println("[ERROR] failed to write PEM encoding: ", err) - } - foundKey = true - } - } else { - return c.Errf("%s: unrecognized PEM 
block type: %s", path, derBlock.Type) - } - } - - certPEMBytes, keyPEMBytes := certBuilder.Bytes(), keyBuilder.Bytes() - if len(certPEMBytes) == 0 { - return c.Errf("%s: failed to parse PEM data", path) - } - if len(keyPEMBytes) == 0 { - return c.Errf("%s: no private key block found", path) - } - - err = cfg.Manager.CacheUnmanagedCertificatePEMBytes(certPEMBytes, keyPEMBytes) - if err != nil { - return c.Errf("%s: failed to load cert and key for '%s': %v", path, c.Key, err) - } - log.Printf("[INFO] Successfully loaded TLS assets from %s", path) - } - return nil - }) -} - -func constructDefaultClusterPlugin() (certmagic.Storage, error) { - return &certmagic.FileStorage{Path: caddy.AssetsPath()}, nil -} - -const configMapKey = "tls_custom_configs" diff --git a/vendor/github.com/mholt/caddy/caddytls/tls.go b/vendor/github.com/mholt/caddy/caddytls/tls.go deleted file mode 100644 index e419f6f4e..000000000 --- a/vendor/github.com/mholt/caddy/caddytls/tls.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package caddytls facilitates the management of TLS assets and integrates -// Let's Encrypt functionality into Caddy with first-class support for -// creating and renewing certificates automatically. It also implements -// the tls directive. It's mostly powered by the CertMagic package. -// -// This package is meant to be used by Caddy server types. 
To use the -// tls directive, a server type must import this package and call -// RegisterConfigGetter(). The server type must make and keep track of -// the caddytls.Config structs that this package produces. It must also -// add tls to its list of directives. When it comes time to make the -// server instances, the server type can call MakeTLSConfig() to convert -// a []caddytls.Config to a single tls.Config for use in tls.NewListener(). -// It is also recommended to call RotateSessionTicketKeys() when -// starting a new listener. -package caddytls - -import ( - "github.com/go-acme/lego/challenge" - "github.com/mholt/caddy" - "github.com/mholt/certmagic" -) - -// ConfigHolder is any type that has a Config; it presumably is -// connected to a hostname and port on which it is serving. -type ConfigHolder interface { - TLSConfig() *Config - Host() string - Port() string -} - -// QualifiesForManagedTLS returns true if c qualifies for -// for managed TLS (but not on-demand TLS specifically). -// It does NOT check to see if a cert and key already exist -// for the config. If the return value is true, you should -// be OK to set c.TLSConfig().Managed to true; then you should -// check that value in the future instead, because the process -// of setting up the config may make it look like it doesn't -// qualify even though it originally did. 
-func QualifiesForManagedTLS(c ConfigHolder) bool { - if c == nil { - return false - } - tlsConfig := c.TLSConfig() - if tlsConfig == nil || tlsConfig.Manager == nil { - return false - } - onDemand := tlsConfig.Manager.OnDemand != nil - - return (!tlsConfig.Manual || onDemand) && // user might provide own cert and key - - // if self-signed, we've already generated one to use - !tlsConfig.SelfSigned && - - // user can force-disable managed TLS - c.Port() != "80" && - tlsConfig.ACMEEmail != "off" && - - // we get can't certs for some kinds of hostnames, but - // on-demand TLS allows empty hostnames at startup - (certmagic.HostQualifies(c.Host()) || onDemand) -} - -// Revoke revokes the certificate fro host via the ACME protocol. -// It assumes the certificate was obtained from certmagic.CA. -func Revoke(domainName string) error { - return certmagic.NewDefault().RevokeCert(domainName, true) -} - -// KnownACMECAs is a list of ACME directory endpoints of -// known, public, and trusted ACME-compatible certificate -// authorities. -var KnownACMECAs = []string{ - "https://acme-v02.api.letsencrypt.org/directory", -} - -// ChallengeProvider defines an own type that should be used in Caddy plugins -// over challenge.Provider. Using challenge.Provider causes version mismatches -// with vendored dependencies (see https://github.com/mattfarina/golang-broken-vendor) -// -// challenge.Provider is an interface that allows the implementation of custom -// challenge providers. For more details, see: -// https://godoc.org/github.com/go-acme/lego/acme#ChallengeProvider -type ChallengeProvider challenge.Provider - -// DNSProviderConstructor is a function that takes credentials and -// returns a type that can solve the ACME DNS challenges. -type DNSProviderConstructor func(credentials ...string) (ChallengeProvider, error) - -// dnsProviders is the list of DNS providers that have been plugged in. 
-var dnsProviders = make(map[string]DNSProviderConstructor) - -// RegisterDNSProvider registers provider by name for solving the ACME DNS challenge. -func RegisterDNSProvider(name string, provider DNSProviderConstructor) { - dnsProviders[name] = provider - caddy.RegisterPlugin("tls.dns."+name, caddy.Plugin{}) -} - -// ClusterPluginConstructor is a function type that is used to -// instantiate a new implementation of both certmagic.Storage -// and certmagic.Locker, which are required for successful -// use in cluster environments. -type ClusterPluginConstructor func() (certmagic.Storage, error) - -// clusterProviders is the list of storage providers -var clusterProviders = make(map[string]ClusterPluginConstructor) - -// RegisterClusterPlugin registers provider by name for facilitating -// cluster-wide operations like storage and synchronization. -func RegisterClusterPlugin(name string, provider ClusterPluginConstructor) { - clusterProviders[name] = provider - caddy.RegisterPlugin("tls.cluster."+name, caddy.Plugin{}) -} diff --git a/vendor/github.com/mholt/caddy/commands.go b/vendor/github.com/mholt/caddy/commands.go deleted file mode 100644 index 74796e1df..000000000 --- a/vendor/github.com/mholt/caddy/commands.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package caddy - -import ( - "errors" - "runtime" - "unicode" - - "github.com/flynn/go-shlex" -) - -var runtimeGoos = runtime.GOOS - -// SplitCommandAndArgs takes a command string and parses it shell-style into the -// command and its separate arguments. -func SplitCommandAndArgs(command string) (cmd string, args []string, err error) { - var parts []string - - if runtimeGoos == "windows" { - parts = parseWindowsCommand(command) // parse it Windows-style - } else { - parts, err = parseUnixCommand(command) // parse it Unix-style - if err != nil { - err = errors.New("error parsing command: " + err.Error()) - return - } - } - - if len(parts) == 0 { - err = errors.New("no command contained in '" + command + "'") - return - } - - cmd = parts[0] - if len(parts) > 1 { - args = parts[1:] - } - - return -} - -// parseUnixCommand parses a unix style command line and returns the -// command and its arguments or an error -func parseUnixCommand(cmd string) ([]string, error) { - return shlex.Split(cmd) -} - -// parseWindowsCommand parses windows command lines and -// returns the command and the arguments as an array. It -// should be able to parse commonly used command lines. -// Only basic syntax is supported: -// - spaces in double quotes are not token delimiters -// - double quotes are escaped by either backspace or another double quote -// - except for the above case backspaces are path separators (not special) -// -// Many sources point out that escaping quotes using backslash can be unsafe. -// Use two double quotes when possible. (Source: http://stackoverflow.com/a/31413730/2616179 ) -// -// This function has to be used on Windows instead -// of the shlex package because this function treats backslash -// characters properly. 
-func parseWindowsCommand(cmd string) []string { - const backslash = '\\' - const quote = '"' - - var parts []string - var part string - var inQuotes bool - var lastRune rune - - for i, ch := range cmd { - - if i != 0 { - lastRune = rune(cmd[i-1]) - } - - if ch == backslash { - // put it in the part - for now we don't know if it's an - // escaping char or path separator - part += string(ch) - continue - } - - if ch == quote { - if lastRune == backslash { - // remove the backslash from the part and add the escaped quote instead - part = part[:len(part)-1] - part += string(ch) - continue - } - - if lastRune == quote { - // revert the last change of the inQuotes state - // it was an escaping quote - inQuotes = !inQuotes - part += string(ch) - continue - } - - // normal escaping quotes - inQuotes = !inQuotes - continue - - } - - if unicode.IsSpace(ch) && !inQuotes && len(part) > 0 { - parts = append(parts, part) - part = "" - continue - } - - part += string(ch) - } - - if len(part) > 0 { - parts = append(parts, part) - } - - return parts -} diff --git a/vendor/github.com/mholt/caddy/controller.go b/vendor/github.com/mholt/caddy/controller.go deleted file mode 100644 index af40a23a4..000000000 --- a/vendor/github.com/mholt/caddy/controller.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package caddy - -import ( - "strings" - - "github.com/mholt/caddy/caddyfile" -) - -// Controller is given to the setup function of directives which -// gives them access to be able to read tokens with which to -// configure themselves. It also stores state for the setup -// functions, can get the current context, and can be used to -// identify a particular server block using the Key field. -type Controller struct { - caddyfile.Dispenser - - // The instance in which the setup is occurring - instance *Instance - - // Key is the key from the top of the server block, usually - // an address, hostname, or identifier of some sort. - Key string - - // OncePerServerBlock is a function that executes f - // exactly once per server block, no matter how many - // hosts are associated with it. If it is the first - // time, the function f is executed immediately - // (not deferred) and may return an error which is - // returned by OncePerServerBlock. - OncePerServerBlock func(f func() error) error - - // ServerBlockIndex is the 0-based index of the - // server block as it appeared in the input. - ServerBlockIndex int - - // ServerBlockKeyIndex is the 0-based index of this - // key as it appeared in the input at the head of the - // server block. - ServerBlockKeyIndex int - - // ServerBlockKeys is a list of keys that are - // associated with this server block. All these - // keys, consequently, share the same tokens. - ServerBlockKeys []string - - // ServerBlockStorage is used by a directive's - // setup function to persist state between all - // the keys on a server block. - ServerBlockStorage interface{} -} - -// ServerType gets the name of the server type that is being set up. -func (c *Controller) ServerType() string { - return c.instance.serverType -} - -// OnFirstStartup adds fn to the list of callback functions to execute -// when the server is about to be started NOT as part of a restart. 
-func (c *Controller) OnFirstStartup(fn func() error) { - c.instance.OnFirstStartup = append(c.instance.OnFirstStartup, fn) -} - -// OnStartup adds fn to the list of callback functions to execute -// when the server is about to be started (including restarts). -func (c *Controller) OnStartup(fn func() error) { - c.instance.OnStartup = append(c.instance.OnStartup, fn) -} - -// OnRestart adds fn to the list of callback functions to execute -// when the server is about to be restarted. -func (c *Controller) OnRestart(fn func() error) { - c.instance.OnRestart = append(c.instance.OnRestart, fn) -} - -// OnRestartFailed adds fn to the list of callback functions to execute -// if the server failed to restart. -func (c *Controller) OnRestartFailed(fn func() error) { - c.instance.OnRestartFailed = append(c.instance.OnRestartFailed, fn) -} - -// OnShutdown adds fn to the list of callback functions to execute -// when the server is about to be shut down (including restarts). -func (c *Controller) OnShutdown(fn func() error) { - c.instance.OnShutdown = append(c.instance.OnShutdown, fn) -} - -// OnFinalShutdown adds fn to the list of callback functions to execute -// when the server is about to be shut down NOT as part of a restart. -func (c *Controller) OnFinalShutdown(fn func() error) { - c.instance.OnFinalShutdown = append(c.instance.OnFinalShutdown, fn) -} - -// Context gets the context associated with the instance associated with c. -func (c *Controller) Context() Context { - return c.instance.context -} - -// Get safely gets a value from the Instance's storage. -func (c *Controller) Get(key interface{}) interface{} { - c.instance.StorageMu.RLock() - defer c.instance.StorageMu.RUnlock() - return c.instance.Storage[key] -} - -// Set safely sets a value on the Instance's storage. 
-func (c *Controller) Set(key, val interface{}) { - c.instance.StorageMu.Lock() - c.instance.Storage[key] = val - c.instance.StorageMu.Unlock() -} - -// NewTestController creates a new Controller for -// the server type and input specified. The filename -// is "Testfile". If the server type is not empty and -// is plugged in, a context will be created so that -// the results of setup functions can be checked for -// correctness. -// -// Used only for testing, but exported so plugins can -// use this for convenience. -func NewTestController(serverType, input string) *Controller { - testInst := &Instance{serverType: serverType, Storage: make(map[interface{}]interface{})} - if stype, err := getServerType(serverType); err == nil { - testInst.context = stype.NewContext(testInst) - } - return &Controller{ - instance: testInst, - Dispenser: caddyfile.NewDispenser("Testfile", strings.NewReader(input)), - OncePerServerBlock: func(f func() error) error { return f() }, - } -} diff --git a/vendor/github.com/mholt/caddy/go.mod b/vendor/github.com/mholt/caddy/go.mod deleted file mode 100644 index 96d209aae..000000000 --- a/vendor/github.com/mholt/caddy/go.mod +++ /dev/null @@ -1,25 +0,0 @@ -module github.com/mholt/caddy - -go 1.12 - -require ( - github.com/BurntSushi/toml v0.3.1 // indirect - github.com/dustin/go-humanize v1.0.0 - github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 - github.com/go-acme/lego v2.5.0+incompatible - github.com/google/uuid v1.1.1 - github.com/gorilla/websocket v1.4.0 - github.com/hashicorp/go-syslog v1.0.0 - github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a - github.com/klauspost/cpuid v1.2.0 - github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 // indirect - github.com/lucas-clemente/quic-go v0.10.2 - github.com/mholt/certmagic v0.5.0 - github.com/naoina/go-stringutil v0.1.0 // indirect - github.com/naoina/toml v0.1.1 - github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4 - 
golang.org/x/net v0.0.0-20190328230028-74de082e2cca - gopkg.in/mcuadros/go-syslog.v2 v2.2.1 - gopkg.in/natefinch/lumberjack.v2 v2.0.0 - gopkg.in/yaml.v2 v2.2.2 -) diff --git a/vendor/github.com/mholt/caddy/go.sum b/vendor/github.com/mholt/caddy/go.sum deleted file mode 100644 index 8ba8dd93c..000000000 --- a/vendor/github.com/mholt/caddy/go.sum +++ /dev/null @@ -1,100 +0,0 @@ -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115 h1:fUjoj2bT6dG8LoEe+uNsKk8J+sLkDbQkJnB6Z1F02Bc= -github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= -github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9 h1:a1zrFsLFac2xoM6zG1u72DWJwZG3ayttYLfmLbxVETk= -github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/go-acme/lego 
v2.5.0+incompatible h1:5fNN9yRQfv8ymH3DSsxla+4aYeQt2IgfZqHKVnK8f0s= -github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= -github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a h1:BcF8coBl0QFVhe8vAMMlD+CV8EISiu9MGKLoj6ZEyJA= -github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= -github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= 
-github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f h1:sSeNEkJrs+0F9TUau0CgWTTNEwF23HST3Eq0A+QIx+A= -github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= -github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= -github.com/lucas-clemente/quic-go v0.10.2 h1:iQtTSZVbd44k94Lu0U16lLBIG3lrnjDvQongjPd4B/s= -github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= -github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced h1:zqEC1GJZFbGZA0tRyNZqRjep92K5fujFtFsu5ZW7Aug= -github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= -github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= -github.com/mholt/certmagic v0.5.0 h1:lYXxsLUFya/I3BgDCrfuwcMQOB+4auzI8CCzpK41tjc= -github.com/mholt/certmagic v0.5.0/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= -github.com/miekg/dns v1.1.3 h1:1g0r1IvskvgL8rR+AcHzUA+oFmGcQlaIm4IqakufeMM= -github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= -github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/naoina/toml v0.1.1 h1:PT/lllxVVN0gzzSqSlHEmP8MJB4MY2U7STGxiouV4X8= -github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.5.0 
h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4 h1:S9YlS71UNJIyS61OqGAmLXv3w5zclSidN+qwr80XxKs= -github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190328230028-74de082e2cca h1:hyA6yiAgbUwuWqtscNvWAI7U1CtlaD1KilQ6iudt1aI= -golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e h1:ZytStCyV048ZqDsWHiYDdoI2Vd4msMcrDECFxS+tL9c= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/mcuadros/go-syslog.v2 v2.2.1 h1:60g8zx1BijSVSgLTzLCW9UC4/+i1Ih9jJ1DR5Tgp9vE= -gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/mholt/caddy/onevent/hook/config.go b/vendor/github.com/mholt/caddy/onevent/hook/config.go deleted file mode 100644 index 5055e7eb4..000000000 --- a/vendor/github.com/mholt/caddy/onevent/hook/config.go +++ /dev/null @@ -1,20 +0,0 @@ -package hook - -import ( - "github.com/mholt/caddy" -) - -// Config describes how Hook should be configured and used. -type Config struct { - ID string - Event caddy.EventName - Command string - Args []string -} - -// SupportedEvents is a map of supported events. -var SupportedEvents = map[string]caddy.EventName{ - "startup": caddy.InstanceStartupEvent, - "shutdown": caddy.ShutdownEvent, - "certrenew": caddy.CertRenewEvent, -} diff --git a/vendor/github.com/mholt/caddy/onevent/hook/hook.go b/vendor/github.com/mholt/caddy/onevent/hook/hook.go deleted file mode 100644 index 9d2c4d0b5..000000000 --- a/vendor/github.com/mholt/caddy/onevent/hook/hook.go +++ /dev/null @@ -1,41 +0,0 @@ -package hook - -import ( - "log" - "os" - "os/exec" - "strings" - - "github.com/mholt/caddy" -) - -// Hook executes a command. -func (cfg *Config) Hook(event caddy.EventName, info interface{}) error { - if event != cfg.Event { - return nil - } - - nonblock := false - if len(cfg.Args) >= 1 && cfg.Args[len(cfg.Args)-1] == "&" { - // Run command in background; non-blocking - nonblock = true - cfg.Args = cfg.Args[:len(cfg.Args)-1] - } - - // Execute command. - cmd := exec.Command(cfg.Command, cfg.Args...) 
- cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if nonblock { - log.Printf("[INFO] Nonblocking Command \"%s %s\" with ID %s", cfg.Command, strings.Join(cfg.Args, " "), cfg.ID) - return cmd.Start() - } - log.Printf("[INFO] Blocking Command \"%s %s\" with ID %s", cfg.Command, strings.Join(cfg.Args, " "), cfg.ID) - err := cmd.Run() - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/mholt/caddy/onevent/on.go b/vendor/github.com/mholt/caddy/onevent/on.go deleted file mode 100644 index 473cba55d..000000000 --- a/vendor/github.com/mholt/caddy/onevent/on.go +++ /dev/null @@ -1,71 +0,0 @@ -package onevent - -import ( - "strings" - - "github.com/google/uuid" - "github.com/mholt/caddy" - "github.com/mholt/caddy/onevent/hook" -) - -func init() { - // Register Directive. - caddy.RegisterPlugin("on", caddy.Plugin{Action: setup}) -} - -func setup(c *caddy.Controller) error { - config, err := onParse(c) - if err != nil { - return err - } - - // Register Event Hooks. - err = c.OncePerServerBlock(func() error { - for _, cfg := range config { - caddy.RegisterEventHook("on-"+cfg.ID, cfg.Hook) - } - return nil - }) - if err != nil { - return err - } - - return nil -} - -func onParse(c *caddy.Controller) ([]*hook.Config, error) { - var config []*hook.Config - - for c.Next() { - cfg := new(hook.Config) - - if !c.NextArg() { - return config, c.ArgErr() - } - - // Configure Event. - event, ok := hook.SupportedEvents[strings.ToLower(c.Val())] - if !ok { - return config, c.Errf("Wrong event name or event not supported: '%s'", c.Val()) - } - cfg.Event = event - - // Assign an unique ID. - cfg.ID = uuid.New().String() - - args := c.RemainingArgs() - - // Extract command and arguments. 
- command, args, err := caddy.SplitCommandAndArgs(strings.Join(args, " ")) - if err != nil { - return config, c.Err(err.Error()) - } - - cfg.Command = command - cfg.Args = args - - config = append(config, cfg) - } - - return config, nil -} diff --git a/vendor/github.com/mholt/caddy/plugins.go b/vendor/github.com/mholt/caddy/plugins.go deleted file mode 100644 index cd4497fa9..000000000 --- a/vendor/github.com/mholt/caddy/plugins.go +++ /dev/null @@ -1,470 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "fmt" - "log" - "net" - "sort" - "sync" - - "github.com/mholt/caddy/caddyfile" -) - -// These are all the registered plugins. -var ( - // serverTypes is a map of registered server types. - serverTypes = make(map[string]ServerType) - - // plugins is a map of server type to map of plugin name to - // Plugin. These are the "general" plugins that may or may - // not be associated with a specific server type. If it's - // applicable to multiple server types or the server type is - // irrelevant, the key is empty string (""). But all plugins - // must have a name. - plugins = make(map[string]map[string]Plugin) - - // eventHooks is a map of hook name to Hook. All hooks plugins - // must have a name. - eventHooks = &sync.Map{} - - // parsingCallbacks maps server type to map of directive - // to list of callback functions. 
These aren't really - // plugins on their own, but are often registered from - // plugins. - parsingCallbacks = make(map[string]map[string][]ParsingCallback) - - // caddyfileLoaders is the list of all Caddyfile loaders - // in registration order. - caddyfileLoaders []caddyfileLoader -) - -// DescribePlugins returns a string describing the registered plugins. -func DescribePlugins() string { - pl := ListPlugins() - - str := "Server types:\n" - for _, name := range pl["server_types"] { - str += " " + name + "\n" - } - - str += "\nCaddyfile loaders:\n" - for _, name := range pl["caddyfile_loaders"] { - str += " " + name + "\n" - } - - if len(pl["event_hooks"]) > 0 { - str += "\nEvent hook plugins:\n" - for _, name := range pl["event_hooks"] { - str += " hook." + name + "\n" - } - } - - if len(pl["clustering"]) > 0 { - str += "\nClustering plugins:\n" - for _, name := range pl["clustering"] { - str += " " + name + "\n" - } - } - - str += "\nOther plugins:\n" - for _, name := range pl["others"] { - str += " " + name + "\n" - } - - return str -} - -// ListPlugins makes a list of the registered plugins, -// keyed by plugin type. -func ListPlugins() map[string][]string { - p := make(map[string][]string) - - // server type plugins - for name := range serverTypes { - p["server_types"] = append(p["server_types"], name) - } - - // caddyfile loaders in registration order - for _, loader := range caddyfileLoaders { - p["caddyfile_loaders"] = append(p["caddyfile_loaders"], loader.name) - } - if defaultCaddyfileLoader.name != "" { - p["caddyfile_loaders"] = append(p["caddyfile_loaders"], defaultCaddyfileLoader.name) - } - - // List the event hook plugins - eventHooks.Range(func(k, _ interface{}) bool { - p["event_hooks"] = append(p["event_hooks"], k.(string)) - return true - }) - - // alphabetize the rest of the plugins - var others []string - for stype, stypePlugins := range plugins { - for name := range stypePlugins { - var s string - if stype != "" { - s = stype + "." 
- } - s += name - others = append(others, s) - } - } - - sort.Strings(others) - for _, name := range others { - p["others"] = append(p["others"], name) - } - - return p -} - -// ValidDirectives returns the list of all directives that are -// recognized for the server type serverType. However, not all -// directives may be installed. This makes it possible to give -// more helpful error messages, like "did you mean ..." or -// "maybe you need to plug in ...". -func ValidDirectives(serverType string) []string { - stype, err := getServerType(serverType) - if err != nil { - return nil - } - return stype.Directives() -} - -// ServerListener pairs a server to its listener and/or packetconn. -type ServerListener struct { - server Server - listener net.Listener - packet net.PacketConn -} - -// LocalAddr returns the local network address of the packetconn. It returns -// nil when it is not set. -func (s ServerListener) LocalAddr() net.Addr { - if s.packet == nil { - return nil - } - return s.packet.LocalAddr() -} - -// Addr returns the listener's network address. It returns nil when it is -// not set. -func (s ServerListener) Addr() net.Addr { - if s.listener == nil { - return nil - } - return s.listener.Addr() -} - -// Context is a type which carries a server type through -// the load and setup phase; it maintains the state -// between loading the Caddyfile, then executing its -// directives, then making the servers for Caddy to -// manage. Typically, such state involves configuration -// structs, etc. -type Context interface { - // Called after the Caddyfile is parsed into server - // blocks but before the directives are executed, - // this method gives you an opportunity to inspect - // the server blocks and prepare for the execution - // of directives. Return the server blocks (which - // you may modify, if desired) and an error, if any. - // The first argument is the name or path to the - // configuration file (Caddyfile). 
- // - // This function can be a no-op and simply return its - // input if there is nothing to do here. - InspectServerBlocks(string, []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) - - // This is what Caddy calls to make server instances. - // By this time, all directives have been executed and, - // presumably, the context has enough state to produce - // server instances for Caddy to start. - MakeServers() ([]Server, error) -} - -// RegisterServerType registers a server type srv by its -// name, typeName. -func RegisterServerType(typeName string, srv ServerType) { - if _, ok := serverTypes[typeName]; ok { - panic("server type already registered") - } - serverTypes[typeName] = srv -} - -// ServerType contains information about a server type. -type ServerType struct { - // Function that returns the list of directives, in - // execution order, that are valid for this server - // type. Directives should be one word if possible - // and lower-cased. - Directives func() []string - - // DefaultInput returns a default config input if none - // is otherwise loaded. This is optional, but highly - // recommended, otherwise a blank Caddyfile will be - // used. - DefaultInput func() Input - - // The function that produces a new server type context. - // This will be called when a new Caddyfile is being - // loaded, parsed, and executed independently of any - // startup phases before this one. It's a way to keep - // each set of server instances separate and to reduce - // the amount of global state you need. - NewContext func(inst *Instance) Context -} - -// Plugin is a type which holds information about a plugin. -type Plugin struct { - // ServerType is the type of server this plugin is for. - // Can be empty if not applicable, or if the plugin - // can associate with any server type. - ServerType string - - // Action is the plugin's setup function, if associated - // with a directive in the Caddyfile. - Action SetupFunc -} - -// RegisterPlugin plugs in plugin. 
All plugins should register -// themselves, even if they do not perform an action associated -// with a directive. It is important for the process to know -// which plugins are available. -// -// The plugin MUST have a name: lower case and one word. -// If this plugin has an action, it must be the name of -// the directive that invokes it. A name is always required -// and must be unique for the server type. -func RegisterPlugin(name string, plugin Plugin) { - if name == "" { - panic("plugin must have a name") - } - if _, ok := plugins[plugin.ServerType]; !ok { - plugins[plugin.ServerType] = make(map[string]Plugin) - } - if _, dup := plugins[plugin.ServerType][name]; dup { - panic("plugin named " + name + " already registered for server type " + plugin.ServerType) - } - plugins[plugin.ServerType][name] = plugin -} - -// EventName represents the name of an event used with event hooks. -type EventName string - -// Define names for the various events -const ( - StartupEvent EventName = "startup" - ShutdownEvent = "shutdown" - CertRenewEvent = "certrenew" - InstanceStartupEvent = "instancestartup" - InstanceRestartEvent = "instancerestart" -) - -// EventHook is a type which holds information about a startup hook plugin. -type EventHook func(eventType EventName, eventInfo interface{}) error - -// RegisterEventHook plugs in hook. All the hooks should register themselves -// and they must have a name. -func RegisterEventHook(name string, hook EventHook) { - if name == "" { - panic("event hook must have a name") - } - _, dup := eventHooks.LoadOrStore(name, hook) - if dup { - panic("hook named " + name + " already registered") - } -} - -// EmitEvent executes the different hooks passing the EventType as an -// argument. This is a blocking function. Hook developers should -// use 'go' keyword if they don't want to block Caddy. 
-func EmitEvent(event EventName, info interface{}) { - eventHooks.Range(func(k, v interface{}) bool { - err := v.(EventHook)(event, info) - if err != nil { - log.Printf("error on '%s' hook: %v", k.(string), err) - } - return true - }) -} - -// cloneEventHooks return a clone of the event hooks *sync.Map -func cloneEventHooks() *sync.Map { - c := &sync.Map{} - eventHooks.Range(func(k, v interface{}) bool { - c.Store(k, v) - return true - }) - return c -} - -// purgeEventHooks purges all event hooks from the map -func purgeEventHooks() { - eventHooks.Range(func(k, _ interface{}) bool { - eventHooks.Delete(k) - return true - }) -} - -// restoreEventHooks restores eventHooks with a provided *sync.Map -func restoreEventHooks(m *sync.Map) { - // Purge old event hooks - purgeEventHooks() - - // Restore event hooks - m.Range(func(k, v interface{}) bool { - eventHooks.Store(k, v) - return true - }) -} - -// ParsingCallback is a function that is called after -// a directive's setup functions have been executed -// for all the server blocks. -type ParsingCallback func(Context) error - -// RegisterParsingCallback registers callback to be called after -// executing the directive afterDir for server type serverType. -func RegisterParsingCallback(serverType, afterDir string, callback ParsingCallback) { - if _, ok := parsingCallbacks[serverType]; !ok { - parsingCallbacks[serverType] = make(map[string][]ParsingCallback) - } - parsingCallbacks[serverType][afterDir] = append(parsingCallbacks[serverType][afterDir], callback) -} - -// SetupFunc is used to set up a plugin, or in other words, -// execute a directive. It will be called once per key for -// each server block it appears in. -type SetupFunc func(c *Controller) error - -// DirectiveAction gets the action for directive dir of -// server type serverType. 
-func DirectiveAction(serverType, dir string) (SetupFunc, error) { - if stypePlugins, ok := plugins[serverType]; ok { - if plugin, ok := stypePlugins[dir]; ok { - return plugin.Action, nil - } - } - if genericPlugins, ok := plugins[""]; ok { - if plugin, ok := genericPlugins[dir]; ok { - return plugin.Action, nil - } - } - return nil, fmt.Errorf("no action found for directive '%s' with server type '%s' (missing a plugin?)", - dir, serverType) -} - -// Loader is a type that can load a Caddyfile. -// It is passed the name of the server type. -// It returns an error only if something went -// wrong, not simply if there is no Caddyfile -// for this loader to load. -// -// A Loader should only load the Caddyfile if -// a certain condition or requirement is met, -// as returning a non-nil Input value along with -// another Loader will result in an error. -// In other words, loading the Caddyfile must -// be deliberate & deterministic, not haphazard. -// -// The exception is the default Caddyfile loader, -// which will be called only if no other Caddyfile -// loaders return a non-nil Input. The default -// loader may always return an Input value. -type Loader interface { - Load(serverType string) (Input, error) -} - -// LoaderFunc is a convenience type similar to http.HandlerFunc -// that allows you to use a plain function as a Load() method. -type LoaderFunc func(serverType string) (Input, error) - -// Load loads a Caddyfile. -func (lf LoaderFunc) Load(serverType string) (Input, error) { - return lf(serverType) -} - -// RegisterCaddyfileLoader registers loader named name. -func RegisterCaddyfileLoader(name string, loader Loader) { - caddyfileLoaders = append(caddyfileLoaders, caddyfileLoader{name: name, loader: loader}) -} - -// SetDefaultCaddyfileLoader registers loader by name -// as the default Caddyfile loader if no others produce -// a Caddyfile. If another Caddyfile loader has already -// been set as the default, this replaces it. 
-// -// Do not call RegisterCaddyfileLoader on the same -// loader; that would be redundant. -func SetDefaultCaddyfileLoader(name string, loader Loader) { - defaultCaddyfileLoader = caddyfileLoader{name: name, loader: loader} -} - -// loadCaddyfileInput iterates the registered Caddyfile loaders -// and, if needed, calls the default loader, to load a Caddyfile. -// It is an error if any of the loaders return an error or if -// more than one loader returns a Caddyfile. -func loadCaddyfileInput(serverType string) (Input, error) { - var loadedBy string - var caddyfileToUse Input - for _, l := range caddyfileLoaders { - cdyfile, err := l.loader.Load(serverType) - if err != nil { - return nil, fmt.Errorf("loading Caddyfile via %s: %v", l.name, err) - } - if cdyfile != nil { - if caddyfileToUse != nil { - return nil, fmt.Errorf("Caddyfile loaded multiple times; first by %s, then by %s", loadedBy, l.name) - } - loaderUsed = l - caddyfileToUse = cdyfile - loadedBy = l.name - } - } - if caddyfileToUse == nil && defaultCaddyfileLoader.loader != nil { - cdyfile, err := defaultCaddyfileLoader.loader.Load(serverType) - if err != nil { - return nil, err - } - if cdyfile != nil { - loaderUsed = defaultCaddyfileLoader - caddyfileToUse = cdyfile - } - } - return caddyfileToUse, nil -} - -// OnProcessExit is a list of functions to run when the process -// exits -- they are ONLY for cleanup and should not block, -// return errors, or do anything fancy. They will be run with -// every signal, even if "shutdown callbacks" are not executed. -// This variable must only be modified in the main goroutine -// from init() functions. -var OnProcessExit []func() - -// caddyfileLoader pairs the name of a loader to the loader. 
-type caddyfileLoader struct { - name string - loader Loader -} - -var ( - defaultCaddyfileLoader caddyfileLoader // the default loader if all else fail - loaderUsed caddyfileLoader // the loader that was used (relevant for reloads) -) diff --git a/vendor/github.com/mholt/caddy/rlimit_nonposix.go b/vendor/github.com/mholt/caddy/rlimit_nonposix.go deleted file mode 100644 index 05ca942d3..000000000 --- a/vendor/github.com/mholt/caddy/rlimit_nonposix.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build windows plan9 nacl js - -package caddy - -// checkFdlimit issues a warning if the OS limit for -// max file descriptors is below a recommended minimum. -func checkFdlimit() { -} diff --git a/vendor/github.com/mholt/caddy/rlimit_posix.go b/vendor/github.com/mholt/caddy/rlimit_posix.go deleted file mode 100644 index 9e85bf661..000000000 --- a/vendor/github.com/mholt/caddy/rlimit_posix.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows,!plan9,!nacl,!js - -package caddy - -import ( - "fmt" - "syscall" -) - -// checkFdlimit issues a warning if the OS limit for -// max file descriptors is below a recommended minimum. -func checkFdlimit() { - const min = 8192 - - // Warn if ulimit is too low for production sites - rlimit := &syscall.Rlimit{} - err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, rlimit) - if err == nil && rlimit.Cur < min { - fmt.Printf("WARNING: File descriptor limit %d is too low for production servers. "+ - "At least %d is recommended. Fix with `ulimit -n %d`.\n", rlimit.Cur, min, min) - } - -} diff --git a/vendor/github.com/mholt/caddy/sigtrap.go b/vendor/github.com/mholt/caddy/sigtrap.go deleted file mode 100644 index dbb01b7ec..000000000 --- a/vendor/github.com/mholt/caddy/sigtrap.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package caddy - -import ( - "log" - "os" - "os/signal" - "sync" - - "github.com/mholt/caddy/telemetry" -) - -// TrapSignals create signal handlers for all applicable signals for this -// system. If your Go program uses signals, this is a rather invasive -// function; best to implement them yourself in that case. Signals are not -// required for the caddy package to function properly, but this is a -// convenient way to allow the user to control this part of your program. -func TrapSignals() { - trapSignalsCrossPlatform() - trapSignalsPosix() -} - -// trapSignalsCrossPlatform captures SIGINT, which triggers forceful -// shutdown that executes shutdown callbacks first. A second interrupt -// signal will exit the process immediately. -func trapSignalsCrossPlatform() { - go func() { - shutdown := make(chan os.Signal, 1) - signal.Notify(shutdown, os.Interrupt) - - for i := 0; true; i++ { - <-shutdown - - if i > 0 { - log.Println("[INFO] SIGINT: Force quit") - for _, f := range OnProcessExit { - f() // important cleanup actions only - } - os.Exit(2) - } - - log.Println("[INFO] SIGINT: Shutting down") - - telemetry.AppendUnique("sigtrap", "SIGINT") - go telemetry.StopEmitting() // not guaranteed to finish in time; that's OK (just don't block!) - - // important cleanup actions before shutdown callbacks - for _, f := range OnProcessExit { - f() - } - - go func() { - os.Exit(executeShutdownCallbacks("SIGINT")) - }() - } - }() -} - -// executeShutdownCallbacks executes the shutdown callbacks as initiated -// by signame. It logs any errors and returns the recommended exit status. -// This function is idempotent; subsequent invocations always return 0. 
-func executeShutdownCallbacks(signame string) (exitCode int) { - shutdownCallbacksOnce.Do(func() { - // execute third-party shutdown hooks - EmitEvent(ShutdownEvent, signame) - - errs := allShutdownCallbacks() - if len(errs) > 0 { - for _, err := range errs { - log.Printf("[ERROR] %s shutdown: %v", signame, err) - } - exitCode = 4 - } - }) - return -} - -// allShutdownCallbacks executes all the shutdown callbacks -// for all the instances, and returns all the errors generated -// during their execution. An error executing one shutdown -// callback does not stop execution of others. Only one shutdown -// callback is executed at a time. -func allShutdownCallbacks() []error { - var errs []error - instancesMu.Lock() - for _, inst := range instances { - errs = append(errs, inst.ShutdownCallbacks()...) - } - instancesMu.Unlock() - return errs -} - -// shutdownCallbacksOnce ensures that shutdown callbacks -// for all instances are only executed once. -var shutdownCallbacksOnce sync.Once diff --git a/vendor/github.com/mholt/caddy/sigtrap_nonposix.go b/vendor/github.com/mholt/caddy/sigtrap_nonposix.go deleted file mode 100644 index 517e3a2e4..000000000 --- a/vendor/github.com/mholt/caddy/sigtrap_nonposix.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build windows plan9 nacl js - -package caddy - -func trapSignalsPosix() {} diff --git a/vendor/github.com/mholt/caddy/sigtrap_posix.go b/vendor/github.com/mholt/caddy/sigtrap_posix.go deleted file mode 100644 index fb9703611..000000000 --- a/vendor/github.com/mholt/caddy/sigtrap_posix.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows,!plan9,!nacl,!js - -package caddy - -import ( - "log" - "os" - "os/signal" - "syscall" - - "github.com/mholt/caddy/telemetry" -) - -// trapSignalsPosix captures POSIX-only signals. 
-func trapSignalsPosix() { - go func() { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGUSR1, syscall.SIGUSR2) - - for sig := range sigchan { - switch sig { - case syscall.SIGQUIT: - log.Println("[INFO] SIGQUIT: Quitting process immediately") - for _, f := range OnProcessExit { - f() // only perform important cleanup actions - } - os.Exit(0) - - case syscall.SIGTERM: - log.Println("[INFO] SIGTERM: Shutting down servers then terminating") - exitCode := executeShutdownCallbacks("SIGTERM") - for _, f := range OnProcessExit { - f() // only perform important cleanup actions - } - err := Stop() - if err != nil { - log.Printf("[ERROR] SIGTERM stop: %v", err) - exitCode = 3 - } - - telemetry.AppendUnique("sigtrap", "SIGTERM") - go telemetry.StopEmitting() // won't finish in time, but that's OK - just don't block - - os.Exit(exitCode) - - case syscall.SIGUSR1: - log.Println("[INFO] SIGUSR1: Reloading") - go telemetry.AppendUnique("sigtrap", "SIGUSR1") - - // Start with the existing Caddyfile - caddyfileToUse, inst, err := getCurrentCaddyfile() - if err != nil { - log.Printf("[ERROR] SIGUSR1: %v", err) - continue - } - if loaderUsed.loader == nil { - // This also should never happen - log.Println("[ERROR] SIGUSR1: no Caddyfile loader with which to reload Caddyfile") - continue - } - - // Load the updated Caddyfile - newCaddyfile, err := loaderUsed.loader.Load(inst.serverType) - if err != nil { - log.Printf("[ERROR] SIGUSR1: loading updated Caddyfile: %v", err) - continue - } - if newCaddyfile != nil { - caddyfileToUse = newCaddyfile - } - - // Backup old event hooks - oldEventHooks := cloneEventHooks() - - // Purge the old event hooks - purgeEventHooks() - - // Kick off the restart; our work is done - EmitEvent(InstanceRestartEvent, nil) - _, err = inst.Restart(caddyfileToUse) - if err != nil { - restoreEventHooks(oldEventHooks) - - log.Printf("[ERROR] SIGUSR1: %v", err) - } - - case syscall.SIGUSR2: 
- log.Println("[INFO] SIGUSR2: Upgrading") - go telemetry.AppendUnique("sigtrap", "SIGUSR2") - if err := Upgrade(); err != nil { - log.Printf("[ERROR] SIGUSR2: upgrading: %v", err) - } - - case syscall.SIGHUP: - // ignore; this signal is sometimes sent outside of the user's control - go telemetry.AppendUnique("sigtrap", "SIGHUP") - } - } - }() -} diff --git a/vendor/github.com/mholt/caddy/telemetry/collection.go b/vendor/github.com/mholt/caddy/telemetry/collection.go deleted file mode 100644 index 8b05d17d1..000000000 --- a/vendor/github.com/mholt/caddy/telemetry/collection.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package telemetry - -import ( - "fmt" - "hash/fnv" - "log" - "strings" - - "github.com/google/uuid" -) - -// Init initializes this package so that it may -// be used. Do not call this function more than -// once. Init panics if it is called more than -// once or if the UUID value is empty. Once this -// function is called, the rest of the package -// may safely be used. If this function is not -// called, the collector functions may still be -// invoked, but they will be no-ops. -// -// Any metrics keys that are passed in the second -// argument will be permanently disabled for the -// lifetime of the process. 
-func Init(instanceID uuid.UUID, disabledMetricsKeys []string) { - if enabled { - panic("already initialized") - } - if str := instanceID.String(); str == "" || - str == "00000000-0000-0000-0000-000000000000" { - panic("empty UUID") - } - instanceUUID = instanceID - disabledMetricsMu.Lock() - for _, key := range disabledMetricsKeys { - disabledMetrics[strings.TrimSpace(key)] = false - } - disabledMetricsMu.Unlock() - enabled = true -} - -// StartEmitting sends the current payload and begins the -// transmission cycle for updates. This is the first -// update sent, and future ones will be sent until -// StopEmitting is called. -// -// This function is non-blocking (it spawns a new goroutine). -// -// This function panics if it was called more than once. -// It is a no-op if this package was not initialized. -func StartEmitting() { - if !enabled { - return - } - updateTimerMu.Lock() - if updateTimer != nil { - updateTimerMu.Unlock() - panic("updates already started") - } - updateTimerMu.Unlock() - updateMu.Lock() - if updating { - updateMu.Unlock() - panic("update already in progress") - } - updateMu.Unlock() - go logEmit(false) -} - -// StopEmitting sends the current payload and terminates -// the update cycle. No more updates will be sent. -// -// It is a no-op if the package was never initialized -// or if emitting was never started. -// -// NOTE: This function is blocking. Run in a goroutine if -// you want to guarantee no blocking at critical times -// like exiting the program. -func StopEmitting() { - if !enabled { - return - } - updateTimerMu.Lock() - if updateTimer == nil { - updateTimerMu.Unlock() - return - } - updateTimerMu.Unlock() - logEmit(true) // likely too early; may take minutes to return -} - -// Reset empties the current payload buffer. -func Reset() { - resetBuffer() -} - -// Set puts a value in the buffer to be included -// in the next emission. It overwrites any -// previous value. 
-// -// This function is safe for multiple goroutines, -// and it is recommended to call this using the -// go keyword after the call to SendHello so it -// doesn't block crucial code. -func Set(key string, val interface{}) { - if !enabled || isDisabled(key) { - return - } - bufferMu.Lock() - if _, ok := buffer[key]; !ok { - if bufferItemCount >= maxBufferItems { - bufferMu.Unlock() - return - } - bufferItemCount++ - } - buffer[key] = val - bufferMu.Unlock() -} - -// SetNested puts a value in the buffer to be included -// in the next emission, nested under the top-level key -// as subkey. It overwrites any previous value. -// -// This function is safe for multiple goroutines, -// and it is recommended to call this using the -// go keyword after the call to SendHello so it -// doesn't block crucial code. -func SetNested(key, subkey string, val interface{}) { - if !enabled || isDisabled(key) { - return - } - bufferMu.Lock() - if topLevel, ok1 := buffer[key]; ok1 { - topLevelMap, ok2 := topLevel.(map[string]interface{}) - if !ok2 { - bufferMu.Unlock() - log.Printf("[PANIC] Telemetry: key %s is already used for non-nested-map value", key) - return - } - if _, ok3 := topLevelMap[subkey]; !ok3 { - // don't exceed max buffer size - if bufferItemCount >= maxBufferItems { - bufferMu.Unlock() - return - } - bufferItemCount++ - } - topLevelMap[subkey] = val - } else { - // don't exceed max buffer size - if bufferItemCount >= maxBufferItems { - bufferMu.Unlock() - return - } - bufferItemCount++ - buffer[key] = map[string]interface{}{subkey: val} - } - bufferMu.Unlock() -} - -// Append appends value to a list named key. -// If key is new, a new list will be created. -// If key maps to a type that is not a list, -// a panic is logged, and this is a no-op. -func Append(key string, value interface{}) { - if !enabled || isDisabled(key) { - return - } - bufferMu.Lock() - if bufferItemCount >= maxBufferItems { - bufferMu.Unlock() - return - } - // TODO: Test this... 
- bufVal, inBuffer := buffer[key] - sliceVal, sliceOk := bufVal.([]interface{}) - if inBuffer && !sliceOk { - bufferMu.Unlock() - log.Printf("[PANIC] Telemetry: key %s already used for non-slice value", key) - return - } - if sliceVal == nil { - buffer[key] = []interface{}{value} - } else if sliceOk { - buffer[key] = append(sliceVal, value) - } - bufferItemCount++ - bufferMu.Unlock() -} - -// AppendUnique adds value to a set named key. -// Set items are unordered. Values in the set -// are unique, but how many times they are -// appended is counted. The value must be -// hashable. -// -// If key is new, a new set will be created for -// values with that key. If key maps to a type -// that is not a counting set, a panic is logged, -// and this is a no-op. -func AppendUnique(key string, value interface{}) { - if !enabled || isDisabled(key) { - return - } - bufferMu.Lock() - bufVal, inBuffer := buffer[key] - setVal, setOk := bufVal.(countingSet) - if inBuffer && !setOk { - bufferMu.Unlock() - log.Printf("[PANIC] Telemetry: key %s already used for non-counting-set value", key) - return - } - if setVal == nil { - // ensure the buffer is not too full, then add new unique value - if bufferItemCount >= maxBufferItems { - bufferMu.Unlock() - return - } - buffer[key] = countingSet{value: 1} - bufferItemCount++ - } else if setOk { - // unique value already exists, so just increment counter - setVal[value]++ - } - bufferMu.Unlock() -} - -// Add adds amount to a value named key. -// If it does not exist, it is created with -// a value of 1. If key maps to a type that -// is not an integer, a panic is logged, -// and this is a no-op. -func Add(key string, amount int) { - atomicAdd(key, amount) -} - -// Increment is a shortcut for Add(key, 1) -func Increment(key string) { - atomicAdd(key, 1) -} - -// atomicAdd adds amount (negative to subtract) -// to key. 
-func atomicAdd(key string, amount int) { - if !enabled || isDisabled(key) { - return - } - bufferMu.Lock() - bufVal, inBuffer := buffer[key] - intVal, intOk := bufVal.(int) - if inBuffer && !intOk { - bufferMu.Unlock() - log.Printf("[PANIC] Telemetry: key %s already used for non-integer value", key) - return - } - if !inBuffer { - if bufferItemCount >= maxBufferItems { - bufferMu.Unlock() - return - } - bufferItemCount++ - } - buffer[key] = intVal + amount - bufferMu.Unlock() -} - -// FastHash hashes input using a 32-bit hashing algorithm -// that is fast, and returns the hash as a hex-encoded string. -// Do not use this for cryptographic purposes. -func FastHash(input []byte) string { - h := fnv.New32a() - if _, err := h.Write(input); err != nil { - log.Println("[ERROR] failed to write bytes: ", err) - } - - return fmt.Sprintf("%x", h.Sum32()) -} - -// isDisabled returns whether key is -// a disabled metric key. ALL collection -// functions should call this and not -// save the value if this returns true. -func isDisabled(key string) bool { - // for keys that are augmented with data, such as - // "tls_client_hello_ua:", just - // check the prefix "tls_client_hello_ua" - checkKey := key - if idx := strings.Index(key, ":"); idx > -1 { - checkKey = key[:idx] - } - - disabledMetricsMu.RLock() - _, ok := disabledMetrics[checkKey] - disabledMetricsMu.RUnlock() - return ok -} diff --git a/vendor/github.com/mholt/caddy/telemetry/telemetry.go b/vendor/github.com/mholt/caddy/telemetry/telemetry.go deleted file mode 100644 index 2aab30b88..000000000 --- a/vendor/github.com/mholt/caddy/telemetry/telemetry.go +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package telemetry implements the client for server-side telemetry -// of the network. Functions in this package are synchronous and blocking -// unless otherwise specified. For convenience, most functions here do -// not return errors, but errors are logged to the standard logger. -// -// To use this package, first call Init(). You can then call any of the -// collection/aggregation functions. Call StartEmitting() when you are -// ready to begin sending telemetry updates. -// -// When collecting metrics (functions like Set, AppendUnique, or Increment), -// it may be desirable and even recommended to invoke them in a new -// goroutine in case there is lock contention; they are thread-safe (unless -// noted), and you may not want them to block the main thread of execution. -// However, sometimes blocking may be necessary too; for example, adding -// startup metrics to the buffer before the call to StartEmitting(). -// -// This package is designed to be as fast and space-efficient as reasonably -// possible, so that it does not disrupt the flow of execution. -package telemetry - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "math/rand" - "net/http" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/google/uuid" -) - -// logEmit calls emit and then logs the error, if any. -// See docs for emit. -func logEmit(final bool) { - err := emit(final) - if err != nil { - log.Printf("[ERROR] Sending telemetry: %v", err) - } -} - -// emit sends an update to the telemetry server. 
-// Set final to true if this is the last call to emit. -// If final is true, no future updates will be scheduled. -// Otherwise, the next update will be scheduled. -func emit(final bool) error { - if !enabled { - return fmt.Errorf("telemetry not enabled") - } - - // some metrics are updated/set at time of emission - setEmitTimeMetrics() - - // ensure only one update happens at a time; - // skip update if previous one still in progress - updateMu.Lock() - if updating { - updateMu.Unlock() - log.Println("[NOTICE] Skipping this telemetry update because previous one is still working") - return nil - } - updating = true - updateMu.Unlock() - defer func() { - updateMu.Lock() - updating = false - updateMu.Unlock() - }() - - // terminate any pending update if this is the last one - if final { - stopUpdateTimer() - } - - payloadBytes, err := makePayloadAndResetBuffer() - if err != nil { - return err - } - - // this will hold the server's reply - var reply Response - - // transmit the payload - use a loop to retry in case of failure - for i := 0; i < 4; i++ { - if i > 0 && err != nil { - // don't hammer the server; first failure might have been - // a fluke, but back off more after that - log.Printf("[WARNING] Sending telemetry (attempt %d): %v - backing off and retrying", i, err) - time.Sleep(time.Duration((i+1)*(i+1)*(i+1)) * time.Second) - } - - // send it - var resp *http.Response - resp, err = httpClient.Post(endpoint+instanceUUID.String(), "application/json", bytes.NewReader(payloadBytes)) - if err != nil { - continue - } - - // check for any special-case response codes - if resp.StatusCode == http.StatusGone { - // the endpoint has been deprecated and is no longer servicing clients - err = fmt.Errorf("telemetry server replied with HTTP %d; upgrade required", resp.StatusCode) - if clen := resp.Header.Get("Content-Length"); clen != "0" && clen != "" { - bodyBytes, readErr := ioutil.ReadAll(resp.Body) - if readErr != nil { - log.Printf("[ERROR] Reading response body 
from server: %v", readErr) - } - err = fmt.Errorf("%v - %s", err, bodyBytes) - } - resp.Body.Close() - reply.Stop = true - break - } - if resp.StatusCode == http.StatusUnavailableForLegalReasons { - // the endpoint is unavailable, at least to this client, for legal reasons (!) - err = fmt.Errorf("telemetry server replied with HTTP %d %s: please consult the project website and developers for guidance", resp.StatusCode, resp.Status) - if clen := resp.Header.Get("Content-Length"); clen != "0" && clen != "" { - bodyBytes, readErr := ioutil.ReadAll(resp.Body) - if readErr != nil { - log.Printf("[ERROR] Reading response body from server: %v", readErr) - } - err = fmt.Errorf("%v - %s", err, bodyBytes) - } - resp.Body.Close() - reply.Stop = true - break - } - - // okay, ensure we can interpret the response - if ct := resp.Header.Get("Content-Type"); (resp.StatusCode < 300 || resp.StatusCode >= 400) && - !strings.Contains(ct, "json") { - err = fmt.Errorf("telemetry server replied with unknown content-type: '%s' and HTTP %s", ct, resp.Status) - resp.Body.Close() - continue - } - - // read the response body - err = json.NewDecoder(resp.Body).Decode(&reply) - resp.Body.Close() // close response body as soon as we're done with it - if err != nil { - continue - } - - // update the list of enabled/disabled keys, if any - for _, key := range reply.EnableKeys { - disabledMetricsMu.Lock() - // only re-enable this metric if it is temporarily disabled - if temp, ok := disabledMetrics[key]; ok && temp { - delete(disabledMetrics, key) - } - disabledMetricsMu.Unlock() - } - for _, key := range reply.DisableKeys { - disabledMetricsMu.Lock() - disabledMetrics[key] = true // all remotely-disabled keys are "temporarily" disabled - disabledMetricsMu.Unlock() - } - - // make sure we didn't send the update too soon; if so, - // just wait and try again -- this is a special case of - // error that we handle differently, as you can see - if resp.StatusCode == http.StatusTooManyRequests { - if 
reply.NextUpdate <= 0 { - raStr := resp.Header.Get("Retry-After") - if ra, err := strconv.Atoi(raStr); err == nil { - reply.NextUpdate = time.Duration(ra) * time.Second - } - } - if !final { - log.Printf("[NOTICE] Sending telemetry: we were too early; waiting %s before trying again", reply.NextUpdate) - time.Sleep(reply.NextUpdate) - continue - } - } else if resp.StatusCode >= 400 { - err = fmt.Errorf("telemetry server returned status code %d", resp.StatusCode) - continue - } - - break - } - if err == nil && !final { - // (remember, if there was an error, we return it - // below, so it WILL get logged if it's supposed to) - log.Println("[INFO] Sending telemetry: success") - } - - // even if there was an error after all retries, we should - // schedule the next update using our default update - // interval because the server might be healthy later - - // ensure we won't slam the telemetry server; add a little variance - if reply.NextUpdate < 1*time.Second { - reply.NextUpdate = defaultUpdateInterval + time.Duration(rand.Int63n(int64(1*time.Minute))) - } - - // schedule the next update (if this wasn't the last one and - // if the remote server didn't tell us to stop sending) - if !final && !reply.Stop { - updateTimerMu.Lock() - updateTimer = time.AfterFunc(reply.NextUpdate, func() { - logEmit(false) - }) - updateTimerMu.Unlock() - } - - return err -} - -func stopUpdateTimer() { - updateTimerMu.Lock() - updateTimer.Stop() - updateTimer = nil - updateTimerMu.Unlock() -} - -// setEmitTimeMetrics sets some metrics that should -// be recorded just before emitting. -func setEmitTimeMetrics() { - Set("goroutines", runtime.NumGoroutine()) - - var mem runtime.MemStats - runtime.ReadMemStats(&mem) - SetNested("memory", "heap_alloc", mem.HeapAlloc) - SetNested("memory", "sys", mem.Sys) -} - -// makePayloadAndResetBuffer prepares a payload -// by emptying the collection buffer. It returns -// the bytes of the payload to send to the server. 
-// Since the buffer is reset by this, if the -// resulting byte slice is lost, the payload is -// gone with it. -func makePayloadAndResetBuffer() ([]byte, error) { - bufCopy := resetBuffer() - - // encode payload in preparation for transmission - payload := Payload{ - InstanceID: instanceUUID.String(), - Timestamp: time.Now().UTC(), - Data: bufCopy, - } - return json.Marshal(payload) -} - -// resetBuffer makes a local pointer to the buffer, -// then resets the buffer by assigning to be a newly- -// made value to clear it out, then sets the buffer -// item count to 0. It returns the copied pointer to -// the original map so the old buffer value can be -// used locally. -func resetBuffer() map[string]interface{} { - bufferMu.Lock() - bufCopy := buffer - buffer = make(map[string]interface{}) - bufferItemCount = 0 - bufferMu.Unlock() - return bufCopy -} - -// Response contains the body of a response from the -// telemetry server. -type Response struct { - // NextUpdate is how long to wait before the next update. - NextUpdate time.Duration `json:"next_update"` - - // Stop instructs the telemetry server to stop sending - // telemetry. This would only be done under extenuating - // circumstances, but we are prepared for it nonetheless. - Stop bool `json:"stop,omitempty"` - - // Error will be populated with an error message, if any. - // This field should be empty if the status code is < 400. - Error string `json:"error,omitempty"` - - // DisableKeys will contain a list of keys/metrics that - // should NOT be sent until further notice. The client - // must NOT store these items in its buffer or send them - // to the telemetry server while they are disabled. If - // this list and EnableKeys have the same value (which is - // not supposed to happen), this field should dominate. - DisableKeys []string `json:"disable_keys,omitempty"` - - // EnableKeys will contain a list of keys/metrics that - // MAY be sent until further notice. 
- EnableKeys []string `json:"enable_keys,omitempty"` -} - -// Payload is the data that gets sent to the telemetry server. -type Payload struct { - // The universally unique ID of the instance - InstanceID string `json:"instance_id"` - - // The UTC timestamp of the transmission - Timestamp time.Time `json:"timestamp"` - - // The timestamp before which the next update is expected - // (NOT populated by client - the server fills this in - // before it stores the data) - ExpectNext time.Time `json:"expect_next,omitempty"` - - // The metrics - Data map[string]interface{} `json:"data,omitempty"` -} - -// Int returns the value of the data keyed by key -// if it is an integer; otherwise it returns 0. -func (p Payload) Int(key string) int { - val, _ := p.Data[key] - switch p.Data[key].(type) { - case int: - return val.(int) - case float64: // after JSON-decoding, int becomes float64... - return int(val.(float64)) - } - return 0 -} - -// countingSet implements a set that counts how many -// times a key is inserted. It marshals to JSON in a -// way such that keys are converted to values next -// to their associated counts. -type countingSet map[interface{}]int - -// MarshalJSON implements the json.Marshaler interface. -// It converts the set to an array so that the values -// are JSON object values instead of keys, since keys -// are difficult to query in databases. -func (s countingSet) MarshalJSON() ([]byte, error) { - type Item struct { - Value interface{} `json:"value"` - Count int `json:"count"` - } - var list []Item - - for k, v := range s { - list = append(list, Item{Value: k, Count: v}) - } - - return json.Marshal(list) -} - -var ( - // httpClient should be used for HTTP requests. It - // is configured with a timeout for reliability. - httpClient = http.Client{ - Transport: &http.Transport{ - TLSHandshakeTimeout: 30 * time.Second, - DisableKeepAlives: true, - }, - Timeout: 1 * time.Minute, - } - - // buffer holds the data that we are building up to send. 
- buffer = make(map[string]interface{}) - bufferItemCount = 0 - bufferMu sync.RWMutex // protects both the buffer and its count - - // updating is used to ensure only one - // update happens at a time. - updating bool - updateMu sync.Mutex - - // updateTimer fires off the next update. - // If no update is scheduled, this is nil. - updateTimer *time.Timer - updateTimerMu sync.Mutex - - // disabledMetrics is a set of metric keys - // that should NOT be saved to the buffer - // or sent to the telemetry server. The value - // indicates whether the entry is temporary. - // If the value is true, it may be removed if - // the metric is re-enabled remotely later. If - // the value is false, it is permanent - // (presumably because the user explicitly - // disabled it) and can only be re-enabled - // with user consent. - disabledMetrics = make(map[string]bool) - disabledMetricsMu sync.RWMutex - - // instanceUUID is the ID of the current instance. - // This MUST be set to emit telemetry. - // This MUST NOT be openly exposed to clients, for privacy. - instanceUUID uuid.UUID - - // enabled indicates whether the package has - // been initialized and can be actively used. - enabled bool - - // maxBufferItems is the maximum number of items we'll allow - // in the buffer before we start dropping new ones, in a - // rough (simple) attempt to keep memory use under control. - maxBufferItems = 100000 -) - -const ( - // endpoint is the base URL to remote telemetry server; - // the instance ID will be appended to it. - endpoint = "https://telemetry.caddyserver.com/v1/update/" - - // defaultUpdateInterval is how long to wait before emitting - // more telemetry data if all retires fail. This value is - // only used if the client receives a nonsensical value, or - // doesn't send one at all, or if a connection can't be made, - // likely indicating a problem with the server. Thus, this - // value should be a long duration to help alleviate extra - // load on the server. 
- defaultUpdateInterval = 1 * time.Hour -) diff --git a/vendor/github.com/mholt/caddy/upgrade.go b/vendor/github.com/mholt/caddy/upgrade.go deleted file mode 100644 index c35f9b1cc..000000000 --- a/vendor/github.com/mholt/caddy/upgrade.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2015 Light Code Labs, LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package caddy - -import ( - "encoding/gob" - "fmt" - "io/ioutil" - "log" - "os" - "os/exec" - "sync" -) - -func init() { - // register CaddyfileInput with gob so it knows into - // which concrete type to decode an Input interface - gob.Register(CaddyfileInput{}) -} - -// IsUpgrade returns true if this process is part of an upgrade -// where a parent caddy process spawned this one to upgrade -// the binary. -func IsUpgrade() bool { - mu.Lock() - defer mu.Unlock() - return isUpgrade -} - -// Upgrade re-launches the process, preserving the listeners -// for a graceful upgrade. It does NOT load new configuration; -// it only starts the process anew with the current config. -// This makes it possible to perform zero-downtime binary upgrades. 
-// -// TODO: For more information when debugging, see: -// https://forum.golangbridge.org/t/bind-address-already-in-use-even-after-listener-closed/1510?u=matt -// https://github.com/mholt/shared-conn -func Upgrade() error { - log.Println("[INFO] Upgrading") - - // use existing Caddyfile; do not change configuration during upgrade - currentCaddyfile, _, err := getCurrentCaddyfile() - if err != nil { - return err - } - - if len(os.Args) == 0 { // this should never happen, but... - os.Args = []string{""} - } - - // tell the child that it's a restart - env := os.Environ() - if !IsUpgrade() { - env = append(env, "CADDY__UPGRADE=1") - } - - // prepare our payload to the child process - cdyfileGob := transferGob{ - ListenerFds: make(map[string]uintptr), - Caddyfile: currentCaddyfile, - } - - // prepare a pipe to the fork's stdin so it can get the Caddyfile - rpipe, wpipe, err := os.Pipe() - if err != nil { - return err - } - - // prepare a pipe that the child process will use to communicate - // its success with us by sending > 0 bytes - sigrpipe, sigwpipe, err := os.Pipe() - if err != nil { - return err - } - - // pass along relevant file descriptors to child process; ordering - // is very important since we rely on these being in certain positions. 
- extraFiles := []*os.File{sigwpipe} // fd 3 - - // add file descriptors of all the sockets - for i, j := 0, 0; ; i++ { - instancesMu.Lock() - if i >= len(instances) { - instancesMu.Unlock() - break - } - inst := instances[i] - instancesMu.Unlock() - - for _, s := range inst.servers { - gs, gracefulOk := s.server.(GracefulServer) - ln, lnOk := s.listener.(Listener) - pc, pcOk := s.packet.(PacketConn) - if gracefulOk { - if lnOk { - lnFile, _ := ln.File() - extraFiles = append(extraFiles, lnFile) - cdyfileGob.ListenerFds["tcp"+gs.Address()] = uintptr(4 + j) // 4 fds come before any of the listeners - j++ - } - if pcOk { - pcFile, _ := pc.File() - extraFiles = append(extraFiles, pcFile) - cdyfileGob.ListenerFds["udp"+gs.Address()] = uintptr(4 + j) // 4 fds come before any of the listeners - j++ - } - } - } - } - - // set up the command - cmd := exec.Command(os.Args[0], os.Args[1:]...) - cmd.Stdin = rpipe // fd 0 - cmd.Stdout = os.Stdout // fd 1 - cmd.Stderr = os.Stderr // fd 2 - cmd.ExtraFiles = extraFiles - cmd.Env = env - - // spawn the child process - err = cmd.Start() - if err != nil { - return err - } - - // immediately close our dup'ed fds and the write end of our signal pipe - for _, f := range extraFiles { - err = f.Close() - if err != nil { - return err - } - } - - // feed Caddyfile to the child - err = gob.NewEncoder(wpipe).Encode(cdyfileGob) - if err != nil { - return err - } - err = wpipe.Close() - if err != nil { - return err - } - - // determine whether child startup succeeded - answer, readErr := ioutil.ReadAll(sigrpipe) - if len(answer) == 0 { - cmdErr := cmd.Wait() // get exit status - errStr := fmt.Sprintf("child failed to initialize: %v", cmdErr) - if readErr != nil { - errStr += fmt.Sprintf(" - additionally, error communicating with child process: %v", readErr) - } - return fmt.Errorf(errStr) - } - - // looks like child is successful; we can exit gracefully. 
- log.Println("[INFO] Upgrade finished") - return Stop() -} - -// getCurrentCaddyfile gets the Caddyfile used by the -// current (first) Instance and returns both of them. -func getCurrentCaddyfile() (Input, *Instance, error) { - instancesMu.Lock() - if len(instances) == 0 { - instancesMu.Unlock() - return nil, nil, fmt.Errorf("no server instances are fully running") - } - inst := instances[0] - instancesMu.Unlock() - - currentCaddyfile := inst.caddyfileInput - if currentCaddyfile == nil { - // hmm, did spawning process forget to close stdin? Anyhow, this is unusual. - return nil, inst, fmt.Errorf("no Caddyfile to reload (was stdin left open?)") - } - return currentCaddyfile, inst, nil -} - -// signalSuccessToParent tells the parent our status using pipe at index 3. -// If this process is not a restart, this function does nothing. -// Calling this function once this process has successfully initialized -// is vital so that the parent process can unblock and kill itself. -// This function is idempotent; it executes at most once per process. -func signalSuccessToParent() { - signalParentOnce.Do(func() { - if IsUpgrade() { - ppipe := os.NewFile(3, "") // parent is reading from pipe at index 3 - _, err := ppipe.Write([]byte("success")) // we must send some bytes to the parent - if err != nil { - log.Printf("[ERROR] Communicating successful init to parent: %v", err) - } - ppipe.Close() - } - }) -} - -// signalParentOnce is used to make sure that the parent is only -// signaled once; doing so more than once breaks whatever socket is -// at fd 4 (TODO: the reason for this is still unclear - to reproduce, -// call Stop() and Start() in succession at least once after a -// restart, then try loading first host of Caddyfile in the browser -// - this was pre-v0.9; this code and godoc is borrowed from the -// implementation then, but I'm not sure if it's been fixed yet, as -// of v0.10.7). Do not use this directly; call signalSuccessToParent -// instead. 
-var signalParentOnce sync.Once - -// transferGob is used if this is a child process as part of -// a graceful upgrade; it is used to map listeners to their -// index in the list of inherited file descriptors. This -// variable is not safe for concurrent access. -var loadedGob transferGob - -// transferGob maps bind address to index of the file descriptor -// in the Files array passed to the child process. It also contains -// the Caddyfile contents and any other state needed by the new process. -// Used only during graceful upgrades. -type transferGob struct { - ListenerFds map[string]uintptr - Caddyfile Input -} diff --git a/vendor/github.com/mholt/certmagic/.gitignore b/vendor/github.com/mholt/certmagic/.gitignore deleted file mode 100644 index fbd281d14..000000000 --- a/vendor/github.com/mholt/certmagic/.gitignore +++ /dev/null @@ -1 +0,0 @@ -_gitignore/ diff --git a/vendor/github.com/mholt/certmagic/.travis.yml b/vendor/github.com/mholt/certmagic/.travis.yml deleted file mode 100644 index 9dfaec42e..000000000 --- a/vendor/github.com/mholt/certmagic/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go - -go: - - 1.x - - tip - -matrix: - allow_failures: - - go: tip - fast_finish: true - -install: - - go get -t ./... - - go get golang.org/x/lint/golint - - go get github.com/alecthomas/gometalinter - -script: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.15.0 - - golangci-lint run --disable-all -E vet -E gofmt -E misspell -E ineffassign -E goimports -E deadcode --tests - - go test -race ./... - -after_script: - - golint ./... 
diff --git a/vendor/github.com/mholt/certmagic/LICENSE.txt b/vendor/github.com/mholt/certmagic/LICENSE.txt deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/github.com/mholt/certmagic/LICENSE.txt +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/mholt/certmagic/README.md b/vendor/github.com/mholt/certmagic/README.md deleted file mode 100644 index 4d3b5f422..000000000 --- a/vendor/github.com/mholt/certmagic/README.md +++ /dev/null @@ -1,500 +0,0 @@ -

- CertMagic -

-

Easy and Powerful TLS Automation

-

The same library used by the Caddy Web Server

-

- - - - -

- - -Caddy's automagic TLS features, now for your own Go programs, in one powerful and easy-to-use library! - -CertMagic is the most mature, robust, and capable ACME client integration for Go. - -With CertMagic, you can add one line to your Go application to serve securely over TLS, without ever having to touch certificates. - -Instead of: - -```go -// plaintext HTTP, gross 🤢 -http.ListenAndServe(":80", mux) -``` - -Use CertMagic: - -```go -// encrypted HTTPS with HTTP->HTTPS redirects - yay! 🔒😠-certmagic.HTTPS([]string{"example.com"}, mux) -``` - -That line of code will serve your HTTP router `mux` over HTTPS, complete with HTTP->HTTPS redirects. It obtains and renews the TLS certificates. It staples OCSP responses for greater privacy and security. As long as your domain name points to your server, CertMagic will keep its connections secure. - -Compared to other ACME client libraries for Go, only CertMagic supports the full suite of ACME features, and no other library matches CertMagic's maturity and reliability. 
- - - - -CertMagic - Automatic HTTPS using Let's Encrypt -=============================================== - -**Sponsored by Relica - Cross-platform local and cloud file backup:** - -Relica - Cross-platform file backup to the cloud, local disks, or other computers - - -## Menu - -- [Features](#features) -- [Requirements](#requirements) -- [Installation](#installation) -- [Usage](#usage) - - [Package Overview](#package-overview) - - [Certificate authority](#certificate-authority) - - [The `Config` type](#the-config-type) - - [Defaults](#defaults) - - [Providing an email address](#providing-an-email-address) - - [Development and testing](#development-and-testing) - - [Examples](#examples) - - [Serving HTTP handlers with HTTPS](#serving-http-handlers-with-https) - - [Starting a TLS listener](#starting-a-tls-listener) - - [Getting a tls.Config](#getting-a-tlsconfig) - - [Advanced use](#advanced-use) - - [Wildcard Certificates](#wildcard-certificates) - - [Behind a load balancer (or in a cluster)](#behind-a-load-balancer-or-in-a-cluster) - - [The ACME Challenges](#the-acme-challenges) - - [HTTP Challenge](#http-challenge) - - [TLS-ALPN Challenge](#tls-alpn-challenge) - - [DNS Challenge](#dns-challenge) - - [On-Demand TLS](#on-demand-tls) - - [Storage](#storage) - - [Cache](#cache) -- [Contributing](#contributing) -- [Project History](#project-history) -- [Credits and License](#credits-and-license) - - -## Features - -- Fully automated certificate management including issuance and renewal -- One-liner, fully managed HTTPS servers -- Full control over almost every aspect of the system -- HTTP->HTTPS redirects (for HTTP applications) -- Solves all 3 ACME challenges: HTTP, TLS-ALPN, and DNS -- Over 50 DNS providers work out-of-the-box (powered by [lego](https://github.com/go-acme/lego)!) 
-- Pluggable storage implementations (default: file system) -- Wildcard certificates (requires DNS challenge) -- OCSP stapling for each qualifying certificate ([done right](https://gist.github.com/sleevi/5efe9ef98961ecfb4da8#gistcomment-2336055)) -- Distributed solving of all challenges (works behind load balancers) -- Supports "on-demand" issuance of certificates (during TLS handshakes!) - - Custom decision functions - - Hostname whitelist - - Ask an external URL - - Rate limiting -- Optional event hooks to observe internal behaviors -- Works with any certificate authority (CA) compliant with the ACME specification -- Certificate revocation (please, only if private key is compromised) -- Must-Staple (optional; not default) -- Cross-platform support! Mac, Windows, Linux, BSD, Android... -- Scales well to thousands of names/certificates per instance -- Use in conjunction with your own certificates - - -## Requirements - -1. Public DNS name(s) you control -2. Server reachable from public Internet - - Or use the DNS challenge to waive this requirement -3. Control over port 80 (HTTP) and/or 443 (HTTPS) - - Or they can be forwarded to other ports you control - - Or use the DNS challenge to waive this requirement - - (This is a requirement of the ACME protocol, not a library limitation) -4. Persistent storage - - Typically the local file system (default) - - Other integrations available/possible - -**_Before using this library, your domain names MUST be pointed (A/AAAA records) at your server (unless you use the DNS challenge)!_** - - -## Installation - -```bash -$ go get -u github.com/mholt/certmagic -``` - - -## Usage - -### Package Overview - -#### Certificate authority - -This library uses Let's Encrypt by default, but you can use any certificate authority that conforms to the ACME specification. Known/common CAs are provided as consts in the package, for example `LetsEncryptStagingCA` and `LetsEncryptProductionCA`. 
- -#### The `Config` type - -The `certmagic.Config` struct is how you can wield the power of this fully armed and operational battle station. However, an empty/uninitialized `Config` is _not_ a valid one! In time, you will learn to use the force of `certmagic.NewDefault()` as I have. - -#### Defaults - -The default `Config` value is called `certmagic.Default`. Change its fields to suit your needs, then call `certmagic.NewDefault()` when you need a valid `Config` value. In other words, `certmagic.Default` is a template and is not valid for use directly. - -You can set the default values easily, for example: `certmagic.Default.Email = ...`. - -The high-level functions in this package (`HTTPS()`, `Listen()`, and `Manage()`) use the default config exclusively. This is how most of you will interact with the package. This is suitable when all your certificates are managed the same way. However, if you need to manage certificates differently depending on their name, you will need to make your own cache and configs (keep reading). - - -#### Providing an email address - -Although not strictly required, this is highly recommended best practice. It allows you to receive expiration emails if your certificates are expiring for some reason, and also allows the CA's engineers to potentially get in touch with you if something is wrong. I recommend setting `certmagic.Default.Email` or always setting the `Email` field of a new `Config` struct. - - -### Development and Testing - -Note that Let's Encrypt imposes [strict rate limits](https://letsencrypt.org/docs/rate-limits/) at its production endpoint, so using it while developing your application may lock you out for a few days if you aren't careful! - -While developing your application and testing it, use [their staging endpoint](https://letsencrypt.org/docs/staging-environment/) which has much higher rate limits. Even then, don't hammer it: but it's much safer for when you're testing. 
When deploying, though, use their production CA because their staging CA doesn't issue trusted certificates. - -To use staging, set `certmagic.Default.CA = certmagic.LetsEncryptStagingCA` or set `CA` of every `Config` struct. - - - -### Examples - -There are many ways to use this library. We'll start with the highest-level (simplest) and work down (more control). - -All these high-level examples use `certmagic.Default` for the config and the default cache and storage for serving up certificates. - -First, we'll follow best practices and do the following: - -```go -// read and agree to your CA's legal documents -certmagic.Default.Agreed = true - -// provide an email address -certmagic.Default.Email = "you@yours.com" - -// use the staging endpoint while we're developing -certmagic.Default.CA = certmagic.LetsEncryptStagingCA -``` - -For fully-functional program examples, check out [this Twitter thread](https://twitter.com/mholt6/status/1073103805112147968) (or read it [unrolled into a single post](https://threadreaderapp.com/thread/1073103805112147968.html)). (Note that the package API has changed slightly since these posts.) - - -#### Serving HTTP handlers with HTTPS - -```go -err := certmagic.HTTPS([]string{"example.com", "www.example.com"}, mux) -if err != nil { - return err -} -``` - -This starts HTTP and HTTPS listeners and redirects HTTP to HTTPS! 
- -#### Starting a TLS listener - -```go -ln, err := certmagic.Listen([]string{"example.com"}) -if err != nil { - return err -} -``` - - -#### Getting a tls.Config - -```go -tlsConfig, err := certmagic.TLS([]string{"example.com"}) -if err != nil { - return err -} -``` - - -#### Advanced use - -For more control (particularly, if you need a different way of managing each certificate), you'll make and use a `Cache` and a `Config` like so: - -```go -cache := certmagic.NewCache(certmagic.CacheOptions{ - GetConfigForCert: func(cert certmagic.Certificate) (*certmagic.Config, error) { - // do whatever you need to do to get the right - // configuration for this certificate; keep in - // mind that this config value is used as a - // template, and will be completed with any - // defaults that are set in the Default config - return certmagic.Config{ - // ... - }), nil - }, - ... -}) - -magic := certmagic.New(cache, certmagic.Config{ - CA: certmagic.LetsEncryptStagingCA, - Email: "you@yours.com", - Agreed: true, - // plus any other customization you want -}) - -// this obtains certificates or renews them if necessary -err := magic.Manage([]string{"example.com", "sub.example.com"}) -if err != nil { - return err -} - -// to use its certificates and solve the TLS-ALPN challenge, -// you can get a TLS config to use in a TLS listener! -tlsConfig := magic.TLSConfig() - -//// OR //// - -// if you already have a TLS config you don't want to replace, -// we can simply set its GetCertificate field and append the -// TLS-ALPN challenge protocol to the NextProtos -myTLSConfig.GetCertificate = magic.GetCertificate -myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol} - -// the HTTP challenge has to be handled by your HTTP server; -// if you don't have one, you should have disabled it earlier -// when you made the certmagic.Config -httpMux = magic.HTTPChallengeHandler(httpMux) -``` - -Great! This example grants you much more flexibility for advanced programs. 
However, _the vast majority of you will only use the high-level functions described earlier_, especially since you can still customize them by setting the package-level `Default` config. - - -### Wildcard certificates - -At time of writing (December 2018), Let's Encrypt only issues wildcard certificates with the DNS challenge. You can easily enable the DNS challenge with CertMagic for numerous providers (see the relevant section in the docs). - - -### Behind a load balancer (or in a cluster) - -CertMagic runs effectively behind load balancers and/or in cluster/fleet environments. In other words, you can have 10 or 1,000 servers all serving the same domain names, all sharing certificates and OCSP staples. - -To do so, simply ensure that each instance is using the same Storage. That is the sole criteria for determining whether an instance is part of a cluster. - -The default Storage is implemented using the file system, so mounting the same shared folder is sufficient (see [Storage](#storage) for more on that)! If you need an alternate Storage implementation, feel free to use one, provided that all the instances use the _same_ one. :) - -See [Storage](#storage) and the associated [godoc](https://godoc.org/github.com/mholt/certmagic#Storage) for more information! - - -## The ACME Challenges - -This section describes how to solve the ACME challenges. Challenges are how you demonstrate to the certificate authority some control over your domain name, thus authorizing them to grant you a certificate for that name. [The great innovation of ACME](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) is that verification by CAs can now be automated, rather than having to click links in emails (who ever thought that was a good idea??). - -If you're using the high-level convenience functions like `HTTPS()`, `Listen()`, or `TLS()`, the HTTP and/or TLS-ALPN challenges are solved for you because they also start listeners. 
However, if you're making a `Config` and you start your own server manually, you'll need to be sure the ACME challenges can be solved so certificates can be renewed. - -The HTTP and TLS-ALPN challenges are the defaults because they don't require configuration from you, but they require that your server is accessible from external IPs on low ports. If that is not possible in your situation, you can enable the DNS challenge, which will disable the HTTP and TLS-ALPN challenges and use the DNS challenge exclusively. - -Technically, only one challenge needs to be enabled for things to work, but using multiple is good for reliability in case a challenge is discontinued by the CA. This happened to the TLS-SNI challenge in early 2018—many popular ACME clients such as Traefik and Autocert broke, resulting in downtime for some sites, until new releases were made and patches deployed, because they used only one challenge; Caddy, however—this library's forerunner—was unaffected because it also used the HTTP challenge. If multiple challenges are enabled, they are chosen randomly to help prevent false reliance on a single challenge type. - - -### HTTP Challenge - -Per the ACME spec, the HTTP challenge requires port 80, or at least packet forwarding from port 80. It works by serving a specific HTTP response that only the genuine server would have to a normal HTTP request at a special endpoint. - -If you are running an HTTP server, solving this challenge is very easy: just wrap your handler in `HTTPChallengeHandler` _or_ call `SolveHTTPChallenge()` inside your own `ServeHTTP()` method. 
- -For example, if you're using the standard library: - -```go -mux := http.NewServeMux() -mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Lookit my cool website over HTTPS!") -}) - -http.ListenAndServe(":80", magic.HTTPChallengeHandler(mux)) -``` - -If wrapping your handler is not a good solution, try this inside your `ServeHTTP()` instead: - -```go -magic := certmagic.NewDefault() - -func ServeHTTP(w http.ResponseWriter, req *http.Request) { - if magic.HandleHTTPChallenge(w, r) { - return // challenge handled; nothing else to do - } - ... -} -``` - -If you are not running an HTTP server, you should disable the HTTP challenge _or_ run an HTTP server whose sole job it is to solve the HTTP challenge. - - -### TLS-ALPN Challenge - -Per the ACME spec, the TLS-ALPN challenge requires port 443, or at least packet forwarding from port 443. It works by providing a special certificate using a standard TLS extension, Application Layer Protocol Negotiation (ALPN), having a special value. This is the most convenient challenge type because it usually requires no extra configuration and uses the standard TLS port which is where the certificates are used, also. 
- -This challenge is easy to solve: just use the provided `tls.Config` when you make your TLS listener: - -```go -// use this to configure a TLS listener -tlsConfig := magic.TLSConfig() -``` - -Or make two simple changes to an existing `tls.Config`: - -```go -myTLSConfig.GetCertificate = magic.GetCertificate -myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol} -``` - -Then just make sure your TLS listener is listening on port 443: - -```go -ln, err := tls.Listen("tcp", ":443", myTLSConfig) -``` - - -### DNS Challenge - -The DNS challenge is perhaps the most useful challenge because it allows you to obtain certificates without your server needing to be publicly accessible on the Internet, and it's the only challenge by which Let's Encrypt will issue wildcard certificates. - -This challenge works by setting a special record in the domain's zone. To do this automatically, your DNS provider needs to offer an API by which changes can be made to domain names, and the changes need to take effect immediately for best results. CertMagic supports [all of lego's DNS provider implementations](https://github.com/go-acme/lego/tree/master/providers/dns)! All of them clean up the temporary record after the challenge completes. - -To enable it, just set the `DNSProvider` field on a `certmagic.Config` struct, or set the default `certmagic.DNSProvider` variable. For example, if my domains' DNS was served by DNSimple and I set my DNSimple API credentials in environment variables: - -```go -import "github.com/go-acme/lego/providers/dns/dnsimple" - -provider, err := dnsimple.NewDNSProvider() -if err != nil { - return err -} - -certmagic.Default.DNSProvider = provider -``` - -Now the DNS challenge will be used by default, and I can obtain certificates for wildcard domains. See the [godoc documentation for the provider you're using](https://godoc.org/github.com/go-acme/lego/providers/dns#pkg-subdirectories) to learn how to configure it. 
Most can be configured by env variables or by passing in a config struct. If you pass a config struct instead of using env variables, you will probably need to set some other defaults (that's just how lego works, currently): - -```go -PropagationTimeout: dns01.DefaultPollingInterval, -PollingInterval: dns01.DefaultPollingInterval, -TTL: dns01.DefaultTTL, -``` - -Enabling the DNS challenge disables the other challenges for that `certmagic.Config` instance. - - -## On-Demand TLS - -Normally, certificates are obtained and renewed before a listener starts serving, and then those certificates are maintained throughout the lifetime of the program. In other words, the certificate names are static. But sometimes you don't know all the names ahead of time. This is where On-Demand TLS shines. - -Originally invented for use in Caddy (which was the first program to use such technology), On-Demand TLS makes it possible and easy to serve certificates for arbitrary names during the lifetime of the server. When a TLS handshake is received, CertMagic will read the Server Name Indication (SNI) value and either load and present that certificate in the ServerHello, or if one does not exist, it will obtain it from a CA right then-and-there. - -Of course, this has some obvious security implications. You don't want to DoS a CA or allow arbitrary clients to fill your storage with spammy TLS handshakes. That's why, in order to enable On-Demand issuance, you'll need to set some limits or some policy to allow getting a certificate. 
- -CertMagic provides several ways to enforce decision policies for On-Demand TLS, in descending order of priority: - -- A generic function that you write which will decide whether to allow the certificate request -- A name whitelist -- The ability to make an HTTP request to a URL for permission -- Rate limiting - -The simplest way to enable On-Demand issuance is to set the OnDemand field of a Config (or the default package-level value): - -```go -certmagic.Default.OnDemand = &certmagic.OnDemandConfig{MaxObtain: 5} -``` - -This allows only 5 certificates to be requested and is the simplest way to enable On-Demand TLS, but is the least recommended. It prevents abuse, but only in the least helpful way. - -The [godoc](https://godoc.org/github.com/mholt/certmagic#OnDemandConfig) describes how to use the other policies, all of which are much more recommended! :) - -If `OnDemand` is set and `Manage()` is called, then the names given to `Manage()` will be whitelisted rather than obtained right away. - - -## Storage - -CertMagic relies on storage to store certificates and other TLS assets (OCSP staple cache, coordinating locks, etc). Persistent storage is a requirement when using CertMagic: ephemeral storage will likely lead to rate limiting on the CA-side as CertMagic will always have to get new certificates. - -By default, CertMagic stores assets on the local file system in `$HOME/.local/share/certmagic` (and honors `$XDG_DATA_HOME` if set). CertMagic will create the directory if it does not exist. If writes are denied, things will not be happy, so make sure CertMagic can write to it! - -The notion of a "cluster" or "fleet" of instances that may be serving the same site and sharing certificates, etc, is tied to storage. Simply, any instances that use the same storage facilities are considered part of the cluster. So if you deploy 100 instances of CertMagic behind a load balancer, they are all part of the same cluster if they share the same storage configuration. 
Sharing storage could be mounting a shared folder, or implementing some other distributed storage system such as a database server or KV store. - -The easiest way to change the storage being used is to set `certmagic.DefaultStorage` to a value that satisfies the [Storage interface](https://godoc.org/github.com/mholt/certmagic#Storage). Keep in mind that a valid `Storage` must be able to implement some operations atomically in order to provide locking and synchronization. - -If you write a Storage implementation, please add it to the [project wiki](https://github.com/mholt/certmagic/wiki/Storage-Implementations) so people can find it! - - -## Cache - -All of the certificates in use are de-duplicated and cached in memory for optimal performance at handshake-time. This cache must be backed by persistent storage as described above. - -Most applications will not need to interact with certificate caches directly. Usually, the closest you will come is to set the package-wide `certmagic.DefaultStorage` variable (before attempting to create any Configs). However, if your use case requires using different storage facilities for different Configs (that's highly unlikely and NOT recommended! Even Caddy doesn't get that crazy), you will need to call `certmagic.NewCache()` and pass in the storage you want to use, then get new `Config` structs with `certmagic.NewWithCache()` and pass in the cache. - -Again, if you're needing to do this, you've probably over-complicated your application design. - - -## FAQ - -### Can I use some of my own certificates while using CertMagic? 
- -Yes, just call the relevant method on the `Config` to add your own certificate to the cache: - -- [`CacheUnmanagedCertificatePEMBytes()`](https://godoc.org/github.com/mholt/certmagic#Config.CacheUnmanagedCertificatePEMBytes) -- [`CacheUnmanagedCertificatePEMFile()`](https://godoc.org/github.com/mholt/certmagic#Config.CacheUnmanagedCertificatePEMFile) -- [`CacheUnmanagedTLSCertificate()`](https://godoc.org/github.com/mholt/certmagic#Config.CacheUnmanagedTLSCertificate) - -Keep in mind that unmanaged certificates are (obviously) not renewed for you, so you'll have to replace them when you do. However, OCSP stapling is performed even for unmanaged certificates that qualify. - - -### Does CertMagic obtain SAN certificates? - -Technically all certificates these days are SAN certificates because CommonName is deprecated. But if you're asking whether CertMagic issues and manages certificates with multiple SANs, the answer is no. But it does support serving them, if you provide your own. - - -### How can I listen on ports 80 and 443? Do I have to run as root? - -On Linux, you can use `setcap` to grant your binary the permission to bind low ports: - -```bash -$ sudo setcap cap_net_bind_service=+ep /path/to/your/binary -``` - -and then you will not need to run with root privileges. - - -## Contributing - -We welcome your contributions! Please see our **[contributing guidelines](https://github.com/mholt/certmagic/blob/master/.github/CONTRIBUTING.md)** for instructions. - - -## Project History - -CertMagic is the core of Caddy's advanced TLS automation code, extracted into a library. The underlying ACME client implementation is [lego](https://github.com/go-acme/lego), which was originally developed for use in Caddy even before Let's Encrypt entered public beta in 2015. - -In the years since then, Caddy's TLS automation techniques have been widely adopted, tried and tested in production, and served millions of sites and secured trillions of connections. 
- -Now, CertMagic is _the actual library used by Caddy_. It's incredibly powerful and feature-rich, but also easy to use for simple Go programs: one line of code can enable fully-automated HTTPS applications with HTTP->HTTPS redirects. - -Caddy is known for its robust HTTPS+ACME features. When ACME certificate authorities have had outages, in some cases Caddy was the only major client that didn't experience any downtime. Caddy can weather OCSP outages lasting days, or CA outages lasting weeks, without taking your sites offline. - -Caddy was also the first to sport "on-demand" issuance technology, which obtains certificates during the first TLS handshake for an allowed SNI name. - -Consequently, CertMagic brings all these (and more) features and capabilities right into your own Go programs. - -You can [watch a 2016 dotGo talk](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) by the author of this library about using ACME to automate certificate management in Go programs: - -[![Matthew Holt speaking at dotGo 2016 about ACME in Go](https://user-images.githubusercontent.com/1128849/49921557-2d506780-fe6b-11e8-97bf-6053b6b4eb48.png)](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) - - - -## Credits and License - -CertMagic is a project by [Matthew Holt](https://twitter.com/mholt6), who is the author; and various contributors, who are credited in the commit history of either CertMagic or Caddy. - -CertMagic is licensed under Apache 2.0, an open source license. 
For convenience, its main points are summarized as follows (but this is no replacement for the actual license text): - -- The author owns the copyright to this code -- Use, distribute, and modify the software freely -- Private and internal use is allowed -- License text and copyright notices must stay intact and be included with distributions -- Any and all changes to the code must be documented diff --git a/vendor/github.com/mholt/certmagic/appveyor.yml b/vendor/github.com/mholt/certmagic/appveyor.yml deleted file mode 100644 index 2ab77bec7..000000000 --- a/vendor/github.com/mholt/certmagic/appveyor.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: "{build}" - -clone_folder: c:\gopath\src\github.com\mholt\certmagic - -environment: - GOPATH: c:\gopath - -stack: go 1.11 - -install: - - set PATH=%GOPATH%\bin;%PATH% - - set PATH=C:\msys64\mingw64\bin;%PATH% - - go get -t ./... - - go get golang.org/x/lint/golint - - go get github.com/alecthomas/gometalinter - -build: off - -before_test: - - go version - - go env - -test_script: - - go test -race ./... - -after_test: - - golint ./... - -deploy: off diff --git a/vendor/github.com/mholt/certmagic/cache.go b/vendor/github.com/mholt/certmagic/cache.go deleted file mode 100644 index 79641df68..000000000 --- a/vendor/github.com/mholt/certmagic/cache.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package certmagic - -import ( - "fmt" - "sync" - "time" -) - -// Cache is a structure that stores certificates in memory. -// A Cache indexes certificates by name for quick access -// during TLS handshakes, and avoids duplicating certificates -// in memory. Generally, there should only be one per process. -// However, that is not a strict requirement; but using more -// than one is a code smell, and may indicate an -// over-engineered design. -// -// An empty cache is INVALID and must not be used. Be sure -// to call NewCache to get a valid value. -// -// These should be very long-lived values and must not be -// copied. Before all references leave scope to be garbage -// collected, ensure you call Stop() to stop maintenance on -// the certificates stored in this cache and release locks. -// -// Caches are not usually manipulated directly; create a -// Config value with a pointer to a Cache, and then use -// the Config to interact with the cache. Caches are -// agnostic of any particular storage or ACME config, -// since each certificate may be managed and stored -// differently. -type Cache struct { - // User configuration of the cache - options CacheOptions - - // The cache is keyed by certificate hash - cache map[string]Certificate - - // cacheIndex is a map of SAN to cache key (cert hash) - cacheIndex map[string][]string - - // Protects the cache and index maps - mu sync.RWMutex - - // Close this channel to cancel asset maintenance - stopChan chan struct{} - - // Used to signal when stopping is completed - doneChan chan struct{} -} - -// NewCache returns a new, valid Cache for efficiently -// accessing certificates in memory. It also begins a -// maintenance goroutine to tend to the certificates -// in the cache. Call Stop() when you are done with the -// cache so it can clean up locks and stuff. -// -// Most users of this package will not need to call this -// because a default certificate cache is created for you. 
-// Only advanced use cases require creating a new cache. -// -// This function panics if opts.GetConfigForCert is not -// set. The reason is that a cache absolutely needs to -// be able to get a Config with which to manage TLS -// assets, and it is not safe to assume that the Default -// config is always the correct one, since you have -// created the cache yourself. -// -// See the godoc for Cache to use it properly. When -// no longer needed, caches should be stopped with -// Stop() to clean up resources even if the process -// is being terminated, so that it can clean up -// any locks for other processes to unblock! -func NewCache(opts CacheOptions) *Cache { - // assume default options if necessary - if opts.OCSPCheckInterval <= 0 { - opts.OCSPCheckInterval = DefaultOCSPCheckInterval - } - if opts.RenewCheckInterval <= 0 { - opts.RenewCheckInterval = DefaultRenewCheckInterval - } - - // this must be set, because we cannot not - // safely assume that the Default Config - // is always the correct one to use - if opts.GetConfigForCert == nil { - panic("cache must be initialized with a GetConfigForCert callback") - } - - c := &Cache{ - options: opts, - cache: make(map[string]Certificate), - cacheIndex: make(map[string][]string), - stopChan: make(chan struct{}), - doneChan: make(chan struct{}), - } - - go c.maintainAssets() - - return c -} - -// Stop stops the maintenance goroutine for -// certificates in certCache. It blocks until -// stopping is complete. Once a cache is -// stopped, it cannot be reused. -func (certCache *Cache) Stop() { - close(certCache.stopChan) // signal to stop - <-certCache.doneChan // wait for stop to complete -} - -// CacheOptions is used to configure certificate caches. -// Once a cache has been created with certain options, -// those settings cannot be changed. -type CacheOptions struct { - // REQUIRED. 
A function that returns a configuration - // used for managing a certificate, or for accessing - // that certificate's asset storage (e.g. for - // OCSP staples, etc). The returned Config MUST - // be associated with the same Cache as the caller. - // - // The reason this is a callback function, dynamically - // returning a Config (instead of attaching a static - // pointer to a Config on each certificate) is because - // the config for how to manage a domain's certificate - // might change from maintenance to maintenance. The - // cache is so long-lived, we cannot assume that the - // host's situation will always be the same; e.g. the - // certificate might switch DNS providers, so the DNS - // challenge (if used) would need to be adjusted from - // the last time it was run ~8 weeks ago. - GetConfigForCert ConfigGetter - - // How often to check certificates for renewal; - // if unset, DefaultOCSPCheckInterval will be used. - OCSPCheckInterval time.Duration - - // How often to check certificates for renewal; - // if unset, DefaultRenewCheckInterval will be used. - RenewCheckInterval time.Duration -} - -// ConfigGetter is a function that returns a config that -// should be used when managing the given certificate -// or its assets. -type ConfigGetter func(Certificate) (Config, error) - -// cacheCertificate calls unsyncedCacheCertificate with a write lock. -// -// This function is safe for concurrent use. -func (certCache *Cache) cacheCertificate(cert Certificate) { - certCache.mu.Lock() - certCache.unsyncedCacheCertificate(cert) - certCache.mu.Unlock() -} - -// unsyncedCacheCertificate adds cert to the in-memory cache unless -// it already exists in the cache (according to cert.Hash). It -// updates the name index. -// -// This function is NOT safe for concurrent use. Callers MUST acquire -// a write lock on certCache.mu first. 
-func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) { - // no-op if this certificate already exists in the cache - if _, ok := certCache.cache[cert.Hash]; ok { - return - } - - // store the certificate - certCache.cache[cert.Hash] = cert - - // update the index so we can access it by name - for _, name := range cert.Names { - certCache.cacheIndex[name] = append(certCache.cacheIndex[name], cert.Hash) - } -} - -// removeCertificate removes cert from the cache. -// -// This function is NOT safe for concurrent use; callers -// MUST first acquire a write lock on certCache.mu. -func (certCache *Cache) removeCertificate(cert Certificate) { - // delete all mentions of this cert from the name index - for _, name := range cert.Names { - keyList := certCache.cacheIndex[name] - for i, cacheKey := range keyList { - if cacheKey == cert.Hash { - keyList = append(keyList[:i], keyList[i+1:]...) - } - } - if len(keyList) == 0 { - delete(certCache.cacheIndex, name) - } else { - certCache.cacheIndex[name] = keyList - } - } - - // delete the actual cert from the cache - delete(certCache.cache, cert.Hash) -} - -// replaceCertificate atomically replaces oldCert with newCert in -// the cache. -// -// This method is safe for concurrent use. 
-func (certCache *Cache) replaceCertificate(oldCert, newCert Certificate) { - certCache.mu.Lock() - certCache.removeCertificate(oldCert) - certCache.unsyncedCacheCertificate(newCert) - certCache.mu.Unlock() -} - -func (certCache *Cache) getFirstMatchingCert(name string) (Certificate, bool) { - certCache.mu.RLock() - defer certCache.mu.RUnlock() - - allCertKeys := certCache.cacheIndex[name] - if len(allCertKeys) == 0 { - return Certificate{}, false - } - - cert, ok := certCache.cache[allCertKeys[0]] - return cert, ok -} - -// TODO: This seems unused (but could be useful if TLS -// handshakes serve up different certs for a single -// name depending on other properties such as key type) -func (certCache *Cache) getAllMatchingCerts(name string) []Certificate { - certCache.mu.RLock() - defer certCache.mu.RUnlock() - - allCertKeys := certCache.cacheIndex[name] - - certs := make([]Certificate, len(allCertKeys)) - for i := range allCertKeys { - certs[i] = certCache.cache[allCertKeys[i]] - } - - return certs -} - -func (certCache *Cache) getConfig(cert Certificate) (*Config, error) { - cfg, err := certCache.options.GetConfigForCert(cert) - if err != nil { - return nil, err - } - if cfg.certCache != nil && cfg.certCache != certCache { - return nil, fmt.Errorf("config returned for certificate %v is not nil and points to different cache; got %p, expected %p (this one)", - cert.Names, cfg.certCache, certCache) - } - return New(certCache, cfg), nil -} - -var ( - defaultCache *Cache - defaultCacheMu sync.Mutex -) diff --git a/vendor/github.com/mholt/certmagic/certificates.go b/vendor/github.com/mholt/certmagic/certificates.go deleted file mode 100644 index 5675a7c8c..000000000 --- a/vendor/github.com/mholt/certmagic/certificates.go +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "log" - "net" - "strings" - "time" - - "golang.org/x/crypto/ocsp" -) - -// Certificate is a tls.Certificate with associated metadata tacked on. -// Even if the metadata can be obtained by parsing the certificate, -// we are more efficient by extracting the metadata onto this struct. -type Certificate struct { - tls.Certificate - - // Names is the list of names this certificate is written for. - // The first is the CommonName (if any), the rest are SAN. - Names []string - - // NotAfter is when the certificate expires. - NotAfter time.Time - - // OCSP contains the certificate's parsed OCSP response. - OCSP *ocsp.Response - - // The hex-encoded hash of this cert's chain's bytes. - Hash string - - // Whether this certificate is under our management - managed bool -} - -// NeedsRenewal returns true if the certificate is -// expiring soon (according to cfg) or has expired. -func (cert Certificate) NeedsRenewal(cfg *Config) bool { - if cert.NotAfter.IsZero() { - return false - } - renewDurationBefore := DefaultRenewDurationBefore - if cfg.RenewDurationBefore > 0 { - renewDurationBefore = cfg.RenewDurationBefore - } - return time.Until(cert.NotAfter) < renewDurationBefore -} - -// CacheManagedCertificate loads the certificate for domain into the -// cache, from the TLS storage for managed certificates. It returns a -// copy of the Certificate that was put into the cache. -// -// This is a lower-level method; normally you'll call Manage() instead. 
-// -// This method is safe for concurrent use. -func (cfg *Config) CacheManagedCertificate(domain string) (Certificate, error) { - cert, err := cfg.loadManagedCertificate(domain) - if err != nil { - return cert, err - } - cfg.certCache.cacheCertificate(cert) - if cfg.OnEvent != nil { - cfg.OnEvent("cached_managed_cert", cert.Names) - } - return cert, nil -} - -// loadManagedCertificate loads the managed certificate for domain, -// but it does not add it to the cache. It just loads from storage. -func (cfg *Config) loadManagedCertificate(domain string) (Certificate, error) { - certRes, err := cfg.loadCertResource(domain) - if err != nil { - return Certificate{}, err - } - cert, err := makeCertificateWithOCSP(cfg.Storage, certRes.Certificate, certRes.PrivateKey) - if err != nil { - return cert, err - } - cert.managed = true - return cert, nil -} - -// CacheUnmanagedCertificatePEMFile loads a certificate for host using certFile -// and keyFile, which must be in PEM format. It stores the certificate in -// the in-memory cache. -// -// This method is safe for concurrent use. -func (cfg *Config) CacheUnmanagedCertificatePEMFile(certFile, keyFile string) error { - cert, err := makeCertificateFromDiskWithOCSP(cfg.Storage, certFile, keyFile) - if err != nil { - return err - } - cfg.certCache.cacheCertificate(cert) - if cfg.OnEvent != nil { - cfg.OnEvent("cached_unmanaged_cert", cert.Names) - } - return nil -} - -// CacheUnmanagedTLSCertificate adds tlsCert to the certificate cache. -// It staples OCSP if possible. -// -// This method is safe for concurrent use. 
-func (cfg *Config) CacheUnmanagedTLSCertificate(tlsCert tls.Certificate) error { - var cert Certificate - err := fillCertFromLeaf(&cert, tlsCert) - if err != nil { - return err - } - err = stapleOCSP(cfg.Storage, &cert, nil) - if err != nil { - log.Printf("[WARNING] Stapling OCSP: %v", err) - } - if cfg.OnEvent != nil { - cfg.OnEvent("cached_unmanaged_cert", cert.Names) - } - cfg.certCache.cacheCertificate(cert) - return nil -} - -// CacheUnmanagedCertificatePEMBytes makes a certificate out of the PEM bytes -// of the certificate and key, then caches it in memory. -// -// This method is safe for concurrent use. -func (cfg *Config) CacheUnmanagedCertificatePEMBytes(certBytes, keyBytes []byte) error { - cert, err := makeCertificateWithOCSP(cfg.Storage, certBytes, keyBytes) - if err != nil { - return err - } - cfg.certCache.cacheCertificate(cert) - if cfg.OnEvent != nil { - cfg.OnEvent("cached_unmanaged_cert", cert.Names) - } - return nil -} - -// makeCertificateFromDiskWithOCSP makes a Certificate by loading the -// certificate and key files. It fills out all the fields in -// the certificate except for the Managed and OnDemand flags. -// (It is up to the caller to set those.) It staples OCSP. -func makeCertificateFromDiskWithOCSP(storage Storage, certFile, keyFile string) (Certificate, error) { - certPEMBlock, err := ioutil.ReadFile(certFile) - if err != nil { - return Certificate{}, err - } - keyPEMBlock, err := ioutil.ReadFile(keyFile) - if err != nil { - return Certificate{}, err - } - return makeCertificateWithOCSP(storage, certPEMBlock, keyPEMBlock) -} - -// makeCertificateWithOCSP is the same as makeCertificate except that it also -// staples OCSP to the certificate. 
-func makeCertificateWithOCSP(storage Storage, certPEMBlock, keyPEMBlock []byte) (Certificate, error) { - cert, err := makeCertificate(certPEMBlock, keyPEMBlock) - if err != nil { - return cert, err - } - err = stapleOCSP(storage, &cert, certPEMBlock) - if err != nil { - log.Printf("[WARNING] Stapling OCSP: %v", err) - } - return cert, nil -} - -// makeCertificate turns a certificate PEM bundle and a key PEM block into -// a Certificate with necessary metadata from parsing its bytes filled into -// its struct fields for convenience (except for the OnDemand and Managed -// flags; it is up to the caller to set those properties!). This function -// does NOT staple OCSP. -func makeCertificate(certPEMBlock, keyPEMBlock []byte) (Certificate, error) { - var cert Certificate - - // Convert to a tls.Certificate - tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) - if err != nil { - return cert, err - } - - // Extract necessary metadata - err = fillCertFromLeaf(&cert, tlsCert) - if err != nil { - return cert, err - } - - return cert, nil -} - -// fillCertFromLeaf populates metadata fields on cert from tlsCert. 
-func fillCertFromLeaf(cert *Certificate, tlsCert tls.Certificate) error { - if len(tlsCert.Certificate) == 0 { - return fmt.Errorf("certificate is empty") - } - cert.Certificate = tlsCert - - // the leaf cert should be the one for the site; it has what we need - leaf, err := x509.ParseCertificate(tlsCert.Certificate[0]) - if err != nil { - return err - } - - if leaf.Subject.CommonName != "" { // TODO: CommonName is deprecated - cert.Names = []string{strings.ToLower(leaf.Subject.CommonName)} - } - for _, name := range leaf.DNSNames { - if name != leaf.Subject.CommonName { // TODO: CommonName is deprecated - cert.Names = append(cert.Names, strings.ToLower(name)) - } - } - for _, ip := range leaf.IPAddresses { - if ipStr := ip.String(); ipStr != leaf.Subject.CommonName { // TODO: CommonName is deprecated - cert.Names = append(cert.Names, strings.ToLower(ipStr)) - } - } - for _, email := range leaf.EmailAddresses { - if email != leaf.Subject.CommonName { // TODO: CommonName is deprecated - cert.Names = append(cert.Names, strings.ToLower(email)) - } - } - if len(cert.Names) == 0 { - return fmt.Errorf("certificate has no names") - } - - // save the hash of this certificate (chain) and - // expiration date, for necessity and efficiency - cert.Hash = hashCertificateChain(cert.Certificate.Certificate) - cert.NotAfter = leaf.NotAfter - - return nil -} - -// managedCertInStorageExpiresSoon returns true if cert (being a -// managed certificate) is expiring within RenewDurationBefore. -// It returns false if there was an error checking the expiration -// of the certificate as found in storage, or if the certificate -// in storage is NOT expiring soon. A certificate that is expiring -// soon in our cache but is not expiring soon in storage probably -// means that another instance renewed the certificate in the -// meantime, and it would be a good idea to simply load the cert -// into our cache rather than repeating the renewal process again. 
-func (cfg *Config) managedCertInStorageExpiresSoon(cert Certificate) (bool, error) { - certRes, err := cfg.loadCertResource(cert.Names[0]) - if err != nil { - return false, err - } - tlsCert, err := tls.X509KeyPair(certRes.Certificate, certRes.PrivateKey) - if err != nil { - return false, err - } - leaf, err := x509.ParseCertificate(tlsCert.Certificate[0]) - if err != nil { - return false, err - } - timeLeft := leaf.NotAfter.Sub(time.Now().UTC()) - return timeLeft < cfg.RenewDurationBefore, nil -} - -// reloadManagedCertificate reloads the certificate corresponding to the name(s) -// on oldCert into the cache, from storage. This also replaces the old certificate -// with the new one, so that all configurations that used the old cert now point -// to the new cert. -func (cfg *Config) reloadManagedCertificate(oldCert Certificate) error { - newCert, err := cfg.loadManagedCertificate(oldCert.Names[0]) - if err != nil { - return fmt.Errorf("loading managed certificate for %v from storage: %v", oldCert.Names, err) - } - cfg.certCache.replaceCertificate(oldCert, newCert) - return nil -} - -// HostQualifies returns true if the hostname alone -// appears eligible for automagic TLS. For example: -// localhost, empty hostname, and IP addresses are -// not eligible because we cannot obtain certificates -// for those names. Wildcard names are allowed, as long -// as they conform to CABF requirements (only one wildcard -// label, and it must be the left-most label). 
-func HostQualifies(hostname string) bool { - return hostname != "localhost" && // localhost is ineligible - - // hostname must not be empty - strings.TrimSpace(hostname) != "" && - - // only one wildcard label allowed, and it must be left-most - (!strings.Contains(hostname, "*") || - (strings.Count(hostname, "*") == 1 && - strings.HasPrefix(hostname, "*."))) && - - // must not start or end with a dot - !strings.HasPrefix(hostname, ".") && - !strings.HasSuffix(hostname, ".") && - - // cannot be an IP address, see - // https://community.letsencrypt.org/t/certificate-for-static-ip/84/2?u=mholt - net.ParseIP(hostname) == nil -} diff --git a/vendor/github.com/mholt/certmagic/certmagic.go b/vendor/github.com/mholt/certmagic/certmagic.go deleted file mode 100644 index 4739ad270..000000000 --- a/vendor/github.com/mholt/certmagic/certmagic.go +++ /dev/null @@ -1,482 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package certmagic automates the obtaining and renewal of TLS certificates, -// including TLS & HTTPS best practices such as robust OCSP stapling, caching, -// HTTP->HTTPS redirects, and more. -// -// Its high-level API serves your HTTP handlers over HTTPS if you simply give -// the domain name(s) and the http.Handler; CertMagic will create and run -// the HTTPS server for you, fully managing certificates during the lifetime -// of the server. 
Similarly, it can be used to start TLS listeners or return -// a ready-to-use tls.Config -- whatever layer you need TLS for, CertMagic -// makes it easy. See the HTTPS, Listen, and TLS functions for that. -// -// If you need more control, create a Cache using NewCache() and then make -// a Config using New(). You can then call Manage() on the config. But if -// you use this lower-level API, you'll have to be sure to solve the HTTP -// and TLS-ALPN challenges yourself (unless you disabled them or use the -// DNS challenge) by using the provided Config.GetCertificate function -// in your tls.Config and/or Config.HTTPChallangeHandler in your HTTP -// handler. -// -// See the package's README for more instruction. -package certmagic - -import ( - "crypto/tls" - "fmt" - "log" - "net" - "net/http" - "net/url" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/go-acme/lego/certcrypto" -) - -// HTTPS serves mux for all domainNames using the HTTP -// and HTTPS ports, redirecting all HTTP requests to HTTPS. -// It uses the Default config. -// -// This high-level convenience function is opinionated and -// applies sane defaults for production use, including -// timeouts for HTTP requests and responses. To allow very -// long-lived connections, you should make your own -// http.Server values and use this package's Listen(), TLS(), -// or Config.TLSConfig() functions to customize to your needs. -// For example, servers which need to support large uploads or -// downloads with slow clients may need to use longer timeouts, -// thus this function is not suitable. -// -// Calling this function signifies your acceptance to -// the CA's Subscriber Agreement and/or Terms of Service. 
-func HTTPS(domainNames []string, mux http.Handler) error { - if mux == nil { - mux = http.DefaultServeMux - } - - Default.Agreed = true - cfg := NewDefault() - - err := cfg.Manage(domainNames) - if err != nil { - return err - } - - httpWg.Add(1) - defer httpWg.Done() - - // if we haven't made listeners yet, do so now, - // and clean them up when all servers are done - lnMu.Lock() - if httpLn == nil && httpsLn == nil { - httpLn, err = net.Listen("tcp", fmt.Sprintf(":%d", HTTPPort)) - if err != nil { - lnMu.Unlock() - return err - } - - httpsLn, err = tls.Listen("tcp", fmt.Sprintf(":%d", HTTPSPort), cfg.TLSConfig()) - if err != nil { - httpLn.Close() - httpLn = nil - lnMu.Unlock() - return err - } - - go func() { - httpWg.Wait() - lnMu.Lock() - httpLn.Close() - httpsLn.Close() - lnMu.Unlock() - }() - } - hln, hsln := httpLn, httpsLn - lnMu.Unlock() - - // create HTTP/S servers that are configured - // with sane default timeouts and appropriate - // handlers (the HTTP server solves the HTTP - // challenge and issues redirects to HTTPS, - // while the HTTPS server simply serves the - // user's handler) - httpServer := &http.Server{ - ReadHeaderTimeout: 5 * time.Second, - ReadTimeout: 5 * time.Second, - WriteTimeout: 5 * time.Second, - IdleTimeout: 5 * time.Second, - Handler: cfg.HTTPChallengeHandler(http.HandlerFunc(httpRedirectHandler)), - } - httpsServer := &http.Server{ - ReadHeaderTimeout: 10 * time.Second, - ReadTimeout: 30 * time.Second, - WriteTimeout: 2 * time.Minute, - IdleTimeout: 5 * time.Minute, - Handler: mux, - } - - log.Printf("%v Serving HTTP->HTTPS on %s and %s", - domainNames, hln.Addr(), hsln.Addr()) - - go httpServer.Serve(hln) - return httpsServer.Serve(hsln) -} - -func httpRedirectHandler(w http.ResponseWriter, r *http.Request) { - toURL := "https://" - - // since we redirect to the standard HTTPS port, we - // do not need to include it in the redirect URL - requestHost, _, err := net.SplitHostPort(r.Host) - if err != nil { - requestHost = r.Host 
// host probably did not contain a port - } - - toURL += requestHost - toURL += r.URL.RequestURI() - - // get rid of this disgusting unencrypted HTTP connection 🤢 - w.Header().Set("Connection", "close") - - http.Redirect(w, r, toURL, http.StatusMovedPermanently) -} - -// TLS enables management of certificates for domainNames -// and returns a valid tls.Config. It uses the Default -// config. -// -// Because this is a convenience function that returns -// only a tls.Config, it does not assume HTTP is being -// served on the HTTP port, so the HTTP challenge is -// disabled (no HTTPChallengeHandler is necessary). The -// package variable Default is modified so that the -// HTTP challenge is disabled. -// -// Calling this function signifies your acceptance to -// the CA's Subscriber Agreement and/or Terms of Service. -func TLS(domainNames []string) (*tls.Config, error) { - Default.Agreed = true - Default.DisableHTTPChallenge = true - cfg := NewDefault() - return cfg.TLSConfig(), cfg.Manage(domainNames) -} - -// Listen manages certificates for domainName and returns a -// TLS listener. It uses the Default config. -// -// Because this convenience function returns only a TLS-enabled -// listener and does not presume HTTP is also being served, -// the HTTP challenge will be disabled. The package variable -// Default is modified so that the HTTP challenge is disabled. -// -// Calling this function signifies your acceptance to -// the CA's Subscriber Agreement and/or Terms of Service. -func Listen(domainNames []string) (net.Listener, error) { - Default.Agreed = true - Default.DisableHTTPChallenge = true - cfg := NewDefault() - err := cfg.Manage(domainNames) - if err != nil { - return nil, err - } - return tls.Listen("tcp", fmt.Sprintf(":%d", HTTPSPort), cfg.TLSConfig()) -} - -// Manage obtains certificates for domainNames and keeps them -// renewed using the Default config. 
-// -// This is a slightly lower-level function; you will need to -// wire up support for the ACME challenges yourself. You can -// obtain a Config to help you do that by calling NewDefault(). -// -// You will need to ensure that you use a TLS config that gets -// certificates from this Config and that the HTTP and TLS-ALPN -// challenges can be solved. The easiest way to do this is to -// use NewDefault().TLSConfig() as your TLS config and to wrap -// your HTTP handler with NewDefault().HTTPChallengeHandler(). -// If you don't have an HTTP server, you will need to disable -// the HTTP challenge. -// -// If you already have a TLS config you want to use, you can -// simply set its GetCertificate field to -// NewDefault().GetCertificate. -// -// Calling this function signifies your acceptance to -// the CA's Subscriber Agreement and/or Terms of Service. -func Manage(domainNames []string) error { - Default.Agreed = true - return NewDefault().Manage(domainNames) -} - -// OnDemandConfig contains some state relevant for providing -// on-demand TLS. Important note: If you are using the -// MaxObtain property to limit the maximum number of certs -// to be issued, the count of how many certs were issued -// will be reset if this struct gets garbage-collected. -type OnDemandConfig struct { - // If set, this function will be the absolute - // authority on whether the hostname (according - // to SNI) is allowed to try to get a cert. - DecisionFunc func(name string) error - - // If no DecisionFunc is set, this whitelist - // is the absolute authority as to whether - // a certificate should be allowed to be tried. - // Names are compared against SNI value. - HostWhitelist []string - - // If no DecisionFunc or HostWhitelist are set, - // then an HTTP request will be made to AskURL - // to determine if a certificate should be - // obtained. If the request fails or the response - // is anything other than 2xx status code, the - // issuance will be denied. 
- AskURL *url.URL - - // If no DecisionFunc, HostWhitelist, or AskURL - // are set, then only this many certificates may - // be obtained on-demand; this field is required - // if all others are empty, otherwise, all cert - // issuances will fail. - MaxObtain int32 - - // The number of certificates that have been issued on-demand - // by this config. It is only safe to modify this count atomically. - // If it reaches MaxObtain, on-demand issuances must fail. - // Note that this will necessarily be reset to 0 if the - // struct leaves scope and/or gets garbage-collected. - obtainedCount int32 -} - -// Allowed returns whether the issuance for name is allowed according to o. -func (o *OnDemandConfig) Allowed(name string) error { - // The decision function has absolute authority, if set - if o.DecisionFunc != nil { - return o.DecisionFunc(name) - } - - // Otherwise, the host whitelist has decision authority - if len(o.HostWhitelist) > 0 { - return o.checkWhitelistForObtainingNewCerts(name) - } - - // Otherwise, a URL is checked for permission to issue this cert - if o.AskURL != nil { - return o.checkURLForObtainingNewCerts(name) - } - - // Otherwise use the limit defined by the "max_certs" setting - return o.checkLimitsForObtainingNewCerts(name) -} - -func (o *OnDemandConfig) whitelistContains(name string) bool { - for _, n := range o.HostWhitelist { - if strings.ToLower(n) == strings.ToLower(name) { - return true - } - } - return false -} - -func (o *OnDemandConfig) checkWhitelistForObtainingNewCerts(name string) error { - if !o.whitelistContains(name) { - return fmt.Errorf("%s: name is not whitelisted", name) - } - return nil -} - -func (o *OnDemandConfig) checkURLForObtainingNewCerts(name string) error { - client := http.Client{ - Timeout: 10 * time.Second, - CheckRedirect: func(req *http.Request, via []*http.Request) error { - return fmt.Errorf("following http redirects is not allowed") - }, - } - - // Copy the URL from the config in order to modify it for this 
request - askURL := new(url.URL) - *askURL = *o.AskURL - - query := askURL.Query() - query.Set("domain", name) - askURL.RawQuery = query.Encode() - - resp, err := client.Get(askURL.String()) - if err != nil { - return fmt.Errorf("error checking %v to deterine if certificate for hostname '%s' should be allowed: %v", o.AskURL, name, err) - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode > 299 { - return fmt.Errorf("certificate for hostname '%s' not allowed, non-2xx status code %d returned from %v", name, resp.StatusCode, o.AskURL) - } - - return nil -} - -// checkLimitsForObtainingNewCerts checks to see if name can be issued right -// now according the maximum count defined in the configuration. If a non-nil -// error is returned, do not issue a new certificate for name. -func (o *OnDemandConfig) checkLimitsForObtainingNewCerts(name string) error { - if o.MaxObtain == 0 { - return fmt.Errorf("%s: no certificates allowed to be issued on-demand", name) - } - - // User can set hard limit for number of certs for the process to issue - if o.MaxObtain > 0 && - atomic.LoadInt32(&o.obtainedCount) >= o.MaxObtain { - return fmt.Errorf("%s: maximum certificates issued (%d)", name, o.MaxObtain) - } - - // Make sure name hasn't failed a challenge recently - failedIssuanceMu.RLock() - when, ok := failedIssuance[name] - failedIssuanceMu.RUnlock() - if ok { - return fmt.Errorf("%s: throttled; refusing to issue cert since last attempt on %s failed", name, when.String()) - } - - // Make sure, if we've issued a few certificates already, that we haven't - // issued any recently - lastIssueTimeMu.Lock() - since := time.Since(lastIssueTime) - lastIssueTimeMu.Unlock() - if atomic.LoadInt32(&o.obtainedCount) >= 10 && since < 10*time.Minute { - return fmt.Errorf("%s: throttled; last certificate was obtained %v ago", name, since) - } - - // Good to go 👠- return nil -} - -// failedIssuance is a set of names that we recently failed to get a -// certificate for from 
the ACME CA. They are removed after some time. -// When a name is in this map, do not issue a certificate for it on-demand. -var failedIssuance = make(map[string]time.Time) -var failedIssuanceMu sync.RWMutex - -// lastIssueTime records when we last obtained a certificate successfully. -// If this value is recent, do not make any on-demand certificate requests. -var lastIssueTime time.Time -var lastIssueTimeMu sync.Mutex - -// isLoopback returns true if the hostname of addr looks -// explicitly like a common local hostname. addr must only -// be a host or a host:port combination. -func isLoopback(addr string) bool { - host, _, err := net.SplitHostPort(strings.ToLower(addr)) - if err != nil { - host = addr // happens if the addr is only a hostname - } - return host == "localhost" || - strings.Trim(host, "[]") == "::1" || - strings.HasPrefix(host, "127.") -} - -// isInternal returns true if the IP of addr -// belongs to a private network IP range. addr -// must only be an IP or an IP:port combination. -// Loopback addresses are considered false. -func isInternal(addr string) bool { - privateNetworks := []string{ - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - "fc00::/7", - } - host, _, err := net.SplitHostPort(addr) - if err != nil { - host = addr // happens if the addr is just a hostname, missing port - // if we encounter an error, the brackets need to be stripped - // because SplitHostPort didn't do it for us - host = strings.Trim(host, "[]") - } - ip := net.ParseIP(host) - if ip == nil { - return false - } - for _, privateNetwork := range privateNetworks { - _, ipnet, _ := net.ParseCIDR(privateNetwork) - if ipnet.Contains(ip) { - return true - } - } - return false -} - -// Default contains the package defaults for the -// various Config fields. This is used as a template -// when creating your own Configs with New(), and it -// is also used as the Config by all the high-level -// functions in this package. 
-// -// The fields of this value will be used for Config -// fields which are unset. Feel free to modify these -// defaults, but do not use this Config by itself: it -// is only a template. Valid configurations can be -// obtained by calling New() (if you have your own -// certificate cache) or NewDefault() (if you only -// need a single config and want to use the default -// cache). This is the only Config which can access -// the default certificate cache. -var Default = Config{ - CA: LetsEncryptProductionCA, - RenewDurationBefore: DefaultRenewDurationBefore, - RenewDurationBeforeAtStartup: DefaultRenewDurationBeforeAtStartup, - KeyType: certcrypto.EC256, - Storage: defaultFileStorage, -} - -const ( - // HTTPChallengePort is the officially-designated port for - // the HTTP challenge according to the ACME spec. - HTTPChallengePort = 80 - - // TLSALPNChallengePort is the officially-designated port for - // the TLS-ALPN challenge according to the ACME spec. - TLSALPNChallengePort = 443 -) - -// Some well-known CA endpoints available to use. -const ( - LetsEncryptStagingCA = "https://acme-staging-v02.api.letsencrypt.org/directory" - LetsEncryptProductionCA = "https://acme-v02.api.letsencrypt.org/directory" -) - -// Port variables must remain their defaults unless you -// forward packets from the defaults to whatever these -// are set to; otherwise ACME challenges will fail. -var ( - // HTTPPort is the port on which to serve HTTP - // and, by extension, the HTTP challenge (unless - // Default.AltHTTPPort is set). - HTTPPort = 80 - - // HTTPSPort is the port on which to serve HTTPS - // and, by extension, the TLS-ALPN challenge - // (unless Default.AltTLSALPNPort is set). - HTTPSPort = 443 -) - -// Variables for conveniently serving HTTPS. 
-var ( - httpLn, httpsLn net.Listener - lnMu sync.Mutex - httpWg sync.WaitGroup -) diff --git a/vendor/github.com/mholt/certmagic/client.go b/vendor/github.com/mholt/certmagic/client.go deleted file mode 100644 index b1e417a31..000000000 --- a/vendor/github.com/mholt/certmagic/client.go +++ /dev/null @@ -1,411 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "fmt" - "log" - "net" - "net/url" - "strings" - "sync" - "time" - - "github.com/go-acme/lego/certificate" - "github.com/go-acme/lego/challenge" - "github.com/go-acme/lego/challenge/http01" - "github.com/go-acme/lego/challenge/tlsalpn01" - "github.com/go-acme/lego/lego" - "github.com/go-acme/lego/registration" -) - -// acmeMu ensures that only one ACME challenge occurs at a time. -var acmeMu sync.Mutex - -// acmeClient is a wrapper over acme.Client with -// some custom state attached. It is used to obtain, -// renew, and revoke certificates with ACME. -type acmeClient struct { - config *Config - acmeClient *lego.Client -} - -// listenerAddressInUse returns true if a TCP connection -// can be made to addr within a short time interval. 
-func listenerAddressInUse(addr string) bool { - conn, err := net.DialTimeout("tcp", addr, 250*time.Millisecond) - if err == nil { - conn.Close() - } - return err == nil -} - -func (cfg *Config) newManager(interactive bool) (Manager, error) { - if cfg.NewManager != nil { - return cfg.NewManager(interactive) - } - return cfg.newACMEClient(interactive) -} - -func (cfg *Config) newACMEClient(interactive bool) (*acmeClient, error) { - // look up or create the user account - leUser, err := cfg.getUser(cfg.Email) - if err != nil { - return nil, err - } - - // ensure key type and timeout are set - keyType := cfg.KeyType - if keyType == "" { - keyType = Default.KeyType - } - certObtainTimeout := cfg.CertObtainTimeout - if certObtainTimeout == 0 { - certObtainTimeout = Default.CertObtainTimeout - } - - // ensure CA URL (directory endpoint) is set - caURL := Default.CA - if cfg.CA != "" { - caURL = cfg.CA - } - - // ensure endpoint is secure (assume HTTPS if scheme is missing) - if !strings.Contains(caURL, "://") { - caURL = "https://" + caURL - } - u, err := url.Parse(caURL) - if err != nil { - return nil, err - } - - if u.Scheme != "https" && !isLoopback(u.Host) && !isInternal(u.Host) { - return nil, fmt.Errorf("%s: insecure CA URL (HTTPS required)", caURL) - } - - clientKey := caURL + leUser.Email + string(keyType) - - // if an underlying client with this configuration already exists, reuse it - // TODO: Could this be a global cache instead, perhaps? 
- cfg.acmeClientsMu.Lock() - client, ok := cfg.acmeClients[clientKey] - if !ok { - // the client facilitates our communication with the CA server - legoCfg := lego.NewConfig(&leUser) - legoCfg.CADirURL = caURL - legoCfg.UserAgent = buildUAString() - legoCfg.HTTPClient.Timeout = HTTPTimeout - legoCfg.Certificate = lego.CertificateConfig{ - KeyType: keyType, - Timeout: certObtainTimeout, - } - client, err = lego.NewClient(legoCfg) - if err != nil { - cfg.acmeClientsMu.Unlock() - return nil, err - } - cfg.acmeClients[clientKey] = client - } - cfg.acmeClientsMu.Unlock() - - // if not registered, the user must register an account - // with the CA and agree to terms - if leUser.Registration == nil { - if interactive { // can't prompt a user who isn't there - termsURL := client.GetToSURL() - if !cfg.Agreed && termsURL != "" { - cfg.Agreed = cfg.askUserAgreement(client.GetToSURL()) - } - if !cfg.Agreed && termsURL != "" { - return nil, fmt.Errorf("user must agree to CA terms") - } - } - - reg, err := client.Registration.Register(registration.RegisterOptions{TermsOfServiceAgreed: cfg.Agreed}) - if err != nil { - return nil, fmt.Errorf("registration error: %v", err) - } - leUser.Registration = reg - - // persist the user to storage - err = cfg.saveUser(leUser) - if err != nil { - return nil, fmt.Errorf("could not save user: %v", err) - } - } - - c := &acmeClient{ - config: cfg, - acmeClient: client, - } - - if cfg.DNSProvider == nil { - // Use HTTP and TLS-ALPN challenges by default - - // figure out which ports we'll be serving the challenges on - useHTTPPort := HTTPChallengePort - useTLSALPNPort := TLSALPNChallengePort - if HTTPPort > 0 && HTTPPort != HTTPChallengePort { - useHTTPPort = HTTPPort - } - if HTTPSPort > 0 && HTTPSPort != TLSALPNChallengePort { - useTLSALPNPort = HTTPSPort - } - if cfg.AltHTTPPort > 0 { - useHTTPPort = cfg.AltHTTPPort - } - if cfg.AltTLSALPNPort > 0 { - useTLSALPNPort = cfg.AltTLSALPNPort - } - - // If this machine is already listening on the 
HTTP or TLS-ALPN port - // designated for the challenges, then we need to handle the challenges - // a little differently: for HTTP, we will answer the challenge request - // using our own HTTP handler (the HandleHTTPChallenge function - this - // works only because challenge info is written to storage associated - // with cfg when the challenge is initiated); for TLS-ALPN, we will add - // the challenge cert to our cert cache and serve it up during the - // handshake. As for the default solvers... we are careful to honor the - // listener bind preferences by using cfg.ListenHost. - var httpSolver, alpnSolver challenge.Provider - httpSolver = http01.NewProviderServer(cfg.ListenHost, fmt.Sprintf("%d", useHTTPPort)) - alpnSolver = tlsalpn01.NewProviderServer(cfg.ListenHost, fmt.Sprintf("%d", useTLSALPNPort)) - if listenerAddressInUse(net.JoinHostPort(cfg.ListenHost, fmt.Sprintf("%d", useHTTPPort))) { - httpSolver = nil - } - if listenerAddressInUse(net.JoinHostPort(cfg.ListenHost, fmt.Sprintf("%d", useTLSALPNPort))) { - alpnSolver = tlsALPNSolver{certCache: cfg.certCache} - } - - // because of our nifty Storage interface, we can distribute the HTTP and - // TLS-ALPN challenges across all instances that share the same storage - - // in fact, this is required now for successful solving of the HTTP challenge - // if the port is already in use, since we must write the challenge info - // to storage for the HTTPChallengeHandler to solve it successfully - c.acmeClient.Challenge.SetHTTP01Provider(distributedSolver{ - config: cfg, - providerServer: httpSolver, - }) - c.acmeClient.Challenge.SetTLSALPN01Provider(distributedSolver{ - config: cfg, - providerServer: alpnSolver, - }) - - // disable any challenges that should not be used - if cfg.DisableHTTPChallenge { - c.acmeClient.Challenge.Remove(challenge.HTTP01) - } - if cfg.DisableTLSALPNChallenge { - c.acmeClient.Challenge.Remove(challenge.TLSALPN01) - } - } else { - // Otherwise, use DNS challenge exclusively - 
c.acmeClient.Challenge.Remove(challenge.HTTP01) - c.acmeClient.Challenge.Remove(challenge.TLSALPN01) - c.acmeClient.Challenge.SetDNS01Provider(cfg.DNSProvider) - } - - return c, nil -} - -// lockKey returns a key for a lock that is specific to the operation -// named op being performed related to domainName and this config's CA. -func (cfg *Config) lockKey(op, domainName string) string { - return fmt.Sprintf("%s_%s_%s", op, domainName, cfg.CA) -} - -// Obtain obtains a single certificate for name. It stores the certificate -// on the disk if successful. This function is safe for concurrent use. -// -// Our storage mechanism only supports one name per certificate, so this -// function (along with Renew and Revoke) only accepts one domain as input. -// It could be easily modified to support SAN certificates if our storage -// mechanism is upgraded later, but that will increase logical complexity -// in other areas. -// -// Callers who have access to a Config value should use the ObtainCert -// method on that instead of this lower-level method. 
-func (c *acmeClient) Obtain(name string) error { - // ensure idempotency of the obtain operation for this name - lockKey := c.config.lockKey("cert_acme", name) - err := c.config.Storage.Lock(lockKey) - if err != nil { - return err - } - defer func() { - if err := c.config.Storage.Unlock(lockKey); err != nil { - log.Printf("[ERROR][%s] Obtain: Unable to unlock '%s': %v", name, lockKey, err) - } - }() - - // check if obtain is still needed -- might have - // been obtained during lock - if c.config.storageHasCertResources(name) { - log.Printf("[INFO][%s] Obtain: Certificate already exists in storage", name) - return nil - } - - for attempts := 0; attempts < 2; attempts++ { - request := certificate.ObtainRequest{ - Domains: []string{name}, - Bundle: true, - MustStaple: c.config.MustStaple, - } - acmeMu.Lock() - certificate, err := c.acmeClient.Certificate.Obtain(request) - acmeMu.Unlock() - if err != nil { - return fmt.Errorf("[%s] failed to obtain certificate: %s", name, err) - } - - // double-check that we actually got a certificate, in case there's a bug upstream (see issue mholt/caddy#2121) - if certificate.Domain == "" || certificate.Certificate == nil { - return fmt.Errorf("returned certificate was empty; probably an unchecked error obtaining it") - } - - // Success - immediately save the certificate resource - err = c.config.saveCertResource(certificate) - if err != nil { - return fmt.Errorf("error saving assets for %v: %v", name, err) - } - - break - } - - if c.config.OnEvent != nil { - c.config.OnEvent("acme_cert_obtained", name) - } - - return nil -} - -// Renew renews the managed certificate for name. It puts the renewed -// certificate into storage (not the cache). This function is safe for -// concurrent use. -// -// Callers who have access to a Config value should use the RenewCert -// method on that instead of this lower-level method. 
-func (c *acmeClient) Renew(name string) error { - // ensure idempotency of the renew operation for this name - lockKey := c.config.lockKey("cert_acme", name) - err := c.config.Storage.Lock(lockKey) - if err != nil { - return err - } - defer func() { - if err := c.config.Storage.Unlock(lockKey); err != nil { - log.Printf("[ERROR][%s] Renew: Unable to unlock '%s': %v", name, lockKey, err) - } - }() - - // Prepare for renewal (load PEM cert, key, and meta) - certRes, err := c.config.loadCertResource(name) - if err != nil { - return err - } - - // Check if renew is still needed - might have been renewed while waiting for lock - if !c.config.managedCertNeedsRenewal(certRes) { - log.Printf("[INFO][%s] Renew: Certificate appears to have been renewed already", name) - return nil - } - - // Perform renewal and retry if necessary, but not too many times. - var newCertMeta *certificate.Resource - var success bool - for attempts := 0; attempts < 2; attempts++ { - acmeMu.Lock() - newCertMeta, err = c.acmeClient.Certificate.Renew(certRes, true, c.config.MustStaple) - acmeMu.Unlock() - if err == nil { - // double-check that we actually got a certificate; check a couple fields, just in case - if newCertMeta == nil || newCertMeta.Domain == "" || newCertMeta.Certificate == nil { - err = fmt.Errorf("returned certificate was empty; probably an unchecked error renewing it") - } else { - success = true - break - } - } - - // wait a little bit and try again - wait := 10 * time.Second - log.Printf("[ERROR] Renewing [%v]: %v; trying again in %s", name, err, wait) - time.Sleep(wait) - } - - if !success { - return fmt.Errorf("too many renewal attempts; last error: %v", err) - } - - if c.config.OnEvent != nil { - c.config.OnEvent("acme_cert_renewed", name) - } - - return c.config.saveCertResource(newCertMeta) -} - -// Revoke revokes the certificate for name and deletes -// it from storage. 
-func (c *acmeClient) Revoke(name string) error { - if !c.config.Storage.Exists(StorageKeys.SitePrivateKey(c.config.CA, name)) { - return fmt.Errorf("private key not found for %s", name) - } - - certRes, err := c.config.loadCertResource(name) - if err != nil { - return err - } - - err = c.acmeClient.Certificate.Revoke(certRes.Certificate) - if err != nil { - return err - } - - if c.config.OnEvent != nil { - c.config.OnEvent("acme_cert_revoked", name) - } - - err = c.config.Storage.Delete(StorageKeys.SiteCert(c.config.CA, name)) - if err != nil { - return fmt.Errorf("certificate revoked, but unable to delete certificate file: %v", err) - } - err = c.config.Storage.Delete(StorageKeys.SitePrivateKey(c.config.CA, name)) - if err != nil { - return fmt.Errorf("certificate revoked, but unable to delete private key: %v", err) - } - err = c.config.Storage.Delete(StorageKeys.SiteMeta(c.config.CA, name)) - if err != nil { - return fmt.Errorf("certificate revoked, but unable to delete certificate metadata: %v", err) - } - - return nil -} - -func buildUAString() string { - ua := "CertMagic" - if UserAgent != "" { - ua += " " + UserAgent - } - return ua -} - -// Some default values passed down to the underlying lego client. -var ( - UserAgent string - HTTPTimeout = 30 * time.Second -) - -// Interface guard -var _ Manager = (*acmeClient)(nil) diff --git a/vendor/github.com/mholt/certmagic/config.go b/vendor/github.com/mholt/certmagic/config.go deleted file mode 100644 index 12ff698d7..000000000 --- a/vendor/github.com/mholt/certmagic/config.go +++ /dev/null @@ -1,469 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "crypto/tls" - "fmt" - "sync" - "time" - - "github.com/go-acme/lego/certcrypto" - "github.com/go-acme/lego/certificate" - "github.com/go-acme/lego/challenge" - "github.com/go-acme/lego/challenge/tlsalpn01" - "github.com/go-acme/lego/lego" -) - -// Config configures a certificate manager instance. -// An empty Config is not valid: use New() to obtain -// a valid Config. -type Config struct { - // The endpoint of the directory for the ACME - // CA we are to use - CA string - - // The email address to use when creating or - // selecting an existing ACME server account - Email string - - // Set to true if agreed to the CA's - // subscriber agreement - Agreed bool - - // Disable all HTTP challenges - DisableHTTPChallenge bool - - // Disable all TLS-ALPN challenges - DisableTLSALPNChallenge bool - - // How long before expiration to renew certificates - RenewDurationBefore time.Duration - - // How long before expiration to require a renewed - // certificate when in interactive mode, like when - // the program is first starting up (see - // mholt/caddy#1680). A wider window between - // RenewDurationBefore and this value will suppress - // errors under duress (bad) but hopefully this duration - // will give it enough time for the blockage to be - // relieved. - RenewDurationBeforeAtStartup time.Duration - - // An optional event callback clients can set - // to subscribe to certain things happening - // internally by this config; invocations are - // synchronous, so make them return quickly! 
- OnEvent func(event string, data interface{}) - - // The host (ONLY the host, not port) to listen - // on if necessary to start a listener to solve - // an ACME challenge - ListenHost string - - // The alternate port to use for the ACME HTTP - // challenge; if non-empty, this port will be - // used instead of HTTPChallengePort to spin up - // a listener for the HTTP challenge - AltHTTPPort int - - // The alternate port to use for the ACME - // TLS-ALPN challenge; the system must forward - // TLSALPNChallengePort to this port for - // challenge to succeed - AltTLSALPNPort int - - // The DNS provider to use when solving the - // ACME DNS challenge - DNSProvider challenge.Provider - - // The type of key to use when generating - // certificates - KeyType certcrypto.KeyType - - // The maximum amount of time to allow for - // obtaining a certificate. If empty, the - // default from the underlying lego lib is - // used. If set, it must not be too low so - // as to cancel orders too early, running - // the risk of rate limiting. - CertObtainTimeout time.Duration - - // DefaultServerName specifies a server name - // to use when choosing a certificate if the - // ClientHello's ServerName field is empty - DefaultServerName string - - // The state needed to operate on-demand TLS - OnDemand *OnDemandConfig - - // Add the must staple TLS extension to the - // CSR generated by lego/acme - MustStaple bool - - // The storage to access when storing or - // loading TLS assets - Storage Storage - - // NewManager returns a new Manager. If nil, - // an ACME client will be created and used. - NewManager func(interactive bool) (Manager, error) - - // Pointer to the in-memory certificate cache - certCache *Cache - - // Map of client config key to ACME clients - // so they can be reused - // TODO: It might be better if these were globally cached, rather than per-config, which are ephemeral... 
but maybe evict them after a certain time, like 1 day or something - acmeClients map[string]*lego.Client - acmeClientsMu *sync.Mutex -} - -// NewDefault makes a valid config based on the package -// Default config. Most users will call this function -// instead of New() since most use cases require only a -// single config for any and all certificates. -// -// If your requirements are more advanced (for example, -// multiple configs depending on the certificate), then use -// New() instead. (You will need to make your own Cache -// first.) If you only need a single Config to manage your -// certs (even if that config changes, as long as it is the -// only one), customize the Default package variable before -// calling NewDefault(). -// -// All calls to NewDefault() will return configs that use the -// same, default certificate cache. All configs returned -// by NewDefault() are based on the values of the fields of -// Default at the time it is called. -func NewDefault() *Config { - defaultCacheMu.Lock() - if defaultCache == nil { - defaultCache = NewCache(CacheOptions{ - // the cache will likely need to renew certificates, - // so it will need to know how to do that, which - // depends on the certificate being managed and which - // can change during the lifetime of the cache; this - // callback makes it possible to get the latest and - // correct config with which to manage the cert, - // but if the user does not provide one, we can only - // assume that we are to use the default config - GetConfigForCert: func(Certificate) (Config, error) { - return Default, nil - }, - }) - } - certCache := defaultCache - defaultCacheMu.Unlock() - - return newWithCache(certCache, Default) -} - -// New makes a new, valid config based on cfg and -// uses the provided certificate cache. certCache -// MUST NOT be nil or this function will panic. 
-// -// Use this method when you have an advanced use case -// that requires a custom certificate cache and config -// that may differ from the Default. For example, if -// not all certificates are managed/renewed the same -// way, you need to make your own Cache value with a -// GetConfigForCert callback that returns the correct -// configuration for each certificate. However, for -// the vast majority of cases, there will be only a -// single Config, thus the default cache (which always -// uses the default Config) and default config will -// suffice, and you should use New() instead. -func New(certCache *Cache, cfg Config) *Config { - if certCache == nil { - panic("a certificate cache is required") - } - if certCache.options.GetConfigForCert == nil { - panic("cache must have GetConfigForCert set in its options") - } - return newWithCache(certCache, cfg) -} - -// newWithCache ensures that cfg is a valid config by populating -// zero-value fields from the Default Config. If certCache is -// nil, this function panics. 
-func newWithCache(certCache *Cache, cfg Config) *Config { - if certCache == nil { - panic("cannot make a valid config without a pointer to a certificate cache") - } - - // fill in default values - if cfg.CA == "" { - cfg.CA = Default.CA - } - if cfg.Email == "" { - cfg.Email = Default.Email - } - if cfg.OnDemand == nil { - cfg.OnDemand = Default.OnDemand - } - if !cfg.Agreed { - cfg.Agreed = Default.Agreed - } - if !cfg.DisableHTTPChallenge { - cfg.DisableHTTPChallenge = Default.DisableHTTPChallenge - } - if !cfg.DisableTLSALPNChallenge { - cfg.DisableTLSALPNChallenge = Default.DisableTLSALPNChallenge - } - if cfg.RenewDurationBefore == 0 { - cfg.RenewDurationBefore = Default.RenewDurationBefore - } - if cfg.RenewDurationBeforeAtStartup == 0 { - cfg.RenewDurationBeforeAtStartup = Default.RenewDurationBeforeAtStartup - } - if cfg.OnEvent == nil { - cfg.OnEvent = Default.OnEvent - } - if cfg.ListenHost == "" { - cfg.ListenHost = Default.ListenHost - } - if cfg.AltHTTPPort == 0 { - cfg.AltHTTPPort = Default.AltHTTPPort - } - if cfg.AltTLSALPNPort == 0 { - cfg.AltTLSALPNPort = Default.AltTLSALPNPort - } - if cfg.DNSProvider == nil { - cfg.DNSProvider = Default.DNSProvider - } - if cfg.KeyType == "" { - cfg.KeyType = Default.KeyType - } - if cfg.CertObtainTimeout == 0 { - cfg.CertObtainTimeout = Default.CertObtainTimeout - } - if cfg.DefaultServerName == "" { - cfg.DefaultServerName = Default.DefaultServerName - } - if cfg.OnDemand == nil { - cfg.OnDemand = Default.OnDemand - } - if !cfg.MustStaple { - cfg.MustStaple = Default.MustStaple - } - if cfg.Storage == nil { - cfg.Storage = Default.Storage - } - if cfg.NewManager == nil { - cfg.NewManager = Default.NewManager - } - - // absolutely don't allow a nil storage, - // because that would make almost anything - // a config can do pointless - if cfg.Storage == nil { - cfg.Storage = defaultFileStorage - } - - // ensure the unexported fields are valid - cfg.certCache = certCache - cfg.acmeClients = 
make(map[string]*lego.Client) - cfg.acmeClientsMu = new(sync.Mutex) - - return &cfg -} - -// Manage causes the certificates for domainNames to be managed -// according to cfg. If cfg is enabled for OnDemand, then this -// simply whitelists the domain names. Otherwise, the certificate(s) -// for each name are loaded from storage or obtained from the CA; -// and if loaded from storage, renewed if they are expiring or -// expired. It then caches the certificate in memory and is -// prepared to serve them up during TLS handshakes. -func (cfg *Config) Manage(domainNames []string) error { - for _, domainName := range domainNames { - if !HostQualifies(domainName) { - return fmt.Errorf("name does not qualify for automatic certificate management: %s", domainName) - } - - // if on-demand is configured, simply whitelist this name - if cfg.OnDemand != nil { - if !cfg.OnDemand.whitelistContains(domainName) { - cfg.OnDemand.HostWhitelist = append(cfg.OnDemand.HostWhitelist, domainName) - } - continue - } - - // try loading an existing certificate; if it doesn't - // exist yet, obtain one and try loading it again - cert, err := cfg.CacheManagedCertificate(domainName) - if err != nil { - if _, ok := err.(ErrNotExist); ok { - // if it doesn't exist, get it, then try loading it again - err := cfg.ObtainCert(domainName, false) - if err != nil { - return fmt.Errorf("%s: obtaining certificate: %v", domainName, err) - } - cert, err = cfg.CacheManagedCertificate(domainName) - if err != nil { - return fmt.Errorf("%s: caching certificate after obtaining it: %v", domainName, err) - } - continue - } - return fmt.Errorf("%s: caching certificate: %v", domainName, err) - } - - // for existing certificates, make sure it is renewed - if cert.NeedsRenewal(cfg) { - err := cfg.RenewCert(domainName, false) - if err != nil { - return fmt.Errorf("%s: renewing certificate: %v", domainName, err) - } - } - } - - return nil -} - -// ObtainCert obtains a certificate for name using cfg, as long -// as a 
certificate does not already exist in storage for that -// name. The name must qualify and cfg must be flagged as Managed. -// This function is a no-op if storage already has a certificate -// for name. -// -// It only obtains and stores certificates (and their keys), -// it does not load them into memory. If interactive is true, -// the user may be shown a prompt. -func (cfg *Config) ObtainCert(name string, interactive bool) error { - if cfg.storageHasCertResources(name) { - return nil - } - skip, err := cfg.preObtainOrRenewChecks(name, interactive) - if err != nil { - return err - } - if skip { - return nil - } - manager, err := cfg.newManager(interactive) - if err != nil { - return err - } - return manager.Obtain(name) -} - -// RenewCert renews the certificate for name using cfg. It stows the -// renewed certificate and its assets in storage if successful. -func (cfg *Config) RenewCert(name string, interactive bool) error { - skip, err := cfg.preObtainOrRenewChecks(name, interactive) - if err != nil { - return err - } - if skip { - return nil - } - manager, err := cfg.newManager(interactive) - if err != nil { - return err - } - return manager.Renew(name) -} - -// RevokeCert revokes the certificate for domain via ACME protocol. -func (cfg *Config) RevokeCert(domain string, interactive bool) error { - manager, err := cfg.newManager(interactive) - if err != nil { - return err - } - return manager.Revoke(domain) -} - -// TLSConfig is an opinionated method that returns a -// recommended, modern TLS configuration that can be -// used to configure TLS listeners, which also supports -// the TLS-ALPN challenge and serves up certificates -// managed by cfg. -// -// Unlike the package TLS() function, this method does -// not, by itself, enable certificate management for -// any domain names. 
-// -// Feel free to further customize the returned tls.Config, -// but do not mess with the GetCertificate or NextProtos -// fields unless you know what you're doing, as they're -// necessary to solve the TLS-ALPN challenge. -func (cfg *Config) TLSConfig() *tls.Config { - return &tls.Config{ - // these two fields necessary for TLS-ALPN challenge - GetCertificate: cfg.GetCertificate, - NextProtos: []string{"h2", "http/1.1", tlsalpn01.ACMETLS1Protocol}, - - // the rest recommended for modern TLS servers - MinVersion: tls.VersionTLS12, - CurvePreferences: []tls.CurveID{ - tls.X25519, - tls.CurveP256, - }, - CipherSuites: preferredDefaultCipherSuites(), - PreferServerCipherSuites: true, - } -} - -// preObtainOrRenewChecks perform a few simple checks before -// obtaining or renewing a certificate with ACME, and returns -// whether this name should be skipped (like if it's not -// managed TLS) as well as any error. It ensures that the -// config is Managed, that the name qualifies for a certificate, -// and that an email address is available. -func (cfg *Config) preObtainOrRenewChecks(name string, allowPrompts bool) (bool, error) { - if !HostQualifies(name) { - return true, nil - } - - err := cfg.getEmail(allowPrompts) - if err != nil { - return false, err - } - - return false, nil -} - -// storageHasCertResources returns true if the storage -// associated with cfg's certificate cache has all the -// resources related to the certificate for domain: the -// certificate, the private key, and the metadata. 
-func (cfg *Config) storageHasCertResources(domain string) bool { - certKey := StorageKeys.SiteCert(cfg.CA, domain) - keyKey := StorageKeys.SitePrivateKey(cfg.CA, domain) - metaKey := StorageKeys.SiteMeta(cfg.CA, domain) - return cfg.Storage.Exists(certKey) && - cfg.Storage.Exists(keyKey) && - cfg.Storage.Exists(metaKey) -} - -// managedCertNeedsRenewal returns true if certRes is -// expiring soon or already expired, or if the process -// of checking the expiration returned an error. -func (cfg *Config) managedCertNeedsRenewal(certRes certificate.Resource) bool { - cert, err := makeCertificate(certRes.Certificate, certRes.PrivateKey) - if err != nil { - return true - } - return cert.NeedsRenewal(cfg) -} - -// Manager is a type that can manage a certificate. -// They are usually very short-lived. -type Manager interface { - Obtain(name string) error - Renew(name string) error - Revoke(name string) error -} diff --git a/vendor/github.com/mholt/certmagic/crypto.go b/vendor/github.com/mholt/certmagic/crypto.go deleted file mode 100644 index c98878fd1..000000000 --- a/vendor/github.com/mholt/certmagic/crypto.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package certmagic - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "crypto/x509" - "encoding/json" - "encoding/pem" - "fmt" - "hash/fnv" - - "github.com/go-acme/lego/certificate" - "github.com/klauspost/cpuid" -) - -// encodePrivateKey marshals a EC or RSA private key into a PEM-encoded array of bytes. -func encodePrivateKey(key crypto.PrivateKey) ([]byte, error) { - var pemType string - var keyBytes []byte - switch key := key.(type) { - case *ecdsa.PrivateKey: - var err error - pemType = "EC" - keyBytes, err = x509.MarshalECPrivateKey(key) - if err != nil { - return nil, err - } - case *rsa.PrivateKey: - pemType = "RSA" - keyBytes = x509.MarshalPKCS1PrivateKey(key) - } - pemKey := pem.Block{Type: pemType + " PRIVATE KEY", Bytes: keyBytes} - return pem.EncodeToMemory(&pemKey), nil -} - -// decodePrivateKey loads a PEM-encoded ECC/RSA private key from an array of bytes. -func decodePrivateKey(keyPEMBytes []byte) (crypto.PrivateKey, error) { - keyBlock, _ := pem.Decode(keyPEMBytes) - switch keyBlock.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(keyBlock.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(keyBlock.Bytes) - } - return nil, fmt.Errorf("unknown private key type") -} - -// parseCertsFromPEMBundle parses a certificate bundle from top to bottom and returns -// a slice of x509 certificates. This function will error if no certificates are found. 
-func parseCertsFromPEMBundle(bundle []byte) ([]*x509.Certificate, error) { - var certificates []*x509.Certificate - var certDERBlock *pem.Block - for { - certDERBlock, bundle = pem.Decode(bundle) - if certDERBlock == nil { - break - } - if certDERBlock.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(certDERBlock.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } - } - if len(certificates) == 0 { - return nil, fmt.Errorf("no certificates found in bundle") - } - return certificates, nil -} - -// fastHash hashes input using a hashing algorithm that -// is fast, and returns the hash as a hex-encoded string. -// Do not use this for cryptographic purposes. -func fastHash(input []byte) string { - h := fnv.New32a() - h.Write(input) - return fmt.Sprintf("%x", h.Sum32()) -} - -// saveCertResource saves the certificate resource to disk. This -// includes the certificate file itself, the private key, and the -// metadata file. -func (cfg *Config) saveCertResource(cert *certificate.Resource) error { - metaBytes, err := json.MarshalIndent(&cert, "", "\t") - if err != nil { - return fmt.Errorf("encoding certificate metadata: %v", err) - } - - all := []keyValue{ - { - key: StorageKeys.SiteCert(cfg.CA, cert.Domain), - value: cert.Certificate, - }, - { - key: StorageKeys.SitePrivateKey(cfg.CA, cert.Domain), - value: cert.PrivateKey, - }, - { - key: StorageKeys.SiteMeta(cfg.CA, cert.Domain), - value: metaBytes, - }, - } - - return storeTx(cfg.Storage, all) -} - -func (cfg *Config) loadCertResource(domain string) (certificate.Resource, error) { - var certRes certificate.Resource - certBytes, err := cfg.Storage.Load(StorageKeys.SiteCert(cfg.CA, domain)) - if err != nil { - return certRes, err - } - keyBytes, err := cfg.Storage.Load(StorageKeys.SitePrivateKey(cfg.CA, domain)) - if err != nil { - return certRes, err - } - metaBytes, err := cfg.Storage.Load(StorageKeys.SiteMeta(cfg.CA, domain)) - if err != nil { - return certRes, 
err - } - err = json.Unmarshal(metaBytes, &certRes) - if err != nil { - return certRes, fmt.Errorf("decoding certificate metadata: %v", err) - } - certRes.Certificate = certBytes - certRes.PrivateKey = keyBytes - return certRes, nil -} - -// hashCertificateChain computes the unique hash of certChain, -// which is the chain of DER-encoded bytes. It returns the -// hex encoding of the hash. -func hashCertificateChain(certChain [][]byte) string { - h := sha256.New() - for _, certInChain := range certChain { - h.Write(certInChain) - } - return fmt.Sprintf("%x", h.Sum(nil)) -} - -// preferredDefaultCipherSuites returns an appropriate -// cipher suite to use depending on hardware support -// for AES-NI. -// -// See https://github.com/mholt/caddy/issues/1674 -func preferredDefaultCipherSuites() []uint16 { - if cpuid.CPU.AesNi() { - return defaultCiphersPreferAES - } - return defaultCiphersPreferChaCha -} - -var ( - defaultCiphersPreferAES = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - } - defaultCiphersPreferChaCha = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - } -) diff --git a/vendor/github.com/mholt/certmagic/filestorage.go b/vendor/github.com/mholt/certmagic/filestorage.go deleted file mode 100644 index 0de8f746e..000000000 --- a/vendor/github.com/mholt/certmagic/filestorage.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "path" - "path/filepath" - "runtime" - "time" -) - -// FileStorage facilitates forming file paths derived from a root -// directory. It is used to get file paths in a consistent, -// cross-platform way or persisting ACME assets on the file system. -type FileStorage struct { - Path string -} - -// Exists returns true if key exists in fs. -func (fs *FileStorage) Exists(key string) bool { - _, err := os.Stat(fs.Filename(key)) - return !os.IsNotExist(err) -} - -// Store saves value at key. -func (fs *FileStorage) Store(key string, value []byte) error { - filename := fs.Filename(key) - err := os.MkdirAll(filepath.Dir(filename), 0700) - if err != nil { - return err - } - return ioutil.WriteFile(filename, value, 0600) -} - -// Load retrieves the value at key. -func (fs *FileStorage) Load(key string) ([]byte, error) { - contents, err := ioutil.ReadFile(fs.Filename(key)) - if os.IsNotExist(err) { - return nil, ErrNotExist(err) - } - return contents, nil -} - -// Delete deletes the value at key. -func (fs *FileStorage) Delete(key string) error { - err := os.Remove(fs.Filename(key)) - if os.IsNotExist(err) { - return ErrNotExist(err) - } - return err -} - -// List returns all keys that match prefix. 
-func (fs *FileStorage) List(prefix string, recursive bool) ([]string, error) { - var keys []string - walkPrefix := fs.Filename(prefix) - - err := filepath.Walk(walkPrefix, func(fpath string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info == nil { - return fmt.Errorf("%s: file info is nil", fpath) - } - if fpath == walkPrefix { - return nil - } - - suffix, err := filepath.Rel(walkPrefix, fpath) - if err != nil { - return fmt.Errorf("%s: could not make path relative: %v", fpath, err) - } - keys = append(keys, path.Join(prefix, suffix)) - - if !recursive && info.IsDir() { - return filepath.SkipDir - } - return nil - }) - - return keys, err -} - -// Stat returns information about key. -func (fs *FileStorage) Stat(key string) (KeyInfo, error) { - fi, err := os.Stat(fs.Filename(key)) - if os.IsNotExist(err) { - return KeyInfo{}, ErrNotExist(err) - } - if err != nil { - return KeyInfo{}, err - } - return KeyInfo{ - Key: key, - Modified: fi.ModTime(), - Size: fi.Size(), - IsTerminal: !fi.IsDir(), - }, nil -} - -// Filename returns the key as a path on the file -// system prefixed by fs.Path. -func (fs *FileStorage) Filename(key string) string { - return filepath.Join(fs.Path, filepath.FromSlash(key)) -} - -// Lock obtains a lock named by the given key. It blocks -// until the lock can be obtained or an error is returned. 
-func (fs *FileStorage) Lock(key string) error { - start := time.Now() - filename := fs.lockFilename(key) - - for { - err := createLockfile(filename) - if err == nil { - // got the lock, yay - return nil - } - if !os.IsExist(err) { - // unexpected error - return fmt.Errorf("creating lock file: %v", err) - } - - // lock file already exists - - info, err := os.Stat(filename) - switch { - case os.IsNotExist(err): - // must have just been removed; try again to create it - continue - - case err != nil: - // unexpected error - return fmt.Errorf("accessing lock file: %v", err) - - case fileLockIsStale(info): - // lock file is stale - delete it and try again to create one - log.Printf("[INFO][%s] Lock for '%s' is stale; removing then retrying: %s", - fs, key, filename) - removeLockfile(filename) - continue - - case time.Since(start) > staleLockDuration*2: - // should never happen, hopefully - return fmt.Errorf("possible deadlock: %s passed trying to obtain lock for %s", - time.Since(start), key) - - default: - // lockfile exists and is not stale; - // just wait a moment and try again - time.Sleep(fileLockPollInterval) - } - } -} - -// Unlock releases the lock for name. -func (fs *FileStorage) Unlock(key string) error { - return removeLockfile(fs.lockFilename(key)) -} - -func (fs *FileStorage) String() string { - return "FileStorage:" + fs.Path -} - -func (fs *FileStorage) lockFilename(key string) string { - return filepath.Join(fs.lockDir(), StorageKeys.Safe(key)+".lock") -} - -func (fs *FileStorage) lockDir() string { - return filepath.Join(fs.Path, "locks") -} - -func fileLockIsStale(info os.FileInfo) bool { - if info == nil { - return true - } - return time.Since(info.ModTime()) > staleLockDuration -} - -// createLockfile atomically creates the lockfile -// identified by filename. A successfully created -// lockfile should be removed with removeLockfile. 
-func createLockfile(filename string) error { - err := atomicallyCreateFile(filename) - if err == nil { - // if the app crashes in removeLockfile(), there is a - // small chance the .unlock file is left behind; it's - // safe to simply remove it as it's a guard against - // double removal of the .lock file. - os.Remove(filename + ".unlock") - } - return err -} - -// removeLockfile atomically removes filename, -// which must be a lockfile created by createLockfile. -// See discussion in PR #7 for more background: -// https://github.com/mholt/certmagic/pull/7 -func removeLockfile(filename string) error { - unlockFilename := filename + ".unlock" - if err := atomicallyCreateFile(unlockFilename); err != nil { - if os.IsExist(err) { - // another process is handling the unlocking - return nil - } - return err - } - defer os.Remove(unlockFilename) - return os.Remove(filename) -} - -// atomicallyCreateFile atomically creates the file -// identified by filename if it doesn't already exist. -func atomicallyCreateFile(filename string) error { - // no need to check this, we only really care about the file creation error - os.MkdirAll(filepath.Dir(filename), 0700) - f, err := os.OpenFile(filename, os.O_CREATE|os.O_EXCL, 0644) - if err == nil { - f.Close() - } - return err -} - -// homeDir returns the best guess of the current user's home -// directory from environment variables. If unknown, "." (the -// current directory) is returned instead. -func homeDir() string { - home := os.Getenv("HOME") - if home == "" && runtime.GOOS == "windows" { - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home = drive + path - if drive == "" || path == "" { - home = os.Getenv("USERPROFILE") - } - } - if home == "" { - home = "." 
- } - return home -} - -func dataDir() string { - baseDir := filepath.Join(homeDir(), ".local", "share") - if xdgData := os.Getenv("XDG_DATA_HOME"); xdgData != "" { - baseDir = xdgData - } - return filepath.Join(baseDir, "certmagic") -} - -// staleLockDuration is the length of time -// before considering a lock to be stale. -const staleLockDuration = 2 * time.Hour - -// fileLockPollInterval is how frequently -// to check the existence of a lock file -const fileLockPollInterval = 1 * time.Second - -var _ Storage = (*FileStorage)(nil) diff --git a/vendor/github.com/mholt/certmagic/go.mod b/vendor/github.com/mholt/certmagic/go.mod deleted file mode 100644 index 75559b41a..000000000 --- a/vendor/github.com/mholt/certmagic/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module github.com/mholt/certmagic - -require ( - github.com/cenkalti/backoff v2.1.1+incompatible // indirect - github.com/go-acme/lego v2.5.0+incompatible - github.com/klauspost/cpuid v1.2.0 - github.com/miekg/dns v1.1.3 // indirect - github.com/stretchr/testify v1.3.0 // indirect - golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b - golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 // indirect - golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect - golang.org/x/sys v0.0.0-20190124100055-b90733256f2e // indirect - golang.org/x/text v0.3.0 // indirect - gopkg.in/square/go-jose.v2 v2.2.2 // indirect -) diff --git a/vendor/github.com/mholt/certmagic/go.sum b/vendor/github.com/mholt/certmagic/go.sum deleted file mode 100644 index 7093ad5b4..000000000 --- a/vendor/github.com/mholt/certmagic/go.sum +++ /dev/null @@ -1,29 +0,0 @@ -github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
-github.com/go-acme/lego v2.3.1-0.20190318164254-3684cc738d37+incompatible h1:D8mQOFMowsqoVMibY3U+xeNmd83bdNPEjTScRiPgVoc= -github.com/go-acme/lego v2.3.1-0.20190318164254-3684cc738d37+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= -github.com/go-acme/lego v2.5.0+incompatible h1:5fNN9yRQfv8ymH3DSsxla+4aYeQt2IgfZqHKVnK8f0s= -github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= -github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/miekg/dns v1.1.3 h1:1g0r1IvskvgL8rR+AcHzUA+oFmGcQlaIm4IqakufeMM= -github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b h1:Elez2XeF2p9uyVj0yEUDqQ56NFcDtcBNkYP7yv8YbUE= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e h1:3GIlrlVLfkoipSReOMNAgApI0ajnalyLa/EZHHca/XI= -golang.org/x/sys 
v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= diff --git a/vendor/github.com/mholt/certmagic/handshake.go b/vendor/github.com/mholt/certmagic/handshake.go deleted file mode 100644 index 51dd39fd1..000000000 --- a/vendor/github.com/mholt/certmagic/handshake.go +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "log" - "net" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/go-acme/lego/challenge/tlsalpn01" -) - -// GetCertificate gets a certificate to satisfy clientHello. In getting -// the certificate, it abides the rules and settings defined in the -// Config that matches clientHello.ServerName. It first checks the in- -// memory cache, then, if the config enables "OnDemand", it accesses -// disk, then accesses the network if it must obtain a new certificate -// via ACME. -// -// This method is safe for use as a tls.Config.GetCertificate callback. 
-func (cfg *Config) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - if cfg.OnEvent != nil { - cfg.OnEvent("tls_handshake_started", clientHello) - } - - // special case: serve up the certificate for a TLS-ALPN ACME challenge - // (https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05) - for _, proto := range clientHello.SupportedProtos { - if proto == tlsalpn01.ACMETLS1Protocol { - cfg.certCache.mu.RLock() - challengeCert, ok := cfg.certCache.cache[tlsALPNCertKeyName(clientHello.ServerName)] - cfg.certCache.mu.RUnlock() - if !ok { - // see if this challenge was started in a cluster; try distributed challenge solver - // (note that the tls.Config's ALPN settings must include the ACME TLS-ALPN challenge - // protocol string, otherwise a valid certificate will not solve the challenge; we - // should already have taken care of that when we made the tls.Config) - challengeCert, ok, err := cfg.tryDistributedChallengeSolver(clientHello) - if err != nil { - log.Printf("[ERROR][%s] TLS-ALPN: %v", clientHello.ServerName, err) - } - if ok { - return &challengeCert.Certificate, nil - } - - return nil, fmt.Errorf("no certificate to complete TLS-ALPN challenge for SNI name: %s", clientHello.ServerName) - } - return &challengeCert.Certificate, nil - } - } - - // get the certificate and serve it up - cert, err := cfg.getCertDuringHandshake(clientHello, true, true) - if err == nil && cfg.OnEvent != nil { - cfg.OnEvent("tls_handshake_completed", clientHello) - } - return &cert.Certificate, err -} - -// getCertificate gets a certificate that matches name from the in-memory -// cache, according to the lookup table associated with cfg. The lookup then -// points to a certificate in the Instance certificate cache. -// -// The name is expected to already be normalized (e.g. lowercased). -// -// If there is no exact match for name, it will be checked against names of -// the form '*.example.com' (wildcard certificates) according to RFC 6125. 
-// If a match is found, matched will be true. If no matches are found, matched -// will be false and a "default" certificate will be returned with defaulted -// set to true. If defaulted is false, then no certificates were available. -// -// The logic in this function is adapted from the Go standard library, -// which is by the Go Authors. -// -// This function is safe for concurrent use. -func (cfg *Config) getCertificate(hello *tls.ClientHelloInfo) (cert Certificate, matched, defaulted bool) { - name := NormalizedName(hello.ServerName) - - var ok bool - - if name == "" { - // if SNI is empty, prefer matching IP address - if hello.Conn != nil { - addr := hello.Conn.LocalAddr().String() - ip, _, err := net.SplitHostPort(addr) - if err == nil { - addr = ip - } - if cert, ok = cfg.certCache.getFirstMatchingCert(addr); ok { - matched = true - return - } - } - - // fall back to a "default" certificate, if specified - if cfg.DefaultServerName != "" { - normDefault := NormalizedName(cfg.DefaultServerName) - if cert, ok = cfg.certCache.getFirstMatchingCert(normDefault); ok { - defaulted = true - return - } - } - } else { - // if SNI is specified, try an exact match first - if cert, ok = cfg.certCache.getFirstMatchingCert(name); ok { - matched = true - return - } - - // try replacing labels in the name with - // wildcards until we get a match - labels := strings.Split(name, ".") - for i := range labels { - labels[i] = "*" - candidate := strings.Join(labels, ".") - if cert, ok = cfg.certCache.getFirstMatchingCert(candidate); ok { - matched = true - return - } - } - - // check the certCache directly to see if the SNI name is - // already the key of the certificate it wants; this implies - // that the SNI can contain the hash of a specific cert - // (chain) it wants and we will still be able to serve it up - // (this behavior, by the way, could be controversial as to - // whether it complies with RFC 6066 about SNI, but I think - // it does, soooo...) 
- // (this is how we solved the former ACME TLS-SNI challenge) - cfg.certCache.mu.RLock() - directCert, ok := cfg.certCache.cache[name] - cfg.certCache.mu.RUnlock() - if ok { - cert = directCert - matched = true - return - } - } - - // otherwise, we're bingo on ammo; see issues - // mholt/caddy#2035 and mholt/caddy#1303 (any - // change to certificate matching behavior must - // account for hosts defined where the hostname - // is empty or a catch-all, like ":443" or - // "0.0.0.0:443") - - return -} - -// getCertDuringHandshake will get a certificate for hello. It first tries -// the in-memory cache. If no certificate for hello is in the cache, the -// config most closely corresponding to hello will be loaded. If that config -// allows it (OnDemand==true) and if loadIfNecessary == true, it goes to disk -// to load it into the cache and serve it. If it's not on disk and if -// obtainIfNecessary == true, the certificate will be obtained from the CA, -// cached, and served. If obtainIfNecessary is true, then loadIfNecessary -// must also be set to true. An error will be returned if and only if no -// certificate is available. -// -// This function is safe for concurrent use. 
-func (cfg *Config) getCertDuringHandshake(hello *tls.ClientHelloInfo, loadIfNecessary, obtainIfNecessary bool) (Certificate, error) { - name := NormalizedName(hello.ServerName) - - // First check our in-memory cache to see if we've already loaded it - cert, matched, defaulted := cfg.getCertificate(hello) - if matched { - return cert, nil - } - - // If OnDemand is enabled, then we might be able to load or - // obtain a needed certificate - if cfg.OnDemand != nil && loadIfNecessary { - // Then check to see if we have one on disk - loadedCert, err := cfg.CacheManagedCertificate(name) - if err == nil { - loadedCert, err = cfg.handshakeMaintenance(hello, loadedCert) - if err != nil { - log.Printf("[ERROR] Maintaining newly-loaded certificate for %s: %v", name, err) - } - return loadedCert, nil - } - if obtainIfNecessary { - // By this point, we need to ask the CA for a certificate - - // Make sure the certificate should be obtained based on config - err := cfg.checkIfCertShouldBeObtained(name) - if err != nil { - return Certificate{}, err - } - - // Name has to qualify for a certificate - if !HostQualifies(name) { - return cert, fmt.Errorf("hostname '%s' does not qualify for certificate", name) - } - - // Obtain certificate from the CA - return cfg.obtainOnDemandCertificate(hello) - } - } - - // Fall back to the default certificate if there is one - if defaulted { - return cert, nil - } - - return Certificate{}, fmt.Errorf("no certificate available for '%s'", name) -} - -// checkIfCertShouldBeObtained checks to see if an on-demand tls certificate -// should be obtained for a given domain based upon the config settings. If -// a non-nil error is returned, do not issue a new certificate for name. -func (cfg *Config) checkIfCertShouldBeObtained(name string) error { - if cfg.OnDemand == nil { - return fmt.Errorf("not configured for on-demand certificate issuance") - } - return cfg.OnDemand.Allowed(name) -} - -// obtainOnDemandCertificate obtains a certificate for hello. 
-// If another goroutine has already started obtaining a cert for -// hello, it will wait and use what the other goroutine obtained. -// -// This function is safe for use by multiple concurrent goroutines. -func (cfg *Config) obtainOnDemandCertificate(hello *tls.ClientHelloInfo) (Certificate, error) { - name := NormalizedName(hello.ServerName) - - // We must protect this process from happening concurrently, so synchronize. - obtainCertWaitChansMu.Lock() - wait, ok := obtainCertWaitChans[name] - if ok { - // lucky us -- another goroutine is already obtaining the certificate. - // wait for it to finish obtaining the cert and then we'll use it. - obtainCertWaitChansMu.Unlock() - <-wait - return cfg.getCertDuringHandshake(hello, true, false) - } - - // looks like it's up to us to do all the work and obtain the cert. - // make a chan others can wait on if needed - wait = make(chan struct{}) - obtainCertWaitChans[name] = wait - obtainCertWaitChansMu.Unlock() - - // obtain the certificate - log.Printf("[INFO] Obtaining new certificate for %s", name) - err := cfg.ObtainCert(name, false) - - // immediately unblock anyone waiting for it; doing this in - // a defer would risk deadlock because of the recursive call - // to getCertDuringHandshake below when we return! - obtainCertWaitChansMu.Lock() - close(wait) - delete(obtainCertWaitChans, name) - obtainCertWaitChansMu.Unlock() - - if err != nil { - // Failed to solve challenge, so don't allow another on-demand - // issue for this name to be attempted for a little while. 
- failedIssuanceMu.Lock() - failedIssuance[name] = time.Now() - go func(name string) { - time.Sleep(5 * time.Minute) - failedIssuanceMu.Lock() - delete(failedIssuance, name) - failedIssuanceMu.Unlock() - }(name) - failedIssuanceMu.Unlock() - return Certificate{}, err - } - - // Success - update counters and stuff - atomic.AddInt32(&cfg.OnDemand.obtainedCount, 1) - lastIssueTimeMu.Lock() - lastIssueTime = time.Now() - lastIssueTimeMu.Unlock() - - // certificate is already on disk; now just start over to load it and serve it - return cfg.getCertDuringHandshake(hello, true, false) -} - -// handshakeMaintenance performs a check on cert for expiration and OCSP validity. -// -// This function is safe for use by multiple concurrent goroutines. -func (cfg *Config) handshakeMaintenance(hello *tls.ClientHelloInfo, cert Certificate) (Certificate, error) { - // Check cert expiration - timeLeft := cert.NotAfter.Sub(time.Now().UTC()) - if timeLeft < cfg.RenewDurationBefore { - log.Printf("[INFO] Certificate for %v expires in %v; attempting renewal", cert.Names, timeLeft) - return cfg.renewDynamicCertificate(hello, cert) - } - - // Check OCSP staple validity - if cert.OCSP != nil { - refreshTime := cert.OCSP.ThisUpdate.Add(cert.OCSP.NextUpdate.Sub(cert.OCSP.ThisUpdate) / 2) - if time.Now().After(refreshTime) { - err := stapleOCSP(cfg.Storage, &cert, nil) - if err != nil { - // An error with OCSP stapling is not the end of the world, and in fact, is - // quite common considering not all certs have issuer URLs that support it. - log.Printf("[ERROR] Getting OCSP for %s: %v", hello.ServerName, err) - } - cfg.certCache.mu.Lock() - cfg.certCache.cache[cert.Hash] = cert - cfg.certCache.mu.Unlock() - } - } - - return cert, nil -} - -// renewDynamicCertificate renews the certificate for name using cfg. It returns the -// certificate to use and an error, if any. name should already be lower-cased before -// calling this function. 
name is the name obtained directly from the handshake's -// ClientHello. -// -// This function is safe for use by multiple concurrent goroutines. -func (cfg *Config) renewDynamicCertificate(hello *tls.ClientHelloInfo, currentCert Certificate) (Certificate, error) { - name := NormalizedName(hello.ServerName) - - obtainCertWaitChansMu.Lock() - wait, ok := obtainCertWaitChans[name] - if ok { - // lucky us -- another goroutine is already renewing the certificate. - // wait for it to finish, then we'll use the new one. - obtainCertWaitChansMu.Unlock() - <-wait - return cfg.getCertDuringHandshake(hello, true, false) - } - - // looks like it's up to us to do all the work and renew the cert - wait = make(chan struct{}) - obtainCertWaitChans[name] = wait - obtainCertWaitChansMu.Unlock() - - // renew and reload the certificate - log.Printf("[INFO] Renewing certificate for %s", name) - err := cfg.RenewCert(name, false) - if err == nil { - // even though the recursive nature of the dynamic cert loading - // would just call this function anyway, we do it here to - // make the replacement as atomic as possible. - newCert, err := cfg.CacheManagedCertificate(name) - if err != nil { - log.Printf("[ERROR] loading renewed certificate for %s: %v", name, err) - } else { - // replace the old certificate with the new one - cfg.certCache.replaceCertificate(currentCert, newCert) - } - } - - // immediately unblock anyone waiting for it; doing this in - // a defer would risk deadlock because of the recursive call - // to getCertDuringHandshake below when we return! - obtainCertWaitChansMu.Lock() - close(wait) - delete(obtainCertWaitChans, name) - obtainCertWaitChansMu.Unlock() - - if err != nil { - return Certificate{}, err - } - - return cfg.getCertDuringHandshake(hello, true, false) -} - -// tryDistributedChallengeSolver is to be called when the clientHello pertains to -// a TLS-ALPN challenge and a certificate is required to solve it. 
This method -// checks the distributed store of challenge info files and, if a matching ServerName -// is present, it makes a certificate to solve this challenge and returns it. -// A boolean true is returned if a valid certificate is returned. -func (cfg *Config) tryDistributedChallengeSolver(clientHello *tls.ClientHelloInfo) (Certificate, bool, error) { - tokenKey := distributedSolver{config: cfg}.challengeTokensKey(clientHello.ServerName) - chalInfoBytes, err := cfg.Storage.Load(tokenKey) - if err != nil { - if _, ok := err.(ErrNotExist); ok { - return Certificate{}, false, nil - } - return Certificate{}, false, fmt.Errorf("opening distributed challenge token file %s: %v", tokenKey, err) - } - - var chalInfo challengeInfo - err = json.Unmarshal(chalInfoBytes, &chalInfo) - if err != nil { - return Certificate{}, false, fmt.Errorf("decoding challenge token file %s (corrupted?): %v", tokenKey, err) - } - - cert, err := tlsalpn01.ChallengeCert(chalInfo.Domain, chalInfo.KeyAuth) - if err != nil { - return Certificate{}, false, fmt.Errorf("making TLS-ALPN challenge certificate: %v", err) - } - if cert == nil { - return Certificate{}, false, fmt.Errorf("got nil TLS-ALPN challenge certificate but no error") - } - - return Certificate{Certificate: *cert}, true, nil -} - -// NormalizedName returns a cleaned form of serverName that is -// used for consistency when referring to a SNI value. -func NormalizedName(serverName string) string { - return strings.ToLower(strings.TrimSpace(serverName)) -} - -// obtainCertWaitChans is used to coordinate obtaining certs for each hostname. 
-var obtainCertWaitChans = make(map[string]chan struct{}) -var obtainCertWaitChansMu sync.Mutex diff --git a/vendor/github.com/mholt/certmagic/httphandler.go b/vendor/github.com/mholt/certmagic/httphandler.go deleted file mode 100644 index b649c3031..000000000 --- a/vendor/github.com/mholt/certmagic/httphandler.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "encoding/json" - "log" - "net/http" - "strings" - - "github.com/go-acme/lego/challenge/http01" -) - -// HTTPChallengeHandler wraps h in a handler that can solve the ACME -// HTTP challenge. cfg is required, and it must have a certificate -// cache backed by a functional storage facility, since that is where -// the challenge state is stored between initiation and solution. -// -// If a request is not an ACME HTTP challenge, h willl be invoked. -func (cfg *Config) HTTPChallengeHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if cfg.HandleHTTPChallenge(w, r) { - return - } - h.ServeHTTP(w, r) - }) -} - -// HandleHTTPChallenge uses cfg to solve challenge requests from an ACME -// server that were initiated by this instance or any other instance in -// this cluster (being, any instances using the same storage cfg does). -// -// If the HTTP challenge is disabled, this function is a no-op. 
-// -// If cfg is nil or if cfg does not have a certificate cache backed by -// usable storage, solving the HTTP challenge will fail. -// -// It returns true if it handled the request; if so, the response has -// already been written. If false is returned, this call was a no-op and -// the request has not been handled. -func (cfg *Config) HandleHTTPChallenge(w http.ResponseWriter, r *http.Request) bool { - if cfg == nil { - return false - } - if cfg.DisableHTTPChallenge { - return false - } - if !LooksLikeHTTPChallenge(r) { - return false - } - return cfg.distributedHTTPChallengeSolver(w, r) -} - -// distributedHTTPChallengeSolver checks to see if this challenge -// request was initiated by this or another instance which uses the -// same storage as cfg does, and attempts to complete the challenge for -// it. It returns true if the request was handled; false otherwise. -func (cfg *Config) distributedHTTPChallengeSolver(w http.ResponseWriter, r *http.Request) bool { - if cfg == nil { - return false - } - - tokenKey := distributedSolver{config: cfg}.challengeTokensKey(r.Host) - chalInfoBytes, err := cfg.Storage.Load(tokenKey) - if err != nil { - if _, ok := err.(ErrNotExist); !ok { - log.Printf("[ERROR][%s] Opening distributed HTTP challenge token file: %v", r.Host, err) - } - return false - } - - var chalInfo challengeInfo - err = json.Unmarshal(chalInfoBytes, &chalInfo) - if err != nil { - log.Printf("[ERROR][%s] Decoding challenge token file %s (corrupted?): %v", r.Host, tokenKey, err) - return false - } - - return answerHTTPChallenge(w, r, chalInfo) -} - -// answerHTTPChallenge solves the challenge with chalInfo. -// Most of this code borrowed from xenolf/lego's built-in HTTP-01 -// challenge solver in March 2018. 
-func answerHTTPChallenge(w http.ResponseWriter, r *http.Request, chalInfo challengeInfo) bool { - challengeReqPath := http01.ChallengePath(chalInfo.Token) - if r.URL.Path == challengeReqPath && - strings.HasPrefix(r.Host, chalInfo.Domain) && - r.Method == "GET" { - w.Header().Add("Content-Type", "text/plain") - w.Write([]byte(chalInfo.KeyAuth)) - r.Close = true - log.Printf("[INFO][%s] Served key authentication (distributed)", chalInfo.Domain) - return true - } - return false -} - -// LooksLikeHTTPChallenge returns true if r looks like an ACME -// HTTP challenge request from an ACME server. -func LooksLikeHTTPChallenge(r *http.Request) bool { - return r.Method == "GET" && strings.HasPrefix(r.URL.Path, challengeBasePath) -} - -const challengeBasePath = "/.well-known/acme-challenge" diff --git a/vendor/github.com/mholt/certmagic/maintain.go b/vendor/github.com/mholt/certmagic/maintain.go deleted file mode 100644 index 75870adfc..000000000 --- a/vendor/github.com/mholt/certmagic/maintain.go +++ /dev/null @@ -1,339 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "log" - "time" - - "golang.org/x/crypto/ocsp" -) - -// maintainAssets is a permanently-blocking function -// that loops indefinitely and, on a regular schedule, checks -// certificates for expiration and initiates a renewal of certs -// that are expiring soon. It also updates OCSP stapling. 
It -// should only be called once per cache. -func (certCache *Cache) maintainAssets() { - renewalTicker := time.NewTicker(certCache.options.RenewCheckInterval) - ocspTicker := time.NewTicker(certCache.options.OCSPCheckInterval) - - log.Printf("[INFO][cache:%p] Started certificate maintenance routine", certCache) - - for { - select { - case <-renewalTicker.C: - log.Printf("[INFO][cache:%p] Scanning for expiring certificates", certCache) - err := certCache.RenewManagedCertificates(false) - if err != nil { - log.Printf("[ERROR][cache:%p] Renewing managed certificates: %v", certCache, err) - } - log.Printf("[INFO][cache:%p] Done scanning certificates", certCache) - case <-ocspTicker.C: - log.Printf("[INFO][cache:%p] Scanning for stale OCSP staples", certCache) - certCache.updateOCSPStaples() - // certCache.deleteOldStapleFiles() - log.Printf("[INFO][cache:%p] Done checking OCSP staples", certCache) - case <-certCache.stopChan: - renewalTicker.Stop() - ocspTicker.Stop() - // TODO: stop any in-progress maintenance operations and clear locks we made - log.Printf("[INFO][cache:%p] Stopped certificate maintenance routine", certCache) - close(certCache.doneChan) - return - } - } -} - -// RenewManagedCertificates renews managed certificates, -// including ones loaded on-demand. Note that this is done -// automatically on a regular basis; normally you will not -// need to call this. -func (certCache *Cache) RenewManagedCertificates(interactive bool) error { - // configs will hold a map of certificate name to the config - // to use when managing that certificate - configs := make(map[string]*Config) - - // we use the queues for a very important reason: to do any and all - // operations that could require an exclusive write lock outside - // of the read lock! otherwise we get a deadlock, yikes. 
in other - // words, our first iteration through the certificate cache does NOT - // perform any operations--only queues them--so that more fine-grained - // write locks may be obtained during the actual operations. - var renewQueue, reloadQueue, deleteQueue []Certificate - - certCache.mu.RLock() - for certKey, cert := range certCache.cache { - if !cert.managed { - continue - } - - // the list of names on this cert should never be empty... programmer error? - if cert.Names == nil || len(cert.Names) == 0 { - log.Printf("[WARNING] Certificate keyed by '%s' has no names: %v - removing from cache", certKey, cert.Names) - deleteQueue = append(deleteQueue, cert) - continue - } - - // get the config associated with this certificate - cfg, err := certCache.getConfig(cert) - if err != nil { - log.Printf("[ERROR] Getting configuration to manage certificate for names %v; unable to renew: %v", cert.Names, err) - continue - } - if cfg == nil { - // this is bad if this happens, probably a programmer error (oops) - log.Printf("[ERROR] No configuration associated with certificate for names %v; unable to manage", cert.Names) - continue - } - configs[cert.Names[0]] = cfg - - // if time is up or expires soon, we need to try to renew it - if cert.NeedsRenewal(cfg) { - // see if the certificate in storage has already been renewed, possibly by another - // instance that didn't coordinate with this one; if so, just load it (this - // might happen if another instance already renewed it - kinda sloppy but checking disk - // first is a simple way to possibly drastically reduce rate limit problems) - storedCertExpiring, err := cfg.managedCertInStorageExpiresSoon(cert) - if err != nil { - // hmm, weird, but not a big deal, maybe it was deleted or something - log.Printf("[NOTICE] Error while checking if certificate for %v in storage is also expiring soon: %v", - cert.Names, err) - } else if !storedCertExpiring { - // if the certificate is NOT expiring soon and there was no error, then we - // 
are good to just reload the certificate from storage instead of repeating - // a likely-unnecessary renewal procedure - reloadQueue = append(reloadQueue, cert) - continue - } - - // the certificate in storage has not been renewed yet, so we will do it - // NOTE: It is super-important to note that the TLS-ALPN challenge requires - // a write lock on the cache in order to complete its challenge, so it is extra - // vital that this renew operation does not happen inside our read lock! - renewQueue = append(renewQueue, cert) - } - } - certCache.mu.RUnlock() - - // Reload certificates that merely need to be updated in memory - for _, oldCert := range reloadQueue { - timeLeft := oldCert.NotAfter.Sub(time.Now().UTC()) - log.Printf("[INFO] Certificate for %v expires in %v, but is already renewed in storage; reloading stored certificate", - oldCert.Names, timeLeft) - - cfg := configs[oldCert.Names[0]] - - err := cfg.reloadManagedCertificate(oldCert) - if err != nil { - if interactive { - return err // operator is present, so report error immediately - } - log.Printf("[ERROR] Loading renewed certificate: %v", err) - } - } - - // Renewal queue - for _, oldCert := range renewQueue { - timeLeft := oldCert.NotAfter.Sub(time.Now().UTC()) - log.Printf("[INFO] Certificate for %v expires in %v; attempting renewal", oldCert.Names, timeLeft) - - cfg := configs[oldCert.Names[0]] - - // Get the name which we should use to renew this certificate; - // we only support managing certificates with one name per cert, - // so this should be easy. - renewName := oldCert.Names[0] - - // perform renewal - err := cfg.RenewCert(renewName, interactive) - if err != nil { - if interactive { - // Certificate renewal failed and the operator is present. See a discussion about - // this in issue mholt/caddy#642. 
For a while, we only stopped if the certificate - // was expired, but in reality, there is no difference between reporting it now - // versus later, except that there's somebody present to deal with it right now. - // Follow-up: See issue mholt/caddy#1680. Only fail in this case if the certificate - // is dangerously close to expiration. - timeLeft := oldCert.NotAfter.Sub(time.Now().UTC()) - if timeLeft < cfg.RenewDurationBeforeAtStartup { - return err - } - } - log.Printf("[ERROR] %v", err) - if cfg.OnDemand != nil { - // loaded dynamically, remove dynamically - deleteQueue = append(deleteQueue, oldCert) - } - continue - } - - // successful renewal, so update in-memory cache by loading - // renewed certificate so it will be used with handshakes - err = cfg.reloadManagedCertificate(oldCert) - if err != nil { - if interactive { - return err // operator is present, so report error immediately - } - log.Printf("[ERROR] %v", err) - } - } - - // Deletion queue - for _, cert := range deleteQueue { - certCache.removeCertificate(cert) - } - - return nil -} - -// updateOCSPStaples updates the OCSP stapling in all -// eligible, cached certificates. -// -// OCSP maintenance strives to abide the relevant points on -// Ryan Sleevi's recommendations for good OCSP support: -// https://gist.github.com/sleevi/5efe9ef98961ecfb4da8 -func (certCache *Cache) updateOCSPStaples() { - // Create a temporary place to store updates - // until we release the potentially long-lived - // read lock and use a short-lived write lock - // on the certificate cache. 
- type ocspUpdate struct { - rawBytes []byte - parsed *ocsp.Response - } - updated := make(map[string]ocspUpdate) - - certCache.mu.RLock() - for certHash, cert := range certCache.cache { - // no point in updating OCSP for expired certificates - if time.Now().After(cert.NotAfter) { - continue - } - - var lastNextUpdate time.Time - if cert.OCSP != nil { - lastNextUpdate = cert.OCSP.NextUpdate - if freshOCSP(cert.OCSP) { - continue // no need to update staple if ours is still fresh - } - } - - cfg, err := certCache.getConfig(cert) - if err != nil { - log.Printf("[ERROR] Getting configuration to manage OCSP for certificate with names %v; unable to refresh: %v", cert.Names, err) - continue - } - if cfg == nil { - // this is bad if this happens, probably a programmer error (oops) - log.Printf("[ERROR] No configuration associated with certificate for names %v; unable to manage OCSP", cert.Names) - continue - } - - err = stapleOCSP(cfg.Storage, &cert, nil) - if err != nil { - if cert.OCSP != nil { - // if there was no staple before, that's fine; otherwise we should log the error - log.Printf("[ERROR] Checking OCSP: %v", err) - } - continue - } - - // By this point, we've obtained the latest OCSP response. - // If there was no staple before, or if the response is updated, make - // sure we apply the update to all names on the certificate. - if cert.OCSP != nil && (lastNextUpdate.IsZero() || lastNextUpdate != cert.OCSP.NextUpdate) { - log.Printf("[INFO] Advancing OCSP staple for %v from %s to %s", - cert.Names, lastNextUpdate, cert.OCSP.NextUpdate) - updated[certHash] = ocspUpdate{rawBytes: cert.Certificate.OCSPStaple, parsed: cert.OCSP} - } - } - certCache.mu.RUnlock() - - // These write locks should be brief since we have all the info we need now. 
- for certKey, update := range updated { - certCache.mu.Lock() - cert := certCache.cache[certKey] - cert.OCSP = update.parsed - cert.Certificate.OCSPStaple = update.rawBytes - certCache.cache[certKey] = cert - certCache.mu.Unlock() - } -} - -// CleanStorageOptions specifies how to clean up a storage unit. -type CleanStorageOptions struct { - OCSPStaples bool - // TODO: long-expired certificates -} - -// CleanStorage tidies up the given storage according to opts; this -// generally involves deleting assets which are no longer required. -// TODO: We should do this for long-expired certificates, too. -func CleanStorage(storage Storage, opts CleanStorageOptions) { - if opts.OCSPStaples { - err := deleteOldOCSPStaples(storage) - if err != nil { - log.Printf("[ERROR] Deleting old OCSP staples: %v", err) - } - } -} - -func deleteOldOCSPStaples(storage Storage) error { - ocspKeys, err := storage.List(prefixOCSP, false) - if err != nil { - // maybe just hasn't been created yet; no big deal - return nil - } - for _, key := range ocspKeys { - ocspBytes, err := storage.Load(key) - if err != nil { - log.Printf("[ERROR] While deleting old OCSP staples, unable to load staple file: %v", err) - continue - } - resp, err := ocsp.ParseResponse(ocspBytes, nil) - if err != nil { - // contents are invalid; delete it - err = storage.Delete(key) - if err != nil { - log.Printf("[ERROR] Purging corrupt staple file %s: %v", key, err) - } - continue - } - if time.Now().After(resp.NextUpdate) { - // response has expired; delete it - err = storage.Delete(key) - if err != nil { - log.Printf("[ERROR] Purging expired staple file %s: %v", key, err) - } - } - } - return nil -} - -const ( - // DefaultRenewCheckInterval is how often to check certificates for renewal. - DefaultRenewCheckInterval = 12 * time.Hour - - // DefaultRenewDurationBefore is how long before expiration to renew certificates. 
- DefaultRenewDurationBefore = (24 * time.Hour) * 30 - - // DefaultRenewDurationBeforeAtStartup is how long before expiration to require - // a renewed certificate when the process is first starting up (see mholt/caddy#1680). - DefaultRenewDurationBeforeAtStartup = (24 * time.Hour) * 7 - - // DefaultOCSPCheckInterval is how often to check if OCSP stapling needs updating. - DefaultOCSPCheckInterval = 1 * time.Hour -) diff --git a/vendor/github.com/mholt/certmagic/ocsp.go b/vendor/github.com/mholt/certmagic/ocsp.go deleted file mode 100644 index 823b7c868..000000000 --- a/vendor/github.com/mholt/certmagic/ocsp.go +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "bytes" - "crypto/x509" - "encoding/pem" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "time" - - "golang.org/x/crypto/ocsp" -) - -// stapleOCSP staples OCSP information to cert for hostname name. -// If you have it handy, you should pass in the PEM-encoded certificate -// bundle; otherwise the DER-encoded cert will have to be PEM-encoded. -// If you don't have the PEM blocks already, just pass in nil. -// -// Errors here are not necessarily fatal, it could just be that the -// certificate doesn't have an issuer URL. 
-func stapleOCSP(storage Storage, cert *Certificate, pemBundle []byte) error { - if pemBundle == nil { - // we need a PEM encoding only for some function calls below - bundle := new(bytes.Buffer) - for _, derBytes := range cert.Certificate.Certificate { - pem.Encode(bundle, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - } - pemBundle = bundle.Bytes() - } - - var ocspBytes []byte - var ocspResp *ocsp.Response - var ocspErr error - var gotNewOCSP bool - - // First try to load OCSP staple from storage and see if - // we can still use it. - ocspStapleKey := StorageKeys.OCSPStaple(cert, pemBundle) - cachedOCSP, err := storage.Load(ocspStapleKey) - if err == nil { - resp, err := ocsp.ParseResponse(cachedOCSP, nil) - if err == nil { - if freshOCSP(resp) { - // staple is still fresh; use it - ocspBytes = cachedOCSP - ocspResp = resp - } - } else { - // invalid contents; delete the file - // (we do this independently of the maintenance routine because - // in this case we know for sure this should be a staple file - // because we loaded it by name, whereas the maintenance routine - // just iterates the list of files, even if somehow a non-staple - // file gets in the folder. in this case we are sure it is corrupt.) - err := storage.Delete(ocspStapleKey) - if err != nil { - log.Printf("[WARNING] Unable to delete invalid OCSP staple file: %v", err) - } - } - } - - // If we couldn't get a fresh staple by reading the cache, - // then we need to request it from the OCSP responder - if ocspResp == nil || len(ocspBytes) == 0 { - ocspBytes, ocspResp, ocspErr = getOCSPForCert(pemBundle) - if ocspErr != nil { - // An error here is not a problem because a certificate may simply - // not contain a link to an OCSP server. But we should log it anyway. - // There's nothing else we can do to get OCSP for this certificate, - // so we can return here with the error. 
- return fmt.Errorf("no OCSP stapling for %v: %v", cert.Names, ocspErr) - } - gotNewOCSP = true - } - - // By now, we should have a response. If good, staple it to - // the certificate. If the OCSP response was not loaded from - // storage, we persist it for next time. - if ocspResp.Status == ocsp.Good { - if ocspResp.NextUpdate.After(cert.NotAfter) { - // uh oh, this OCSP response expires AFTER the certificate does, that's kinda bogus. - // it was the reason a lot of Symantec-validated sites (not Caddy) went down - // in October 2017. https://twitter.com/mattiasgeniar/status/919432824708648961 - return fmt.Errorf("invalid: OCSP response for %v valid after certificate expiration (%s)", - cert.Names, cert.NotAfter.Sub(ocspResp.NextUpdate)) - } - cert.Certificate.OCSPStaple = ocspBytes - cert.OCSP = ocspResp - if gotNewOCSP { - err := storage.Store(ocspStapleKey, ocspBytes) - if err != nil { - return fmt.Errorf("unable to write OCSP staple file for %v: %v", cert.Names, err) - } - } - } - - return nil -} - -// getOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response, -// the parsed response, and an error, if any. The returned []byte can be passed directly -// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the -// issued certificate, this function will try to get the issuer certificate from the -// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return -// values are nil, the OCSP status may be assumed OCSPUnknown. -// -// Borrowed from github.com/go-acme/lego -func getOCSPForCert(bundle []byte) ([]byte, *ocsp.Response, error) { - // TODO: Perhaps this should be synchronized too, with a Locker? - - certificates, err := parseCertsFromPEMBundle(bundle) - if err != nil { - return nil, nil, err - } - - // We expect the certificate slice to be ordered downwards the chain. - // SRV CRT -> CA. 
We need to pull the leaf and issuer certs out of it, - // which should always be the first two certificates. If there's no - // OCSP server listed in the leaf cert, there's nothing to do. And if - // we have only one certificate so far, we need to get the issuer cert. - issuedCert := certificates[0] - if len(issuedCert.OCSPServer) == 0 { - return nil, nil, fmt.Errorf("no OCSP server specified in certificate") - } - if len(certificates) == 1 { - if len(issuedCert.IssuingCertificateURL) == 0 { - return nil, nil, fmt.Errorf("no URL to issuing certificate") - } - - resp, err := http.Get(issuedCert.IssuingCertificateURL[0]) - if err != nil { - return nil, nil, fmt.Errorf("getting issuer certificate: %v", err) - } - defer resp.Body.Close() - - issuerBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024*1024)) - if err != nil { - return nil, nil, fmt.Errorf("reading issuer certificate: %v", err) - } - - issuerCert, err := x509.ParseCertificate(issuerBytes) - if err != nil { - return nil, nil, fmt.Errorf("parsing issuer certificate: %v", err) - } - - // insert it into the slice on position 0; - // we want it ordered right SRV CRT -> CA - certificates = append(certificates, issuerCert) - } - - issuerCert := certificates[1] - - ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil) - if err != nil { - return nil, nil, fmt.Errorf("creating OCSP request: %v", err) - } - - reader := bytes.NewReader(ocspReq) - req, err := http.Post(issuedCert.OCSPServer[0], "application/ocsp-request", reader) - if err != nil { - return nil, nil, fmt.Errorf("making OCSP request: %v", err) - } - defer req.Body.Close() - - ocspResBytes, err := ioutil.ReadAll(io.LimitReader(req.Body, 1024*1024)) - if err != nil { - return nil, nil, fmt.Errorf("reading OCSP response: %v", err) - } - - ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert) - if err != nil { - return nil, nil, fmt.Errorf("parsing OCSP response: %v", err) - } - - return ocspResBytes, ocspRes, nil -} - -// freshOCSP 
returns true if resp is still fresh, -// meaning that it is not expedient to get an -// updated response from the OCSP server. -func freshOCSP(resp *ocsp.Response) bool { - nextUpdate := resp.NextUpdate - // If there is an OCSP responder certificate, and it expires before the - // OCSP response, use its expiration date as the end of the OCSP - // response's validity period. - if resp.Certificate != nil && resp.Certificate.NotAfter.Before(nextUpdate) { - nextUpdate = resp.Certificate.NotAfter - } - // start checking OCSP staple about halfway through validity period for good measure - refreshTime := resp.ThisUpdate.Add(nextUpdate.Sub(resp.ThisUpdate) / 2) - return time.Now().Before(refreshTime) -} diff --git a/vendor/github.com/mholt/certmagic/solvers.go b/vendor/github.com/mholt/certmagic/solvers.go deleted file mode 100644 index 743edfce7..000000000 --- a/vendor/github.com/mholt/certmagic/solvers.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "encoding/json" - "fmt" - "log" - "path/filepath" - - "github.com/go-acme/lego/challenge" - "github.com/go-acme/lego/challenge/tlsalpn01" -) - -// tlsALPNSolver is a type that can solve TLS-ALPN challenges using -// an existing listener and our custom, in-memory certificate cache. -type tlsALPNSolver struct { - certCache *Cache -} - -// Present adds the challenge certificate to the cache. 
-func (s tlsALPNSolver) Present(domain, token, keyAuth string) error { - cert, err := tlsalpn01.ChallengeCert(domain, keyAuth) - if err != nil { - return err - } - certHash := hashCertificateChain(cert.Certificate) - s.certCache.mu.Lock() - s.certCache.cache[tlsALPNCertKeyName(domain)] = Certificate{ - Certificate: *cert, - Names: []string{domain}, - Hash: certHash, // perhaps not necesssary - } - s.certCache.mu.Unlock() - return nil -} - -// CleanUp removes the challenge certificate from the cache. -func (s tlsALPNSolver) CleanUp(domain, token, keyAuth string) error { - s.certCache.mu.Lock() - delete(s.certCache.cache, tlsALPNCertKeyName(domain)) - s.certCache.mu.Unlock() - return nil -} - -// tlsALPNCertKeyName returns the key to use when caching a cert -// for use with the TLS-ALPN ACME challenge. It is simply to help -// avoid conflicts (although at time of writing, there shouldn't -// be, since the cert cache is keyed by hash of certificate chain). -func tlsALPNCertKeyName(sniName string) string { - return sniName + ":acme-tls-alpn" -} - -// distributedSolver allows the ACME HTTP-01 and TLS-ALPN challenges -// to be solved by an instance other than the one which initiated it. -// This is useful behind load balancers or in other cluster/fleet -// configurations. The only requirement is that the instance which -// initiates the challenge shares the same storage and locker with -// the others in the cluster. The storage backing the certificate -// cache in distributedSolver.config is crucial. -// -// Obviously, the instance which completes the challenge must be -// serving on the HTTPChallengePort for the HTTP-01 challenge or the -// TLSALPNChallengePort for the TLS-ALPN-01 challenge (or have all -// the packets port-forwarded) to receive and handle the request. The -// server which receives the challenge must handle it by checking to -// see if the challenge token exists in storage, and if so, decode it -// and use it to serve up the correct response. 
HTTPChallengeHandler -// in this package as well as the GetCertificate method implemented -// by a Config support and even require this behavior. -// -// In short: the only two requirements for cluster operation are -// sharing sync and storage, and using the facilities provided by -// this package for solving the challenges. -type distributedSolver struct { - // The config with a certificate cache - // with a reference to the storage to - // use which is shared among all the - // instances in the cluster - REQUIRED. - config *Config - - // Since the distributedSolver is only a - // wrapper over an actual solver, place - // the actual solver here. - providerServer challenge.Provider -} - -// Present invokes the underlying solver's Present method -// and also stores domain, token, and keyAuth to the storage -// backing the certificate cache of dhs.config. -func (dhs distributedSolver) Present(domain, token, keyAuth string) error { - if dhs.providerServer != nil { - err := dhs.providerServer.Present(domain, token, keyAuth) - if err != nil { - return fmt.Errorf("presenting with standard provider server: %v", err) - } - } - - infoBytes, err := json.Marshal(challengeInfo{ - Domain: domain, - Token: token, - KeyAuth: keyAuth, - }) - if err != nil { - return err - } - - return dhs.config.Storage.Store(dhs.challengeTokensKey(domain), infoBytes) -} - -// CleanUp invokes the underlying solver's CleanUp method -// and also cleans up any assets saved to storage. -func (dhs distributedSolver) CleanUp(domain, token, keyAuth string) error { - if dhs.providerServer != nil { - err := dhs.providerServer.CleanUp(domain, token, keyAuth) - if err != nil { - log.Printf("[ERROR] Cleaning up standard provider server: %v", err) - } - } - return dhs.config.Storage.Delete(dhs.challengeTokensKey(domain)) -} - -// challengeTokensPrefix returns the key prefix for challenge info. 
-func (dhs distributedSolver) challengeTokensPrefix() string { - return filepath.Join(StorageKeys.CAPrefix(dhs.config.CA), "challenge_tokens") -} - -// challengeTokensKey returns the key to use to store and access -// challenge info for domain. -func (dhs distributedSolver) challengeTokensKey(domain string) string { - return filepath.Join(dhs.challengeTokensPrefix(), StorageKeys.Safe(domain)+".json") -} - -type challengeInfo struct { - Domain, Token, KeyAuth string -} diff --git a/vendor/github.com/mholt/certmagic/storage.go b/vendor/github.com/mholt/certmagic/storage.go deleted file mode 100644 index 927ffef87..000000000 --- a/vendor/github.com/mholt/certmagic/storage.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "net/url" - "path" - "regexp" - "strings" - "time" -) - -// Storage is a type that implements a key-value store. -// Keys are prefix-based, with forward slash '/' as separators -// and without a leading slash. -// -// Processes running in a cluster will wish to use the -// same Storage value (its implementation and configuration) -// in order to share certificates and other TLS resources -// with the cluster. -// -// Implementations of Storage must be safe for concurrent use. -type Storage interface { - // Locker provides atomic synchronization - // operations, making Storage safe to share. - Locker - - // Store puts value at key. 
- Store(key string, value []byte) error - - // Load retrieves the value at key. - Load(key string) ([]byte, error) - - // Delete deletes key. - Delete(key string) error - - // Exists returns true if the key exists - // and there was no error checking. - Exists(key string) bool - - // List returns all keys that match prefix. - // If recursive is true, non-terminal keys - // will be enumerated (i.e. "directories" - // should be walked); otherwise, only keys - // prefixed exactly by prefix will be listed. - List(prefix string, recursive bool) ([]string, error) - - // Stat returns information about key. - Stat(key string) (KeyInfo, error) -} - -// Locker facilitates synchronization of certificate tasks across -// machines and networks. -type Locker interface { - // Lock acquires the lock for key, blocking until the lock - // can be obtained or an error is returned. Note that, even - // after acquiring a lock, an idempotent operation may have - // already been performed by another process that acquired - // the lock before - so always check to make sure idempotent - // operations still need to be performed after acquiring the - // lock. - // - // The actual implementation of obtaining of a lock must be - // an atomic operation so that multiple Lock calls at the - // same time always results in only one caller receiving the - // lock at any given time. - // - // To prevent deadlocks, all implementations (where this concern - // is relevant) should put a reasonable expiration on the lock in - // case Unlock is unable to be called due to some sort of network - // or system failure or crash. - Lock(key string) error - - // Unlock releases the lock for key. This method must ONLY be - // called after a successful call to Lock, and only after the - // critical section is finished, even if it errored or timed - // out. Unlock cleans up any resources allocated during Lock. - Unlock(key string) error -} - -// KeyInfo holds information about a key in storage. 
-type KeyInfo struct { - Key string - Modified time.Time - Size int64 - IsTerminal bool // false for keys that only contain other keys (like directories) -} - -// storeTx stores all the values or none at all. -func storeTx(s Storage, all []keyValue) error { - for i, kv := range all { - err := s.Store(kv.key, kv.value) - if err != nil { - for j := i - 1; j >= 0; j-- { - s.Delete(all[j].key) - } - return err - } - } - return nil -} - -// keyValue pairs a key and a value. -type keyValue struct { - key string - value []byte -} - -// KeyBuilder provides a namespace for methods that -// build keys and key prefixes, for addressing items -// in a Storage implementation. -type KeyBuilder struct{} - -// CAPrefix returns the storage key prefix for -// the given certificate authority URL. -func (keys KeyBuilder) CAPrefix(ca string) string { - caURL, err := url.Parse(ca) - if err != nil { - caURL = &url.URL{Host: ca} - } - return path.Join(prefixACME, keys.Safe(caURL.Host)) -} - -// SitePrefix returns a key prefix for items associated with -// the site using the given CA URL. -func (keys KeyBuilder) SitePrefix(ca, domain string) string { - return path.Join(keys.CAPrefix(ca), "sites", keys.Safe(domain)) -} - -// SiteCert returns the path to the certificate file for domain. -func (keys KeyBuilder) SiteCert(ca, domain string) string { - return path.Join(keys.SitePrefix(ca, domain), keys.Safe(domain)+".crt") -} - -// SitePrivateKey returns the path to domain's private key file. -func (keys KeyBuilder) SitePrivateKey(ca, domain string) string { - return path.Join(keys.SitePrefix(ca, domain), keys.Safe(domain)+".key") -} - -// SiteMeta returns the path to the domain's asset metadata file. -func (keys KeyBuilder) SiteMeta(ca, domain string) string { - return path.Join(keys.SitePrefix(ca, domain), keys.Safe(domain)+".json") -} - -// UsersPrefix returns a key prefix for items related to -// users associated with the given CA URL. 
-func (keys KeyBuilder) UsersPrefix(ca string) string { - return path.Join(keys.CAPrefix(ca), "users") -} - -// UserPrefix returns a key prefix for items related to -// the user with the given email for the given CA URL. -func (keys KeyBuilder) UserPrefix(ca, email string) string { - if email == "" { - email = emptyEmail - } - return path.Join(keys.UsersPrefix(ca), keys.Safe(email)) -} - -// UserReg gets the path to the registration file for the user -// with the given email address for the given CA URL. -func (keys KeyBuilder) UserReg(ca, email string) string { - return keys.safeUserKey(ca, email, "registration", ".json") -} - -// UserPrivateKey gets the path to the private key file for the -// user with the given email address on the given CA URL. -func (keys KeyBuilder) UserPrivateKey(ca, email string) string { - return keys.safeUserKey(ca, email, "private", ".key") -} - -// OCSPStaple returns a key for the OCSP staple associated -// with the given certificate. If you have the PEM bundle -// handy, pass that in to save an extra encoding step. -func (keys KeyBuilder) OCSPStaple(cert *Certificate, pemBundle []byte) string { - var ocspFileName string - if len(cert.Names) > 0 { - firstName := keys.Safe(cert.Names[0]) - ocspFileName = firstName + "-" - } - ocspFileName += fastHash(pemBundle) - return path.Join(prefixOCSP, ocspFileName) -} - -// safeUserKey returns a key for the given email, with the default -// filename, and the filename ending in the given extension. 
-func (keys KeyBuilder) safeUserKey(ca, email, defaultFilename, extension string) string { - if email == "" { - email = emptyEmail - } - email = strings.ToLower(email) - filename := keys.emailUsername(email) - if filename == "" { - filename = defaultFilename - } - filename = keys.Safe(filename) - return path.Join(keys.UserPrefix(ca, email), filename+extension) -} - -// emailUsername returns the username portion of an email address (part before -// '@') or the original input if it can't find the "@" symbol. -func (keys KeyBuilder) emailUsername(email string) string { - at := strings.Index(email, "@") - if at == -1 { - return email - } else if at == 0 { - return email[1:] - } - return email[:at] -} - -// Safe standardizes and sanitizes str for use as -// a storage key. This method is idempotent. -func (keys KeyBuilder) Safe(str string) string { - str = strings.ToLower(str) - str = strings.TrimSpace(str) - - // replace a few specific characters - repl := strings.NewReplacer( - " ", "_", - "+", "_plus_", - "*", "wildcard_", - "..", "", // prevent directory traversal (regex allows single dots) - ) - str = repl.Replace(str) - - // finally remove all non-word characters - return safeKeyRE.ReplaceAllLiteralString(str, "") -} - -// StorageKeys provides methods for accessing -// keys and key prefixes for items in a Storage. -// Typically, you will not need to use this -// because accessing storage is abstracted away -// for most cases. Only use this if you need to -// directly access TLS assets in your application. -var StorageKeys KeyBuilder - -const ( - prefixACME = "acme" - prefixOCSP = "ocsp" -) - -// safeKeyRE matches any undesirable characters in storage keys. -// Note that this allows dots, so you'll have to strip ".." manually. -var safeKeyRE = regexp.MustCompile(`[^\w@.-]`) - -// ErrNotExist is returned by Storage implementations when -// a resource is not found. It is similar to os.IsNotExist -// except this is a type, not a variable. 
-type ErrNotExist interface { - error -} - -// defaultFileStorage is a convenient, default storage -// implementation using the local file system. -var defaultFileStorage = &FileStorage{Path: dataDir()} diff --git a/vendor/github.com/mholt/certmagic/user.go b/vendor/github.com/mholt/certmagic/user.go deleted file mode 100644 index 2b5412447..000000000 --- a/vendor/github.com/mholt/certmagic/user.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2015 Matthew Holt -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package certmagic - -import ( - "bufio" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "path" - "sort" - "strings" - - "github.com/go-acme/lego/acme" - "github.com/go-acme/lego/registration" -) - -// user represents a Let's Encrypt user account. -type user struct { - Email string - Registration *registration.Resource - key crypto.PrivateKey -} - -// GetEmail gets u's email. -func (u user) GetEmail() string { - return u.Email -} - -// GetRegistration gets u's registration resource. -func (u user) GetRegistration() *registration.Resource { - return u.Registration -} - -// GetPrivateKey gets u's private key. -func (u user) GetPrivateKey() crypto.PrivateKey { - return u.key -} - -// newUser creates a new User for the given email address -// with a new private key. This function does NOT save the -// user to disk or register it via ACME. 
If you want to use -// a user account that might already exist, call getUser -// instead. It does NOT prompt the user. -func (cfg *Config) newUser(email string) (user, error) { - user := user{Email: email} - privateKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - if err != nil { - return user, fmt.Errorf("generating private key: %v", err) - } - user.key = privateKey - return user, nil -} - -// getEmail does everything it can to obtain an email address -// from the user within the scope of memory and storage to use -// for ACME TLS. If it cannot get an email address, it does nothing -// (If user is prompted, it will warn the user of -// the consequences of an empty email.) This function MAY prompt -// the user for input. If allowPrompts is false, the user -// will NOT be prompted and an empty email may be returned. -func (cfg *Config) getEmail(allowPrompts bool) error { - leEmail := cfg.Email - - // First try package default email - if leEmail == "" { - leEmail = Default.Email - } - - // Then try to get most recent user email from storage - var gotRecentEmail bool - if leEmail == "" { - leEmail, gotRecentEmail = cfg.mostRecentUserEmail() - } - if !gotRecentEmail && leEmail == "" && allowPrompts { - // Looks like there is no email address readily available, - // so we will have to ask the user if we can. 
- var err error - leEmail, err = cfg.promptUserForEmail() - if err != nil { - return err - } - - // User might have just signified their agreement - cfg.Agreed = Default.Agreed - } - - // save the email for later and ensure it is consistent - // for repeated use; then update cfg with the email - Default.Email = strings.TrimSpace(strings.ToLower(leEmail)) - cfg.Email = Default.Email - - return nil -} - -func (cfg *Config) getAgreementURL() (string, error) { - if agreementTestURL != "" { - return agreementTestURL, nil - } - caURL := Default.CA - if cfg.CA != "" { - caURL = cfg.CA - } - response, err := http.Get(caURL) - if err != nil { - return "", err - } - defer response.Body.Close() - var dir acme.Directory - err = json.NewDecoder(response.Body).Decode(&dir) - if err != nil { - return "", err - } - return dir.Meta.TermsOfService, nil -} - -// promptUserForEmail prompts the user for an email address -// and returns the email address they entered (which could -// be the empty string). If no error is returned, then Agreed -// will also be set to true, since continuing through the -// prompt signifies agreement. -func (cfg *Config) promptUserForEmail() (string, error) { - agreementURL, err := cfg.getAgreementURL() - if err != nil { - return "", fmt.Errorf("get Agreement URL: %v", err) - } - // prompt the user for an email address and terms agreement - reader := bufio.NewReader(stdin) - cfg.promptUserAgreement(agreementURL) - fmt.Println("Please enter your email address to signify agreement and to be notified") - fmt.Println("in case of issues. You can leave it blank, but we don't recommend it.") - fmt.Print(" Email address: ") - leEmail, err := reader.ReadString('\n') - if err != nil && err != io.EOF { - return "", fmt.Errorf("reading email address: %v", err) - } - leEmail = strings.TrimSpace(leEmail) - Default.Agreed = true - return leEmail, nil -} - -// getUser loads the user with the given email from disk -// using the provided storage. 
If the user does not exist, -// it will create a new one, but it does NOT save new -// users to the disk or register them via ACME. It does -// NOT prompt the user. -func (cfg *Config) getUser(email string) (user, error) { - var user user - - regBytes, err := cfg.Storage.Load(StorageKeys.UserReg(cfg.CA, email)) - if err != nil { - if _, ok := err.(ErrNotExist); ok { - // create a new user - return cfg.newUser(email) - } - return user, err - } - keyBytes, err := cfg.Storage.Load(StorageKeys.UserPrivateKey(cfg.CA, email)) - if err != nil { - if _, ok := err.(ErrNotExist); ok { - // create a new user - return cfg.newUser(email) - } - return user, err - } - - err = json.Unmarshal(regBytes, &user) - if err != nil { - return user, err - } - user.key, err = decodePrivateKey(keyBytes) - return user, err -} - -// saveUser persists a user's key and account registration -// to the file system. It does NOT register the user via ACME -// or prompt the user. You must also pass in the storage -// wherein the user should be saved. It should be the storage -// for the CA with which user has an account. -func (cfg *Config) saveUser(user user) error { - regBytes, err := json.MarshalIndent(&user, "", "\t") - if err != nil { - return err - } - keyBytes, err := encodePrivateKey(user.key) - if err != nil { - return err - } - - all := []keyValue{ - { - key: StorageKeys.UserReg(cfg.CA, user.Email), - value: regBytes, - }, - { - key: StorageKeys.UserPrivateKey(cfg.CA, user.Email), - value: keyBytes, - }, - } - - return storeTx(cfg.Storage, all) -} - -// promptUserAgreement simply outputs the standard user -// agreement prompt with the given agreement URL. -// It outputs a newline after the message. -func (cfg *Config) promptUserAgreement(agreementURL string) { - const userAgreementPrompt = `Your sites will be served over HTTPS automatically using Let's Encrypt. 
-By continuing, you agree to the Let's Encrypt Subscriber Agreement at:` - fmt.Printf("\n\n%s\n %s\n", userAgreementPrompt, agreementURL) -} - -// askUserAgreement prompts the user to agree to the agreement -// at the given agreement URL via stdin. It returns whether the -// user agreed or not. -func (cfg *Config) askUserAgreement(agreementURL string) bool { - cfg.promptUserAgreement(agreementURL) - fmt.Print("Do you agree to the terms? (y/n): ") - - reader := bufio.NewReader(stdin) - answer, err := reader.ReadString('\n') - if err != nil { - return false - } - answer = strings.ToLower(strings.TrimSpace(answer)) - - return answer == "y" || answer == "yes" -} - -// mostRecentUserEmail finds the most recently-written user file -// in s. Since this is part of a complex sequence to get a user -// account, errors here are discarded to simplify code flow in -// the caller, and errors are not important here anyway. -func (cfg *Config) mostRecentUserEmail() (string, bool) { - userList, err := cfg.Storage.List(StorageKeys.UsersPrefix(cfg.CA), false) - if err != nil || len(userList) == 0 { - return "", false - } - sort.Slice(userList, func(i, j int) bool { - iInfo, _ := cfg.Storage.Stat(userList[i]) - jInfo, _ := cfg.Storage.Stat(userList[j]) - return jInfo.Modified.Before(iInfo.Modified) - }) - user, err := cfg.getUser(path.Base(userList[0])) - if err != nil { - return "", false - } - return user.Email, true -} - -// agreementTestURL is set during tests to skip requiring -// setting up an entire ACME CA endpoint. -var agreementTestURL string - -// stdin is used to read the user's input if prompted; -// this is changed by tests during tests. -var stdin = io.ReadWriter(os.Stdin) - -// The name of the folder for accounts where the email -// address was not provided; default 'username' if you will, -// but only for local/storage use, not with the CA. 
-const emptyEmail = "default" diff --git a/vendor/github.com/miekg/dns/.codecov.yml b/vendor/github.com/miekg/dns/.codecov.yml deleted file mode 100644 index f91e5c1fe..000000000 --- a/vendor/github.com/miekg/dns/.codecov.yml +++ /dev/null @@ -1,8 +0,0 @@ -coverage: - status: - project: - default: - target: 40% - threshold: null - patch: false - changes: false diff --git a/vendor/github.com/miekg/dns/.gitignore b/vendor/github.com/miekg/dns/.gitignore deleted file mode 100644 index 776cd950c..000000000 --- a/vendor/github.com/miekg/dns/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.6 -tags -test.out -a.out diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml deleted file mode 100644 index 013b5ae44..000000000 --- a/vendor/github.com/miekg/dns/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go -sudo: false - -go: - - 1.10.x - - 1.11.x - - 1.12.x - - tip - -before_install: - # don't use the miekg/dns when testing forks - - mkdir -p $GOPATH/src/github.com/miekg - - ln -s $TRAVIS_BUILD_DIR $GOPATH/src/github.com/miekg/ || true - -script: - - go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./... - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS deleted file mode 100644 index 196568352..000000000 --- a/vendor/github.com/miekg/dns/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Miek Gieben diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS deleted file mode 100644 index 5903779d8..000000000 --- a/vendor/github.com/miekg/dns/CONTRIBUTORS +++ /dev/null @@ -1,10 +0,0 @@ -Alex A. 
Skinner -Andrew Tunnell-Jones -Ask Bjørn Hansen -Dave Cheney -Dusty Wilson -Marek Majkowski -Peter van Dijk -Omri Bahumi -Alex Sergeyev -James Hartig diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT deleted file mode 100644 index 35702b10e..000000000 --- a/vendor/github.com/miekg/dns/COPYRIGHT +++ /dev/null @@ -1,9 +0,0 @@ -Copyright 2009 The Go Authors. All rights reserved. Use of this source code -is governed by a BSD-style license that can be found in the LICENSE file. -Extensions of the original work are copyright (c) 2011 Miek Gieben - -Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is -governed by a BSD-style license that can be found in the LICENSE file. - -Copyright 2014 CloudFlare. All rights reserved. Use of this source code is -governed by a BSD-style license that can be found in the LICENSE file. diff --git a/vendor/github.com/miekg/dns/Gopkg.lock b/vendor/github.com/miekg/dns/Gopkg.lock deleted file mode 100644 index 686632207..000000000 --- a/vendor/github.com/miekg/dns/Gopkg.lock +++ /dev/null @@ -1,57 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "master" - digest = "1:6914c49eed986dfb8dffb33516fa129c49929d4d873f41e073c83c11c372b870" - name = "golang.org/x/crypto" - packages = [ - "ed25519", - "ed25519/internal/edwards25519", - ] - pruneopts = "" - revision = "e3636079e1a4c1f337f212cc5cd2aca108f6c900" - -[[projects]] - branch = "master" - digest = "1:08e41d63f8dac84d83797368b56cf0b339e42d0224e5e56668963c28aec95685" - name = "golang.org/x/net" - packages = [ - "bpf", - "context", - "internal/iana", - "internal/socket", - "ipv4", - "ipv6", - ] - pruneopts = "" - revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de" - -[[projects]] - branch = "master" - digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1" - name = "golang.org/x/sync" - packages = ["errgroup"] - pruneopts = "" - revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" - -[[projects]] - branch = "master" - digest = "1:149a432fabebb8221a80f77731b1cd63597197ded4f14af606ebe3a0959004ec" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "" - revision = "e4b3c5e9061176387e7cea65e4dc5853801f3fb7" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "golang.org/x/crypto/ed25519", - "golang.org/x/net/ipv4", - "golang.org/x/net/ipv6", - "golang.org/x/sync/errgroup", - "golang.org/x/sys/unix", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/miekg/dns/Gopkg.toml b/vendor/github.com/miekg/dns/Gopkg.toml deleted file mode 100644 index 85e6ff31b..000000000 --- a/vendor/github.com/miekg/dns/Gopkg.toml +++ /dev/null @@ -1,38 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "golang.org/x/crypto" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - branch = "master" - name = "golang.org/x/sys" - -[[constraint]] - branch = "master" - name = "golang.org/x/sync" diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE deleted file mode 100644 index 5763fa7fe..000000000 --- a/vendor/github.com/miekg/dns/LICENSE +++ /dev/null @@ -1,32 +0,0 @@ -Extensions of the original work are copyright (c) 2011 Miek Gieben - -As this is fork of the official Go code the same license applies: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/miekg/dns/Makefile.fuzz b/vendor/github.com/miekg/dns/Makefile.fuzz deleted file mode 100644 index dc158c4ac..000000000 --- a/vendor/github.com/miekg/dns/Makefile.fuzz +++ /dev/null @@ -1,33 +0,0 @@ -# Makefile for fuzzing -# -# Use go-fuzz and needs the tools installed. -# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/ -# -# Installing go-fuzz: -# $ make -f Makefile.fuzz get -# Installs: -# * github.com/dvyukov/go-fuzz/go-fuzz -# * get github.com/dvyukov/go-fuzz/go-fuzz-build - -all: build - -.PHONY: build -build: - go-fuzz-build -tags fuzz github.com/miekg/dns - -.PHONY: build-newrr -build-newrr: - go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns - -.PHONY: fuzz -fuzz: - go-fuzz -bin=dns-fuzz.zip -workdir=fuzz - -.PHONY: get -get: - go get github.com/dvyukov/go-fuzz/go-fuzz - go get github.com/dvyukov/go-fuzz/go-fuzz-build - -.PHONY: clean -clean: - rm *-fuzz.zip diff --git a/vendor/github.com/miekg/dns/Makefile.release b/vendor/github.com/miekg/dns/Makefile.release deleted file mode 100644 index 8fb748e8a..000000000 --- a/vendor/github.com/miekg/dns/Makefile.release +++ /dev/null @@ -1,52 +0,0 @@ -# Makefile for releasing. 
-# -# The release is controlled from version.go. The version found there is -# used to tag the git repo, we're not building any artifects so there is nothing -# to upload to github. -# -# * Up the version in version.go -# * Run: make -f Makefile.release release -# * will *commit* your change with 'Release $VERSION' -# * push to github -# - -define GO -//+build ignore - -package main - -import ( - "fmt" - - "github.com/miekg/dns" -) - -func main() { - fmt.Println(dns.Version.String()) -} -endef - -$(file > version_release.go,$(GO)) -VERSION:=$(shell go run version_release.go) -TAG="v$(VERSION)" - -all: - @echo Use the \'release\' target to start a release $(VERSION) - rm -f version_release.go - -.PHONY: release -release: commit push - @echo Released $(VERSION) - rm -f version_release.go - -.PHONY: commit -commit: - @echo Committing release $(VERSION) - git commit -am"Release $(VERSION)" - git tag $(TAG) - -.PHONY: push -push: - @echo Pushing release $(VERSION) to master - git push --tags - git push diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md deleted file mode 100644 index dc2a8228f..000000000 --- a/vendor/github.com/miekg/dns/README.md +++ /dev/null @@ -1,172 +0,0 @@ -[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) -[![Code Coverage](https://img.shields.io/codecov/c/github/miekg/dns/master.svg)](https://codecov.io/github/miekg/dns?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/miekg/dns)](https://goreportcard.com/report/miekg/dns) -[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns) - -# Alternative (more granular) approach to a DNS library - -> Less is more. - -Complete and usable DNS library. All Resource Records are supported, including the DNSSEC types. -It follows a lean and mean philosophy. If there is stuff you should know as a DNS programmer there -isn't a convenience function for it. 
Server side and client side programming is supported, i.e. you -can build servers and resolvers with it. - -We try to keep the "master" branch as sane as possible and at the bleeding edge of standards, -avoiding breaking changes wherever reasonable. We support the last two versions of Go. - -# Goals - -* KISS; -* Fast; -* Small API. If it's easy to code in Go, don't make a function for it. - -# Users - -A not-so-up-to-date-list-that-may-be-actually-current: - -* https://github.com/coredns/coredns -* https://cloudflare.com -* https://github.com/abh/geodns -* http://www.statdns.com/ -* http://www.dnsinspect.com/ -* https://github.com/chuangbo/jianbing-dictionary-dns -* http://www.dns-lg.com/ -* https://github.com/fcambus/rrda -* https://github.com/kenshinx/godns -* https://github.com/skynetservices/skydns -* https://github.com/hashicorp/consul -* https://github.com/DevelopersPL/godnsagent -* https://github.com/duedil-ltd/discodns -* https://github.com/StalkR/dns-reverse-proxy -* https://github.com/tianon/rawdns -* https://mesosphere.github.io/mesos-dns/ -* https://pulse.turbobytes.com/ -* https://github.com/fcambus/statzone -* https://github.com/benschw/dns-clb-go -* https://github.com/corny/dnscheck for -* https://namesmith.io -* https://github.com/miekg/unbound -* https://github.com/miekg/exdns -* https://dnslookup.org -* https://github.com/looterz/grimd -* https://github.com/phamhongviet/serf-dns -* https://github.com/mehrdadrad/mylg -* https://github.com/bamarni/dockness -* https://github.com/fffaraz/microdns -* http://kelda.io -* https://github.com/ipdcode/hades -* https://github.com/StackExchange/dnscontrol/ -* https://www.dnsperf.com/ -* https://dnssectest.net/ -* https://dns.apebits.com -* https://github.com/oif/apex -* https://github.com/jedisct1/dnscrypt-proxy -* https://github.com/jedisct1/rpdns -* https://github.com/xor-gate/sshfp -* https://github.com/rs/dnstrace -* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss)) -* 
https://github.com/semihalev/sdns -* https://render.com -* https://github.com/peterzen/goresolver - -Send pull request if you want to be listed here. - -# Features - -* UDP/TCP queries, IPv4 and IPv6 -* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported -* Fast -* Server side programming (mimicking the net/http package) -* Client side programming -* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519 -* EDNS0, NSID, Cookies -* AXFR/IXFR -* TSIG, SIG(0) -* DNS over TLS (DoT): encrypted connection between client and server over TCP -* DNS name compression - -Have fun! - -Miek Gieben - 2010-2012 - -DNS Authors 2012- - -# Building - -Building is done with the `go` tool. If you have setup your GOPATH correctly, the following should -work: - - go get github.com/miekg/dns - go build github.com/miekg/dns - -## Examples - -A short "how to use the API" is at the beginning of doc.go (this also will show when you call `godoc -github.com/miekg/dns`). - -Example programs can be found in the `github.com/miekg/exdns` repository. 
- -## Supported RFCs - -*all of them* - -* 103{4,5} - DNS standard -* 1348 - NSAP record (removed the record) -* 1982 - Serial Arithmetic -* 1876 - LOC record -* 1995 - IXFR -* 1996 - DNS notify -* 2136 - DNS Update (dynamic updates) -* 2181 - RRset definition - there is no RRset type though, just []RR -* 2537 - RSAMD5 DNS keys -* 2065 - DNSSEC (updated in later RFCs) -* 2671 - EDNS record -* 2782 - SRV record -* 2845 - TSIG record -* 2915 - NAPTR record -* 2929 - DNS IANA Considerations -* 3110 - RSASHA1 DNS keys -* 3225 - DO bit (DNSSEC OK) -* 340{1,2,3} - NAPTR record -* 3445 - Limiting the scope of (DNS)KEY -* 3597 - Unknown RRs -* 403{3,4,5} - DNSSEC + validation functions -* 4255 - SSHFP record -* 4343 - Case insensitivity -* 4408 - SPF record -* 4509 - SHA256 Hash in DS -* 4592 - Wildcards in the DNS -* 4635 - HMAC SHA TSIG -* 4701 - DHCID -* 4892 - id.server -* 5001 - NSID -* 5155 - NSEC3 record -* 5205 - HIP record -* 5702 - SHA2 in the DNS -* 5936 - AXFR -* 5966 - TCP implementation recommendations -* 6605 - ECDSA -* 6725 - IANA Registry Update -* 6742 - ILNP DNS -* 6840 - Clarifications and Implementation Notes for DNS Security -* 6844 - CAA record -* 6891 - EDNS0 update -* 6895 - DNS IANA considerations -* 6975 - Algorithm Understanding in DNSSEC -* 7043 - EUI48/EUI64 records -* 7314 - DNS (EDNS) EXPIRE Option -* 7477 - CSYNC RR -* 7828 - edns-tcp-keepalive EDNS0 Option -* 7553 - URI record -* 7858 - DNS over TLS: Initiation and Performance Considerations -* 7871 - EDNS0 Client Subnet -* 7873 - Domain Name System (DNS) Cookies -* 8080 - EdDSA for DNSSEC -* 8499 - DNS Terminology - -## Loosely Based Upon - -* ldns - -* NSD - -* Net::DNS - -* GRONG - diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go deleted file mode 100644 index 78c076c25..000000000 --- a/vendor/github.com/miekg/dns/acceptfunc.go +++ /dev/null @@ -1,56 +0,0 @@ -package dns - -// MsgAcceptFunc is used early in the server code to accept or 
reject a message with RcodeFormatError. -// It returns a MsgAcceptAction to indicate what should happen with the message. -type MsgAcceptFunc func(dh Header) MsgAcceptAction - -// DefaultMsgAcceptFunc checks the request and will reject if: -// -// * isn't a request (don't respond in that case). -// * opcode isn't OpcodeQuery or OpcodeNotify -// * Zero bit isn't zero -// * has more than 1 question in the question section -// * has more than 1 RR in the Answer section -// * has more than 0 RRs in the Authority section -// * has more than 2 RRs in the Additional section -var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc - -// MsgAcceptAction represents the action to be taken. -type MsgAcceptAction int - -const ( - MsgAccept MsgAcceptAction = iota // Accept the message - MsgReject // Reject the message with a RcodeFormatError - MsgIgnore // Ignore the error and send nothing back. -) - -func defaultMsgAcceptFunc(dh Header) MsgAcceptAction { - if isResponse := dh.Bits&_QR != 0; isResponse { - return MsgIgnore - } - - // Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs. - opcode := int(dh.Bits>>11) & 0xF - if opcode != OpcodeQuery && opcode != OpcodeNotify { - return MsgReject - } - - if isZero := dh.Bits&_Z != 0; isZero { - return MsgReject - } - if dh.Qdcount != 1 { - return MsgReject - } - // NOTIFY requests can have a SOA in the ANSWER section. See RFC 1996 Section 3.7 and 3.11. - if dh.Ancount > 1 { - return MsgReject - } - // IXFR request could have one SOA RR in the NS section. See RFC 1995, section 3. - if dh.Nscount > 1 { - return MsgReject - } - if dh.Arcount > 2 { - return MsgReject - } - return MsgAccept -} diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go deleted file mode 100644 index b14c8d908..000000000 --- a/vendor/github.com/miekg/dns/client.go +++ /dev/null @@ -1,417 +0,0 @@ -package dns - -// A client implementation. 
- -import ( - "context" - "crypto/tls" - "encoding/binary" - "fmt" - "io" - "net" - "strings" - "time" -) - -const ( - dnsTimeout time.Duration = 2 * time.Second - tcpIdleTimeout time.Duration = 8 * time.Second -) - -// A Conn represents a connection to a DNS server. -type Conn struct { - net.Conn // a net.Conn holding the connection - UDPSize uint16 // minimum receive buffer for UDP messages - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) - tsigRequestMAC string -} - -// A Client defines parameters for a DNS client. -type Client struct { - Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) - UDPSize uint16 // minimum receive buffer for UDP messages - TLSConfig *tls.Config // TLS connection configuration - Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more - // Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout, - // WriteTimeout when non-zero. 
Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and - // Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext) - Timeout time.Duration - DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) - SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass - group singleflight -} - -// Exchange performs a synchronous UDP query. It sends the message m to the address -// contained in a and waits for a reply. Exchange does not retry a failed query, nor -// will it fall back to TCP in case of truncation. -// See client.Exchange for more information on setting larger buffer sizes. -func Exchange(m *Msg, a string) (r *Msg, err error) { - client := Client{Net: "udp"} - r, _, err = client.Exchange(m, a) - return r, err -} - -func (c *Client) dialTimeout() time.Duration { - if c.Timeout != 0 { - return c.Timeout - } - if c.DialTimeout != 0 { - return c.DialTimeout - } - return dnsTimeout -} - -func (c *Client) readTimeout() time.Duration { - if c.ReadTimeout != 0 { - return c.ReadTimeout - } - return dnsTimeout -} - -func (c *Client) writeTimeout() time.Duration { - if c.WriteTimeout != 0 { - return c.WriteTimeout - } - return dnsTimeout -} - -// Dial connects to the address on the named network. 
-func (c *Client) Dial(address string) (conn *Conn, err error) { - // create a new dialer with the appropriate timeout - var d net.Dialer - if c.Dialer == nil { - d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())} - } else { - d = *c.Dialer - } - - network := c.Net - if network == "" { - network = "udp" - } - - useTLS := strings.HasPrefix(network, "tcp") && strings.HasSuffix(network, "-tls") - - conn = new(Conn) - if useTLS { - network = strings.TrimSuffix(network, "-tls") - - conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig) - } else { - conn.Conn, err = d.Dial(network, address) - } - if err != nil { - return nil, err - } - - return conn, nil -} - -// Exchange performs a synchronous query. It sends the message m to the address -// contained in a and waits for a reply. Basic use pattern with a *dns.Client: -// -// c := new(dns.Client) -// in, rtt, err := c.Exchange(message, "127.0.0.1:53") -// -// Exchange does not retry a failed query, nor will it fall back to TCP in -// case of truncation. -// It is up to the caller to create a message that allows for larger responses to be -// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger -// buffer, see SetEdns0. 
Messages without an OPT RR will fallback to the historic limit -// of 512 bytes -// To specify a local address or a timeout, the caller has to set the `Client.Dialer` -// attribute appropriately -func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) { - if !c.SingleInflight { - return c.exchange(m, address) - } - - q := m.Question[0] - key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass) - r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) { - return c.exchange(m, address) - }) - if r != nil && shared { - r = r.Copy() - } - - return r, rtt, err -} - -func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - var co *Conn - - co, err = c.Dial(a) - - if err != nil { - return nil, 0, err - } - defer co.Close() - - opt := m.IsEdns0() - // If EDNS0 is used use that for size. - if opt != nil && opt.UDPSize() >= MinMsgSize { - co.UDPSize = opt.UDPSize() - } - // Otherwise use the client's configured UDP size. - if opt == nil && c.UDPSize >= MinMsgSize { - co.UDPSize = c.UDPSize - } - - co.TsigSecret = c.TsigSecret - t := time.Now() - // write with the appropriate write timeout - co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout()))) - if err = co.WriteMsg(m); err != nil { - return nil, 0, err - } - - co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout()))) - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - rtt = time.Since(t) - return r, rtt, err -} - -// ReadMsg reads a message from the connection co. -// If the received message contains a TSIG record the transaction signature -// is verified. This method always tries to return the message, however if an -// error is returned there are no guarantees that the returned message is a -// valid representation of the packet read. 
-func (co *Conn) ReadMsg() (*Msg, error) { - p, err := co.ReadMsgHeader(nil) - if err != nil { - return nil, err - } - - m := new(Msg) - if err := m.Unpack(p); err != nil { - // If an error was returned, we still want to allow the user to use - // the message, but naively they can just check err if they don't want - // to use an erroneous message - return m, err - } - if t := m.IsTsig(); t != nil { - if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { - return m, ErrSecret - } - // Need to work on the original message p, as that was used to calculate the tsig. - err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) - } - return m, err -} - -// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). -// Returns message as a byte slice to be parsed with Msg.Unpack later on. -// Note that error handling on the message body is not possible as only the header is parsed. -func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { - var ( - p []byte - n int - err error - ) - switch co.Conn.(type) { - case *net.TCPConn, *tls.Conn: - var length uint16 - if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil { - return nil, err - } - - p = make([]byte, length) - n, err = io.ReadFull(co.Conn, p) - default: - if co.UDPSize > MinMsgSize { - p = make([]byte, co.UDPSize) - } else { - p = make([]byte, MinMsgSize) - } - n, err = co.Read(p) - } - - if err != nil { - return nil, err - } else if n < headerSize { - return nil, ErrShortRead - } - - p = p[:n] - if hdr != nil { - dh, _, err := unpackMsgHdr(p, 0) - if err != nil { - return nil, err - } - *hdr = dh - } - return p, err -} - -// Read implements the net.Conn read method. 
-func (co *Conn) Read(p []byte) (n int, err error) { - if co.Conn == nil { - return 0, ErrConnEmpty - } - - switch co.Conn.(type) { - case *net.TCPConn, *tls.Conn: - var length uint16 - if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil { - return 0, err - } - if int(length) > len(p) { - return 0, io.ErrShortBuffer - } - - return io.ReadFull(co.Conn, p[:length]) - } - - // UDP connection - return co.Conn.Read(p) -} - -// WriteMsg sends a message through the connection co. -// If the message m contains a TSIG record the transaction -// signature is calculated. -func (co *Conn) WriteMsg(m *Msg) (err error) { - var out []byte - if t := m.IsTsig(); t != nil { - mac := "" - if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { - return ErrSecret - } - out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) - // Set for the next read, although only used in zone transfers - co.tsigRequestMAC = mac - } else { - out, err = m.Pack() - } - if err != nil { - return err - } - _, err = co.Write(out) - return err -} - -// Write implements the net.Conn Write method. 
-func (co *Conn) Write(p []byte) (n int, err error) { - switch co.Conn.(type) { - case *net.TCPConn, *tls.Conn: - if len(p) > MaxMsgSize { - return 0, &Error{err: "message too large"} - } - - l := make([]byte, 2) - binary.BigEndian.PutUint16(l, uint16(len(p))) - - n, err := (&net.Buffers{l, p}).WriteTo(co.Conn) - return int(n), err - } - - return co.Conn.Write(p) -} - -// Return the appropriate timeout for a specific request -func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration { - var requestTimeout time.Duration - if c.Timeout != 0 { - requestTimeout = c.Timeout - } else { - requestTimeout = timeout - } - // net.Dialer.Timeout has priority if smaller than the timeouts computed so - // far - if c.Dialer != nil && c.Dialer.Timeout != 0 { - if c.Dialer.Timeout < requestTimeout { - requestTimeout = c.Dialer.Timeout - } - } - return requestTimeout -} - -// Dial connects to the address on the named network. -func Dial(network, address string) (conn *Conn, err error) { - conn = new(Conn) - conn.Conn, err = net.Dial(network, address) - if err != nil { - return nil, err - } - return conn, nil -} - -// ExchangeContext performs a synchronous UDP query, like Exchange. It -// additionally obeys deadlines from the passed Context. -func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) { - client := Client{Net: "udp"} - r, _, err = client.ExchangeContext(ctx, m, a) - // ignorint rtt to leave the original ExchangeContext API unchanged, but - // this function will go away - return r, err -} - -// ExchangeConn performs a synchronous query. It sends the message m via the connection -// c and waits for a reply. The connection c is not closed by ExchangeConn. 
-// Deprecated: This function is going away, but can easily be mimicked: -// -// co := &dns.Conn{Conn: c} // c is your net.Conn -// co.WriteMsg(m) -// in, _ := co.ReadMsg() -// co.Close() -// -func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { - println("dns: ExchangeConn: this function is deprecated") - co := new(Conn) - co.Conn = c - if err = co.WriteMsg(m); err != nil { - return nil, err - } - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - return r, err -} - -// DialTimeout acts like Dial but takes a timeout. -func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { - client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}} - return client.Dial(address) -} - -// DialWithTLS connects to the address on the named network with TLS. -func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) { - if !strings.HasSuffix(network, "-tls") { - network += "-tls" - } - client := Client{Net: network, TLSConfig: tlsConfig} - return client.Dial(address) -} - -// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. -func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) { - if !strings.HasSuffix(network, "-tls") { - network += "-tls" - } - client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig} - return client.Dial(address) -} - -// ExchangeContext acts like Exchange, but honors the deadline on the provided -// context, if present. If there is both a context deadline and a configured -// timeout on the client, the earliest of the two takes effect. 
-func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - var timeout time.Duration - if deadline, ok := ctx.Deadline(); !ok { - timeout = 0 - } else { - timeout = time.Until(deadline) - } - // not passing the context to the underlying calls, as the API does not support - // context. For timeouts you should set up Client.Dialer and call Client.Exchange. - // TODO(tmthrgd,miekg): this is a race condition. - c.Dialer = &net.Dialer{Timeout: timeout} - return c.Exchange(m, a) -} diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go deleted file mode 100644 index e11b630df..000000000 --- a/vendor/github.com/miekg/dns/clientconfig.go +++ /dev/null @@ -1,135 +0,0 @@ -package dns - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" -) - -// ClientConfig wraps the contents of the /etc/resolv.conf file. -type ClientConfig struct { - Servers []string // servers to use - Search []string // suffixes to append to local name - Port string // what port to use - Ndots int // number of dots in name to trigger absolute lookup - Timeout int // seconds before giving up on packet - Attempts int // lost packets before giving up on server, not used in the package dns -} - -// ClientConfigFromFile parses a resolv.conf(5) like file and returns -// a *ClientConfig. 
-func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { - file, err := os.Open(resolvconf) - if err != nil { - return nil, err - } - defer file.Close() - return ClientConfigFromReader(file) -} - -// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument -func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { - c := new(ClientConfig) - scanner := bufio.NewScanner(resolvconf) - c.Servers = make([]string, 0) - c.Search = make([]string, 0) - c.Port = "53" - c.Ndots = 1 - c.Timeout = 5 - c.Attempts = 2 - - for scanner.Scan() { - if err := scanner.Err(); err != nil { - return nil, err - } - line := scanner.Text() - f := strings.Fields(line) - if len(f) < 1 { - continue - } - switch f[0] { - case "nameserver": // add one name server - if len(f) > 1 { - // One more check: make sure server name is - // just an IP address. Otherwise we need DNS - // to look it up. - name := f[1] - c.Servers = append(c.Servers, name) - } - - case "domain": // set search path to just this domain - if len(f) > 1 { - c.Search = make([]string, 1) - c.Search[0] = f[1] - } else { - c.Search = make([]string, 0) - } - - case "search": // set search path to given servers - c.Search = append([]string(nil), f[1:]...) - - case "options": // magic options - for _, s := range f[1:] { - switch { - case len(s) >= 6 && s[:6] == "ndots:": - n, _ := strconv.Atoi(s[6:]) - if n < 0 { - n = 0 - } else if n > 15 { - n = 15 - } - c.Ndots = n - case len(s) >= 8 && s[:8] == "timeout:": - n, _ := strconv.Atoi(s[8:]) - if n < 1 { - n = 1 - } - c.Timeout = n - case len(s) >= 9 && s[:9] == "attempts:": - n, _ := strconv.Atoi(s[9:]) - if n < 1 { - n = 1 - } - c.Attempts = n - case s == "rotate": - /* not imp */ - } - } - } - } - return c, nil -} - -// NameList returns all of the names that should be queried based on the -// config. It is based off of go's net/dns name building, but it does not -// check the length of the resulting names. 
-func (c *ClientConfig) NameList(name string) []string { - // if this domain is already fully qualified, no append needed. - if IsFqdn(name) { - return []string{name} - } - - // Check to see if the name has more labels than Ndots. Do this before making - // the domain fully qualified. - hasNdots := CountLabel(name) > c.Ndots - // Make the domain fully qualified. - name = Fqdn(name) - - // Make a list of names based off search. - names := []string{} - - // If name has enough dots, try that first. - if hasNdots { - names = append(names, name) - } - for _, s := range c.Search { - names = append(names, Fqdn(name+s)) - } - // If we didn't have enough dots, try after suffixes. - if !hasNdots { - names = append(names, name) - } - return names -} diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go deleted file mode 100644 index 8c4a14ef1..000000000 --- a/vendor/github.com/miekg/dns/dane.go +++ /dev/null @@ -1,43 +0,0 @@ -package dns - -import ( - "crypto/sha256" - "crypto/sha512" - "crypto/x509" - "encoding/hex" - "errors" -) - -// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records. 
-func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { - switch matchingType { - case 0: - switch selector { - case 0: - return hex.EncodeToString(cert.Raw), nil - case 1: - return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil - } - case 1: - h := sha256.New() - switch selector { - case 0: - h.Write(cert.Raw) - return hex.EncodeToString(h.Sum(nil)), nil - case 1: - h.Write(cert.RawSubjectPublicKeyInfo) - return hex.EncodeToString(h.Sum(nil)), nil - } - case 2: - h := sha512.New() - switch selector { - case 0: - h.Write(cert.Raw) - return hex.EncodeToString(h.Sum(nil)), nil - case 1: - h.Write(cert.RawSubjectPublicKeyInfo) - return hex.EncodeToString(h.Sum(nil)), nil - } - } - return "", errors.New("dns: bad MatchingType or Selector") -} diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go deleted file mode 100644 index b059f6fc6..000000000 --- a/vendor/github.com/miekg/dns/defaults.go +++ /dev/null @@ -1,378 +0,0 @@ -package dns - -import ( - "errors" - "net" - "strconv" - "strings" -) - -const hexDigit = "0123456789abcdef" - -// Everything is assumed in ClassINET. - -// SetReply creates a reply message from a request message. -func (dns *Msg) SetReply(request *Msg) *Msg { - dns.Id = request.Id - dns.Response = true - dns.Opcode = request.Opcode - if dns.Opcode == OpcodeQuery { - dns.RecursionDesired = request.RecursionDesired // Copy rd bit - dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit - } - dns.Rcode = RcodeSuccess - if len(request.Question) > 0 { - dns.Question = make([]Question, 1) - dns.Question[0] = request.Question[0] - } - return dns -} - -// SetQuestion creates a question message, it sets the Question -// section, generates an Id and sets the RecursionDesired (RD) -// bit to true. 
-func (dns *Msg) SetQuestion(z string, t uint16) *Msg { - dns.Id = Id() - dns.RecursionDesired = true - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, t, ClassINET} - return dns -} - -// SetNotify creates a notify message, it sets the Question -// section, generates an Id and sets the Authoritative (AA) -// bit to true. -func (dns *Msg) SetNotify(z string) *Msg { - dns.Opcode = OpcodeNotify - dns.Authoritative = true - dns.Id = Id() - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, TypeSOA, ClassINET} - return dns -} - -// SetRcode creates an error message suitable for the request. -func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg { - dns.SetReply(request) - dns.Rcode = rcode - return dns -} - -// SetRcodeFormatError creates a message with FormError set. -func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg { - dns.Rcode = RcodeFormatError - dns.Opcode = OpcodeQuery - dns.Response = true - dns.Authoritative = false - dns.Id = request.Id - return dns -} - -// SetUpdate makes the message a dynamic update message. It -// sets the ZONE section to: z, TypeSOA, ClassINET. -func (dns *Msg) SetUpdate(z string) *Msg { - dns.Id = Id() - dns.Response = false - dns.Opcode = OpcodeUpdate - dns.Compress = false // BIND9 cannot handle compression - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, TypeSOA, ClassINET} - return dns -} - -// SetIxfr creates message for requesting an IXFR. -func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg { - dns.Id = Id() - dns.Question = make([]Question, 1) - dns.Ns = make([]RR, 1) - s := new(SOA) - s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0} - s.Serial = serial - s.Ns = ns - s.Mbox = mbox - dns.Question[0] = Question{z, TypeIXFR, ClassINET} - dns.Ns[0] = s - return dns -} - -// SetAxfr creates message for requesting an AXFR. 
-func (dns *Msg) SetAxfr(z string) *Msg { - dns.Id = Id() - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, TypeAXFR, ClassINET} - return dns -} - -// SetTsig appends a TSIG RR to the message. -// This is only a skeleton TSIG RR that is added as the last RR in the -// additional section. The Tsig is calculated when the message is being send. -func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg { - t := new(TSIG) - t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} - t.Algorithm = algo - t.Fudge = fudge - t.TimeSigned = uint64(timesigned) - t.OrigId = dns.Id - dns.Extra = append(dns.Extra, t) - return dns -} - -// SetEdns0 appends a EDNS0 OPT RR to the message. -// TSIG should always the last RR in a message. -func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg { - e := new(OPT) - e.Hdr.Name = "." - e.Hdr.Rrtype = TypeOPT - e.SetUDPSize(udpsize) - if do { - e.SetDo() - } - dns.Extra = append(dns.Extra, e) - return dns -} - -// IsTsig checks if the message has a TSIG record as the last record -// in the additional section. It returns the TSIG record found or nil. -func (dns *Msg) IsTsig() *TSIG { - if len(dns.Extra) > 0 { - if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG { - return dns.Extra[len(dns.Extra)-1].(*TSIG) - } - } - return nil -} - -// IsEdns0 checks if the message has a EDNS0 (OPT) record, any EDNS0 -// record in the additional section will do. It returns the OPT record -// found or nil. -func (dns *Msg) IsEdns0() *OPT { - // RFC 6891, Section 6.1.1 allows the OPT record to appear - // anywhere in the additional record section, but it's usually at - // the end so start there. - for i := len(dns.Extra) - 1; i >= 0; i-- { - if dns.Extra[i].Header().Rrtype == TypeOPT { - return dns.Extra[i].(*OPT) - } - } - return nil -} - -// popEdns0 is like IsEdns0, but it removes the record from the message. 
-func (dns *Msg) popEdns0() *OPT { - // RFC 6891, Section 6.1.1 allows the OPT record to appear - // anywhere in the additional record section, but it's usually at - // the end so start there. - for i := len(dns.Extra) - 1; i >= 0; i-- { - if dns.Extra[i].Header().Rrtype == TypeOPT { - opt := dns.Extra[i].(*OPT) - dns.Extra = append(dns.Extra[:i], dns.Extra[i+1:]...) - return opt - } - } - return nil -} - -// IsDomainName checks if s is a valid domain name, it returns the number of -// labels and true, when a domain name is valid. Note that non fully qualified -// domain name is considered valid, in this case the last label is counted in -// the number of labels. When false is returned the number of labels is not -// defined. Also note that this function is extremely liberal; almost any -// string is a valid domain name as the DNS is 8 bit protocol. It checks if each -// label fits in 63 characters and that the entire name will fit into the 255 -// octet wire format limit. -func IsDomainName(s string) (labels int, ok bool) { - // XXX: The logic in this function was copied from packDomainName and - // should be kept in sync with that function. - - const lenmsg = 256 - - if len(s) == 0 { // Ok, for instance when dealing with update RR without any rdata. - return 0, false - } - - s = Fqdn(s) - - // Each dot ends a segment of the name. Except for escaped dots (\.), which - // are normal dots. 
- - var ( - off int - begin int - wasDot bool - ) - for i := 0; i < len(s); i++ { - switch s[i] { - case '\\': - if off+1 > lenmsg { - return labels, false - } - - // check for \DDD - if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { - i += 3 - begin += 3 - } else { - i++ - begin++ - } - - wasDot = false - case '.': - if wasDot { - // two dots back to back is not legal - return labels, false - } - wasDot = true - - labelLen := i - begin - if labelLen >= 1<<6 { // top two bits of length must be clear - return labels, false - } - - // off can already (we're in a loop) be bigger than lenmsg - // this happens when a name isn't fully qualified - off += 1 + labelLen - if off > lenmsg { - return labels, false - } - - labels++ - begin = i + 1 - default: - wasDot = false - } - } - - return labels, true -} - -// IsSubDomain checks if child is indeed a child of the parent. If child and parent -// are the same domain true is returned as well. -func IsSubDomain(parent, child string) bool { - // Entire child is contained in parent - return CompareDomainName(parent, child) == CountLabel(parent) -} - -// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet. -// The checking is performed on the binary payload. -func IsMsg(buf []byte) error { - // Header - if len(buf) < headerSize { - return errors.New("dns: bad message header") - } - // Header: Opcode - // TODO(miek): more checks here, e.g. check all header bits. - return nil -} - -// IsFqdn checks if a domain name is fully qualified. -func IsFqdn(s string) bool { - s2 := strings.TrimSuffix(s, ".") - if s == s2 { - return false - } - - i := strings.LastIndexFunc(s2, func(r rune) bool { - return r != '\\' - }) - - // Test whether we have an even number of escape sequences before - // the dot or none. - return (len(s2)-i)%2 != 0 -} - -// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. -// This means the RRs need to have the same type, name, and class. 
Returns true -// if the RR set is valid, otherwise false. -func IsRRset(rrset []RR) bool { - if len(rrset) == 0 { - return false - } - if len(rrset) == 1 { - return true - } - rrHeader := rrset[0].Header() - rrType := rrHeader.Rrtype - rrClass := rrHeader.Class - rrName := rrHeader.Name - - for _, rr := range rrset[1:] { - curRRHeader := rr.Header() - if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { - // Mismatch between the records, so this is not a valid rrset for - //signing/verifying - return false - } - } - - return true -} - -// Fqdn return the fully qualified domain name from s. -// If s is already fully qualified, it behaves as the identity function. -func Fqdn(s string) string { - if IsFqdn(s) { - return s - } - return s + "." -} - -// Copied from the official Go code. - -// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP -// address suitable for reverse DNS (PTR) record lookups or an error if it fails -// to parse the IP address. -func ReverseAddr(addr string) (arpa string, err error) { - ip := net.ParseIP(addr) - if ip == nil { - return "", &Error{err: "unrecognized address: " + addr} - } - if v4 := ip.To4(); v4 != nil { - buf := make([]byte, 0, net.IPv4len*4+len("in-addr.arpa.")) - // Add it, in reverse, to the buffer - for i := len(v4) - 1; i >= 0; i-- { - buf = strconv.AppendInt(buf, int64(v4[i]), 10) - buf = append(buf, '.') - } - // Append "in-addr.arpa." and return (buf already has the final .) - buf = append(buf, "in-addr.arpa."...) - return string(buf), nil - } - // Must be IPv6 - buf := make([]byte, 0, net.IPv6len*4+len("ip6.arpa.")) - // Add it, in reverse, to the buffer - for i := len(ip) - 1; i >= 0; i-- { - v := ip[i] - buf = append(buf, hexDigit[v&0xF]) - buf = append(buf, '.') - buf = append(buf, hexDigit[v>>4]) - buf = append(buf, '.') - } - // Append "ip6.arpa." and return (buf already has the final .) - buf = append(buf, "ip6.arpa."...) 
- return string(buf), nil -} - -// String returns the string representation for the type t. -func (t Type) String() string { - if t1, ok := TypeToString[uint16(t)]; ok { - return t1 - } - return "TYPE" + strconv.Itoa(int(t)) -} - -// String returns the string representation for the class c. -func (c Class) String() string { - if s, ok := ClassToString[uint16(c)]; ok { - // Only emit mnemonics when they are unambiguous, specically ANY is in both. - if _, ok := StringToType[s]; !ok { - return s - } - } - return "CLASS" + strconv.Itoa(int(c)) -} - -// String returns the string representation for the name n. -func (n Name) String() string { - return sprintName(string(n)) -} diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go deleted file mode 100644 index f57337b89..000000000 --- a/vendor/github.com/miekg/dns/dns.go +++ /dev/null @@ -1,134 +0,0 @@ -package dns - -import "strconv" - -const ( - year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. - defaultTtl = 3600 // Default internal TTL. - - // DefaultMsgSize is the standard default for messages larger than 512 bytes. - DefaultMsgSize = 4096 - // MinMsgSize is the minimal size of a DNS packet. - MinMsgSize = 512 - // MaxMsgSize is the largest possible DNS packet. - MaxMsgSize = 65535 -) - -// Error represents a DNS error. -type Error struct{ err string } - -func (e *Error) Error() string { - if e == nil { - return "dns: " - } - return "dns: " + e.err -} - -// An RR represents a resource record. -type RR interface { - // Header returns the header of an resource record. The header contains - // everything up to the rdata. - Header() *RR_Header - // String returns the text representation of the resource record. - String() string - - // copy returns a copy of the RR - copy() RR - - // len returns the length (in octets) of the compressed or uncompressed RR in wire format. 
- // - // If compression is nil, the uncompressed size will be returned, otherwise the compressed - // size will be returned and domain names will be added to the map for future compression. - len(off int, compression map[string]struct{}) int - - // pack packs the records RDATA into wire format. The header will - // already have been packed into msg. - pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) - - // unpack unpacks an RR from wire format. - // - // This will only be called on a new and empty RR type with only the header populated. It - // will only be called if the record's RDATA is non-empty. - unpack(msg []byte, off int) (off1 int, err error) - - // parse parses an RR from zone file format. - // - // This will only be called on a new and empty RR type with only the header populated. - parse(c *zlexer, origin, file string) *ParseError - - // isDuplicate returns whether the two RRs are duplicates. - isDuplicate(r2 RR) bool -} - -// RR_Header is the header all DNS resource records share. -type RR_Header struct { - Name string `dns:"cdomain-name"` - Rrtype uint16 - Class uint16 - Ttl uint32 - Rdlength uint16 // Length of data after header. -} - -// Header returns itself. This is here to make RR_Header implements the RR interface. -func (h *RR_Header) Header() *RR_Header { return h } - -// Just to implement the RR interface. 
-func (h *RR_Header) copy() RR { return nil } - -func (h *RR_Header) String() string { - var s string - - if h.Rrtype == TypeOPT { - s = ";" - // and maybe other things - } - - s += sprintName(h.Name) + "\t" - s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" - s += Class(h.Class).String() + "\t" - s += Type(h.Rrtype).String() + "\t" - return s -} - -func (h *RR_Header) len(off int, compression map[string]struct{}) int { - l := domainNameLen(h.Name, off, compression, true) - l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) - return l -} - -func (h *RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - // RR_Header has no RDATA to pack. - return off, nil -} - -func (h *RR_Header) unpack(msg []byte, off int) (int, error) { - panic("dns: internal error: unpack should never be called on RR_Header") -} - -func (h *RR_Header) parse(c *zlexer, origin, file string) *ParseError { - panic("dns: internal error: parse should never be called on RR_Header") -} - -// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. 
-func (rr *RFC3597) ToRFC3597(r RR) error { - buf := make([]byte, Len(r)*2) - headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false) - if err != nil { - return err - } - buf = buf[:off] - - *rr = RFC3597{Hdr: *r.Header()} - rr.Hdr.Rdlength = uint16(off - headerEnd) - - if noRdata(rr.Hdr) { - return nil - } - - _, err = rr.unpack(buf, headerEnd) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go deleted file mode 100644 index faf1dc659..000000000 --- a/vendor/github.com/miekg/dns/dnssec.go +++ /dev/null @@ -1,791 +0,0 @@ -package dns - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - _ "crypto/md5" - "crypto/rand" - "crypto/rsa" - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/asn1" - "encoding/binary" - "encoding/hex" - "math/big" - "sort" - "strings" - "time" - - "golang.org/x/crypto/ed25519" -) - -// DNSSEC encryption algorithm codes. -const ( - _ uint8 = iota - RSAMD5 - DH - DSA - _ // Skip 4, RFC 6725, section 2.1 - RSASHA1 - DSANSEC3SHA1 - RSASHA1NSEC3SHA1 - RSASHA256 - _ // Skip 9, RFC 6725, section 2.1 - RSASHA512 - _ // Skip 11, RFC 6725, section 2.1 - ECCGOST - ECDSAP256SHA256 - ECDSAP384SHA384 - ED25519 - ED448 - INDIRECT uint8 = 252 - PRIVATEDNS uint8 = 253 // Private (experimental keys) - PRIVATEOID uint8 = 254 -) - -// AlgorithmToString is a map of algorithm IDs to algorithm names. 
-var AlgorithmToString = map[uint8]string{ - RSAMD5: "RSAMD5", - DH: "DH", - DSA: "DSA", - RSASHA1: "RSASHA1", - DSANSEC3SHA1: "DSA-NSEC3-SHA1", - RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", - RSASHA256: "RSASHA256", - RSASHA512: "RSASHA512", - ECCGOST: "ECC-GOST", - ECDSAP256SHA256: "ECDSAP256SHA256", - ECDSAP384SHA384: "ECDSAP384SHA384", - ED25519: "ED25519", - ED448: "ED448", - INDIRECT: "INDIRECT", - PRIVATEDNS: "PRIVATEDNS", - PRIVATEOID: "PRIVATEOID", -} - -// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's. -var AlgorithmToHash = map[uint8]crypto.Hash{ - RSAMD5: crypto.MD5, // Deprecated in RFC 6725 - DSA: crypto.SHA1, - RSASHA1: crypto.SHA1, - RSASHA1NSEC3SHA1: crypto.SHA1, - RSASHA256: crypto.SHA256, - ECDSAP256SHA256: crypto.SHA256, - ECDSAP384SHA384: crypto.SHA384, - RSASHA512: crypto.SHA512, - ED25519: crypto.Hash(0), -} - -// DNSSEC hashing algorithm codes. -const ( - _ uint8 = iota - SHA1 // RFC 4034 - SHA256 // RFC 4509 - GOST94 // RFC 5933 - SHA384 // Experimental - SHA512 // Experimental -) - -// HashToString is a map of hash IDs to names. -var HashToString = map[uint8]string{ - SHA1: "SHA1", - SHA256: "SHA256", - GOST94: "GOST94", - SHA384: "SHA384", - SHA512: "SHA512", -} - -// DNSKEY flag values. -const ( - SEP = 1 - REVOKE = 1 << 7 - ZONE = 1 << 8 -) - -// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing. -type rrsigWireFmt struct { - TypeCovered uint16 - Algorithm uint8 - Labels uint8 - OrigTtl uint32 - Expiration uint32 - Inception uint32 - KeyTag uint16 - SignerName string `dns:"domain-name"` - /* No Signature */ -} - -// Used for converting DNSKEY's rdata to wirefmt. -type dnskeyWireFmt struct { - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` - /* Nothing is left out */ -} - -func divRoundUp(a, b int) int { - return (a + b - 1) / b -} - -// KeyTag calculates the keytag (or key-id) of the DNSKEY. 
-func (k *DNSKEY) KeyTag() uint16 { - if k == nil { - return 0 - } - var keytag int - switch k.Algorithm { - case RSAMD5: - // Look at the bottom two bytes of the modules, which the last - // item in the pubkey. We could do this faster by looking directly - // at the base64 values. But I'm lazy. - modulus, _ := fromBase64([]byte(k.PublicKey)) - if len(modulus) > 1 { - x := binary.BigEndian.Uint16(modulus[len(modulus)-2:]) - keytag = int(x) - } - default: - keywire := new(dnskeyWireFmt) - keywire.Flags = k.Flags - keywire.Protocol = k.Protocol - keywire.Algorithm = k.Algorithm - keywire.PublicKey = k.PublicKey - wire := make([]byte, DefaultMsgSize) - n, err := packKeyWire(keywire, wire) - if err != nil { - return 0 - } - wire = wire[:n] - for i, v := range wire { - if i&1 != 0 { - keytag += int(v) // must be larger than uint32 - } else { - keytag += int(v) << 8 - } - } - keytag += keytag >> 16 & 0xFFFF - keytag &= 0xFFFF - } - return uint16(keytag) -} - -// ToDS converts a DNSKEY record to a DS record. -func (k *DNSKEY) ToDS(h uint8) *DS { - if k == nil { - return nil - } - ds := new(DS) - ds.Hdr.Name = k.Hdr.Name - ds.Hdr.Class = k.Hdr.Class - ds.Hdr.Rrtype = TypeDS - ds.Hdr.Ttl = k.Hdr.Ttl - ds.Algorithm = k.Algorithm - ds.DigestType = h - ds.KeyTag = k.KeyTag() - - keywire := new(dnskeyWireFmt) - keywire.Flags = k.Flags - keywire.Protocol = k.Protocol - keywire.Algorithm = k.Algorithm - keywire.PublicKey = k.PublicKey - wire := make([]byte, DefaultMsgSize) - n, err := packKeyWire(keywire, wire) - if err != nil { - return nil - } - wire = wire[:n] - - owner := make([]byte, 255) - off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) - if err1 != nil { - return nil - } - owner = owner[:off] - // RFC4034: - // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA); - // "|" denotes concatenation - // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. 
- - var hash crypto.Hash - switch h { - case SHA1: - hash = crypto.SHA1 - case SHA256: - hash = crypto.SHA256 - case SHA384: - hash = crypto.SHA384 - case SHA512: - hash = crypto.SHA512 - default: - return nil - } - - s := hash.New() - s.Write(owner) - s.Write(wire) - ds.Digest = hex.EncodeToString(s.Sum(nil)) - return ds -} - -// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. -func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { - c := &CDNSKEY{DNSKEY: *k} - c.Hdr = k.Hdr - c.Hdr.Rrtype = TypeCDNSKEY - return c -} - -// ToCDS converts a DS record to a CDS record. -func (d *DS) ToCDS() *CDS { - c := &CDS{DS: *d} - c.Hdr = d.Hdr - c.Hdr.Rrtype = TypeCDS - return c -} - -// Sign signs an RRSet. The signature needs to be filled in with the values: -// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied -// from the RRset. Sign returns a non-nill error when the signing went OK. -// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non -// zero, it is used as-is, otherwise the TTL of the RRset is used as the -// OrigTTL. 
-func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { - if k == nil { - return ErrPrivKey - } - // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return ErrKey - } - - h0 := rrset[0].Header() - rr.Hdr.Rrtype = TypeRRSIG - rr.Hdr.Name = h0.Name - rr.Hdr.Class = h0.Class - if rr.OrigTtl == 0 { // If set don't override - rr.OrigTtl = h0.Ttl - } - rr.TypeCovered = h0.Rrtype - rr.Labels = uint8(CountLabel(h0.Name)) - - if strings.HasPrefix(h0.Name, "*") { - rr.Labels-- // wildcard, remove from label count - } - - sigwire := new(rrsigWireFmt) - sigwire.TypeCovered = rr.TypeCovered - sigwire.Algorithm = rr.Algorithm - sigwire.Labels = rr.Labels - sigwire.OrigTtl = rr.OrigTtl - sigwire.Expiration = rr.Expiration - sigwire.Inception = rr.Inception - sigwire.KeyTag = rr.KeyTag - // For signing, lowercase this name - sigwire.SignerName = strings.ToLower(rr.SignerName) - - // Create the desired binary blob - signdata := make([]byte, DefaultMsgSize) - n, err := packSigWire(sigwire, signdata) - if err != nil { - return err - } - signdata = signdata[:n] - wire, err := rawSignatureData(rrset, rr) - if err != nil { - return err - } - - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return ErrAlg - } - - switch rr.Algorithm { - case ED25519: - // ed25519 signs the raw message and performs hashing internally. - // All other supported signature schemes operate over the pre-hashed - // message, and thus ed25519 must be handled separately here. - // - // The raw message is passed directly into sign and crypto.Hash(0) is - // used to signal to the crypto.Signer that the data has not been hashed. 
- signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm) - if err != nil { - return err - } - - rr.Signature = toBase64(signature) - default: - h := hash.New() - h.Write(signdata) - h.Write(wire) - - signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) - if err != nil { - return err - } - - rr.Signature = toBase64(signature) - } - - return nil -} - -func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { - signature, err := k.Sign(rand.Reader, hashed, hash) - if err != nil { - return nil, err - } - - switch alg { - case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: - return signature, nil - - case ECDSAP256SHA256, ECDSAP384SHA384: - ecdsaSignature := &struct { - R, S *big.Int - }{} - if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil { - return nil, err - } - - var intlen int - switch alg { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - - signature := intToBytes(ecdsaSignature.R, intlen) - signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) - return signature, nil - - // There is no defined interface for what a DSA backed crypto.Signer returns - case DSA, DSANSEC3SHA1: - // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) - // signature := []byte{byte(t)} - // signature = append(signature, intToBytes(r1, 20)...) - // signature = append(signature, intToBytes(s1, 20)...) - // rr.Signature = signature - - case ED25519: - return signature, nil - } - - return nil, ErrAlg -} - -// Verify validates an RRSet with the signature and key. This is only the -// cryptographic test, the signature validity period must be checked separately. -// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work. 
-func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { - // First the easy checks - if !IsRRset(rrset) { - return ErrRRset - } - if rr.KeyTag != k.KeyTag() { - return ErrKey - } - if rr.Hdr.Class != k.Hdr.Class { - return ErrKey - } - if rr.Algorithm != k.Algorithm { - return ErrKey - } - if !strings.EqualFold(rr.SignerName, k.Hdr.Name) { - return ErrKey - } - if k.Protocol != 3 { - return ErrKey - } - - // IsRRset checked that we have at least one RR and that the RRs in - // the set have consistent type, class, and name. Also check that type and - // class matches the RRSIG record. - if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered { - return ErrRRset - } - - // RFC 4035 5.3.2. Reconstructing the Signed Data - // Copy the sig, except the rrsig data - sigwire := new(rrsigWireFmt) - sigwire.TypeCovered = rr.TypeCovered - sigwire.Algorithm = rr.Algorithm - sigwire.Labels = rr.Labels - sigwire.OrigTtl = rr.OrigTtl - sigwire.Expiration = rr.Expiration - sigwire.Inception = rr.Inception - sigwire.KeyTag = rr.KeyTag - sigwire.SignerName = strings.ToLower(rr.SignerName) - // Create the desired binary blob - signeddata := make([]byte, DefaultMsgSize) - n, err := packSigWire(sigwire, signeddata) - if err != nil { - return err - } - signeddata = signeddata[:n] - wire, err := rawSignatureData(rrset, rr) - if err != nil { - return err - } - - sigbuf := rr.sigBuf() // Get the binary signature data - if rr.Algorithm == PRIVATEDNS { // PRIVATEOID - // TODO(miek) - // remove the domain name and assume its ours? - } - - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return ErrAlg - } - - switch rr.Algorithm { - case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5: - // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere?? 
- pubkey := k.publicKeyRSA() // Get the key - if pubkey == nil { - return ErrKey - } - - h := hash.New() - h.Write(signeddata) - h.Write(wire) - return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf) - - case ECDSAP256SHA256, ECDSAP384SHA384: - pubkey := k.publicKeyECDSA() - if pubkey == nil { - return ErrKey - } - - // Split sigbuf into the r and s coordinates - r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2]) - s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:]) - - h := hash.New() - h.Write(signeddata) - h.Write(wire) - if ecdsa.Verify(pubkey, h.Sum(nil), r, s) { - return nil - } - return ErrSig - - case ED25519: - pubkey := k.publicKeyED25519() - if pubkey == nil { - return ErrKey - } - - if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) { - return nil - } - return ErrSig - - default: - return ErrAlg - } -} - -// ValidityPeriod uses RFC1982 serial arithmetic to calculate -// if a signature period is valid. If t is the zero time, the -// current time is taken other t is. Returns true if the signature -// is valid at the given time, otherwise returns false. -func (rr *RRSIG) ValidityPeriod(t time.Time) bool { - var utc int64 - if t.IsZero() { - utc = time.Now().UTC().Unix() - } else { - utc = t.UTC().Unix() - } - modi := (int64(rr.Inception) - utc) / year68 - mode := (int64(rr.Expiration) - utc) / year68 - ti := int64(rr.Inception) + modi*year68 - te := int64(rr.Expiration) + mode*year68 - return ti <= utc && utc <= te -} - -// Return the signatures base64 encodedig sigdata as a byte slice. -func (rr *RRSIG) sigBuf() []byte { - sigbuf, err := fromBase64([]byte(rr.Signature)) - if err != nil { - return nil - } - return sigbuf -} - -// publicKeyRSA returns the RSA public key from a DNSKEY record. 
-func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - - if len(keybuf) < 1+1+64 { - // Exponent must be at least 1 byte and modulus at least 64 - return nil - } - - // RFC 2537/3110, section 2. RSA Public KEY Resource Records - // Length is in the 0th byte, unless its zero, then it - // it in bytes 1 and 2 and its a 16 bit number - explen := uint16(keybuf[0]) - keyoff := 1 - if explen == 0 { - explen = uint16(keybuf[1])<<8 | uint16(keybuf[2]) - keyoff = 3 - } - - if explen > 4 || explen == 0 || keybuf[keyoff] == 0 { - // Exponent larger than supported by the crypto package, - // empty, or contains prohibited leading zero. - return nil - } - - modoff := keyoff + int(explen) - modlen := len(keybuf) - modoff - if modlen < 64 || modlen > 512 || keybuf[modoff] == 0 { - // Modulus is too small, large, or contains prohibited leading zero. - return nil - } - - pubkey := new(rsa.PublicKey) - - var expo uint64 - // The exponent of length explen is between keyoff and modoff. - for _, v := range keybuf[keyoff:modoff] { - expo <<= 8 - expo |= uint64(v) - } - if expo > 1<<31-1 { - // Larger exponent than supported by the crypto package. - return nil - } - - pubkey.E = int(expo) - pubkey.N = new(big.Int).SetBytes(keybuf[modoff:]) - return pubkey -} - -// publicKeyECDSA returns the Curve public key from the DNSKEY record. 
-func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - pubkey := new(ecdsa.PublicKey) - switch k.Algorithm { - case ECDSAP256SHA256: - pubkey.Curve = elliptic.P256() - if len(keybuf) != 64 { - // wrongly encoded key - return nil - } - case ECDSAP384SHA384: - pubkey.Curve = elliptic.P384() - if len(keybuf) != 96 { - // Wrongly encoded key - return nil - } - } - pubkey.X = new(big.Int).SetBytes(keybuf[:len(keybuf)/2]) - pubkey.Y = new(big.Int).SetBytes(keybuf[len(keybuf)/2:]) - return pubkey -} - -func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - if len(keybuf) < 22 { - return nil - } - t, keybuf := int(keybuf[0]), keybuf[1:] - size := 64 + t*8 - q, keybuf := keybuf[:20], keybuf[20:] - if len(keybuf) != 3*size { - return nil - } - p, keybuf := keybuf[:size], keybuf[size:] - g, y := keybuf[:size], keybuf[size:] - pubkey := new(dsa.PublicKey) - pubkey.Parameters.Q = new(big.Int).SetBytes(q) - pubkey.Parameters.P = new(big.Int).SetBytes(p) - pubkey.Parameters.G = new(big.Int).SetBytes(g) - pubkey.Y = new(big.Int).SetBytes(y) - return pubkey -} - -func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - if len(keybuf) != ed25519.PublicKeySize { - return nil - } - return keybuf -} - -type wireSlice [][]byte - -func (p wireSlice) Len() int { return len(p) } -func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p wireSlice) Less(i, j int) bool { - _, ioff, _ := UnpackDomainName(p[i], 0) - _, joff, _ := UnpackDomainName(p[j], 0) - return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0 -} - -// Return the raw signature data. 
-func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { - wires := make(wireSlice, len(rrset)) - for i, r := range rrset { - r1 := r.copy() - h := r1.Header() - h.Ttl = s.OrigTtl - labels := SplitDomainName(h.Name) - // 6.2. Canonical RR Form. (4) - wildcards - if len(labels) > int(s.Labels) { - // Wildcard - h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." - } - // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase - h.Name = strings.ToLower(h.Name) - // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. - // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, - // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, - // SRV, DNAME, A6 - // - // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC): - // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record - // that needs conversion to lowercase, and twice at that. Since HINFO - // records contain no domain names, they are not subject to case - // conversion. 
- switch x := r1.(type) { - case *NS: - x.Ns = strings.ToLower(x.Ns) - case *MD: - x.Md = strings.ToLower(x.Md) - case *MF: - x.Mf = strings.ToLower(x.Mf) - case *CNAME: - x.Target = strings.ToLower(x.Target) - case *SOA: - x.Ns = strings.ToLower(x.Ns) - x.Mbox = strings.ToLower(x.Mbox) - case *MB: - x.Mb = strings.ToLower(x.Mb) - case *MG: - x.Mg = strings.ToLower(x.Mg) - case *MR: - x.Mr = strings.ToLower(x.Mr) - case *PTR: - x.Ptr = strings.ToLower(x.Ptr) - case *MINFO: - x.Rmail = strings.ToLower(x.Rmail) - x.Email = strings.ToLower(x.Email) - case *MX: - x.Mx = strings.ToLower(x.Mx) - case *RP: - x.Mbox = strings.ToLower(x.Mbox) - x.Txt = strings.ToLower(x.Txt) - case *AFSDB: - x.Hostname = strings.ToLower(x.Hostname) - case *RT: - x.Host = strings.ToLower(x.Host) - case *SIG: - x.SignerName = strings.ToLower(x.SignerName) - case *PX: - x.Map822 = strings.ToLower(x.Map822) - x.Mapx400 = strings.ToLower(x.Mapx400) - case *NAPTR: - x.Replacement = strings.ToLower(x.Replacement) - case *KX: - x.Exchanger = strings.ToLower(x.Exchanger) - case *SRV: - x.Target = strings.ToLower(x.Target) - case *DNAME: - x.Target = strings.ToLower(x.Target) - } - // 6.2. Canonical RR Form. (5) - origTTL - wire := make([]byte, Len(r1)+1) // +1 to be safe(r) - off, err1 := PackRR(r1, wire, 0, nil, false) - if err1 != nil { - return nil, err1 - } - wire = wire[:off] - wires[i] = wire - } - sort.Sort(wires) - for i, wire := range wires { - if i > 0 && bytes.Equal(wire, wires[i-1]) { - continue - } - buf = append(buf, wire...) 
- } - return buf, nil -} - -func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) { - // copied from zmsg.go RRSIG packing - off, err := packUint16(sw.TypeCovered, msg, 0) - if err != nil { - return off, err - } - off, err = packUint8(sw.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(sw.Labels, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(sw.OrigTtl, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(sw.Expiration, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(sw.Inception, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(sw.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(sw.SignerName, msg, off, nil, false) - if err != nil { - return off, err - } - return off, nil -} - -func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) { - // copied from zmsg.go DNSKEY packing - off, err := packUint16(dw.Flags, msg, 0) - if err != nil { - return off, err - } - off, err = packUint8(dw.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(dw.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(dw.PublicKey, msg, off) - if err != nil { - return off, err - } - return off, nil -} diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go deleted file mode 100644 index 33e913ac5..000000000 --- a/vendor/github.com/miekg/dns/dnssec_keygen.go +++ /dev/null @@ -1,178 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "math/big" - - "golang.org/x/crypto/ed25519" -) - -// Generate generates a DNSKEY of the given bit size. -// The public part is put inside the DNSKEY record. -// The Algorithm in the key must be set as this will define -// what kind of DNSKEY will be generated. 
-// The ECDSA algorithms imply a fixed keysize, in that case -// bits should be set to the size of the algorithm. -func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { - switch k.Algorithm { - case DSA, DSANSEC3SHA1: - if bits != 1024 { - return nil, ErrKeySize - } - case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: - if bits < 512 || bits > 4096 { - return nil, ErrKeySize - } - case RSASHA512: - if bits < 1024 || bits > 4096 { - return nil, ErrKeySize - } - case ECDSAP256SHA256: - if bits != 256 { - return nil, ErrKeySize - } - case ECDSAP384SHA384: - if bits != 384 { - return nil, ErrKeySize - } - case ED25519: - if bits != 256 { - return nil, ErrKeySize - } - } - - switch k.Algorithm { - case DSA, DSANSEC3SHA1: - params := new(dsa.Parameters) - if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { - return nil, err - } - priv := new(dsa.PrivateKey) - priv.PublicKey.Parameters = *params - err := dsa.GenerateKey(priv, rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) - return priv, nil - case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: - priv, err := rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, err - } - k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) - return priv, nil - case ECDSAP256SHA256, ECDSAP384SHA384: - var c elliptic.Curve - switch k.Algorithm { - case ECDSAP256SHA256: - c = elliptic.P256() - case ECDSAP384SHA384: - c = elliptic.P384() - } - priv, err := ecdsa.GenerateKey(c, rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) - return priv, nil - case ED25519: - pub, priv, err := ed25519.GenerateKey(rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyED25519(pub) - return priv, nil - default: - return nil, ErrAlg - } -} - -// Set the public key (the value E and N) -func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { - 
if _E == 0 || _N == nil { - return false - } - buf := exponentToBuf(_E) - buf = append(buf, _N.Bytes()...) - k.PublicKey = toBase64(buf) - return true -} - -// Set the public key for Elliptic Curves -func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { - if _X == nil || _Y == nil { - return false - } - var intlen int - switch k.Algorithm { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) - return true -} - -// Set the public key for DSA -func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { - if _Q == nil || _P == nil || _G == nil || _Y == nil { - return false - } - buf := dsaToBuf(_Q, _P, _G, _Y) - k.PublicKey = toBase64(buf) - return true -} - -// Set the public key for Ed25519 -func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool { - if _K == nil { - return false - } - k.PublicKey = toBase64(_K) - return true -} - -// Set the public key (the values E and N) for RSA -// RFC 3110: Section 2. RSA Public KEY Resource Records -func exponentToBuf(_E int) []byte { - var buf []byte - i := big.NewInt(int64(_E)).Bytes() - if len(i) < 256 { - buf = make([]byte, 1, 1+len(i)) - buf[0] = uint8(len(i)) - } else { - buf = make([]byte, 3, 3+len(i)) - buf[0] = 0 - buf[1] = uint8(len(i) >> 8) - buf[2] = uint8(len(i)) - } - buf = append(buf, i...) - return buf -} - -// Set the public key for X and Y for Curve. The two -// values are just concatenated. -func curveToBuf(_X, _Y *big.Int, intlen int) []byte { - buf := intToBytes(_X, intlen) - buf = append(buf, intToBytes(_Y, intlen)...) - return buf -} - -// Set the public key for X and Y for Curve. The two -// values are just concatenated. -func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { - t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8) - buf := []byte{byte(t)} - buf = append(buf, intToBytes(_Q, 20)...) - buf = append(buf, intToBytes(_P, 64+t*8)...) - buf = append(buf, intToBytes(_G, 64+t*8)...) 
- buf = append(buf, intToBytes(_Y, 64+t*8)...) - return buf -} diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go deleted file mode 100644 index e9dd69573..000000000 --- a/vendor/github.com/miekg/dns/dnssec_keyscan.go +++ /dev/null @@ -1,352 +0,0 @@ -package dns - -import ( - "bufio" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "io" - "math/big" - "strconv" - "strings" - - "golang.org/x/crypto/ed25519" -) - -// NewPrivateKey returns a PrivateKey by parsing the string s. -// s should be in the same form of the BIND private key files. -func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { - if s == "" || s[len(s)-1] != '\n' { // We need a closing newline - return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") - } - return k.ReadPrivateKey(strings.NewReader(s), "") -} - -// ReadPrivateKey reads a private key from the io.Reader q. The string file is -// only used in error reporting. -// The public key must be known, because some cryptographic algorithms embed -// the public inside the privatekey. 
-func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) { - m, err := parseKey(q, file) - if m == nil { - return nil, err - } - if _, ok := m["private-key-format"]; !ok { - return nil, ErrPrivKey - } - if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { - return nil, ErrPrivKey - } - // TODO(mg): check if the pubkey matches the private key - algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8) - if err != nil { - return nil, ErrPrivKey - } - switch uint8(algo) { - case DSA: - priv, err := readPrivateKeyDSA(m) - if err != nil { - return nil, err - } - pub := k.publicKeyDSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return priv, nil - case RSAMD5: - fallthrough - case RSASHA1: - fallthrough - case RSASHA1NSEC3SHA1: - fallthrough - case RSASHA256: - fallthrough - case RSASHA512: - priv, err := readPrivateKeyRSA(m) - if err != nil { - return nil, err - } - pub := k.publicKeyRSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return priv, nil - case ECCGOST: - return nil, ErrPrivKey - case ECDSAP256SHA256: - fallthrough - case ECDSAP384SHA384: - priv, err := readPrivateKeyECDSA(m) - if err != nil { - return nil, err - } - pub := k.publicKeyECDSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return priv, nil - case ED25519: - return readPrivateKeyED25519(m) - default: - return nil, ErrPrivKey - } -} - -// Read a private key (file) string and create a public key. Return the private key. 
-func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { - p := new(rsa.PrivateKey) - p.Primes = []*big.Int{nil, nil} - for k, v := range m { - switch k { - case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - switch k { - case "modulus": - p.PublicKey.N = new(big.Int).SetBytes(v1) - case "publicexponent": - i := new(big.Int).SetBytes(v1) - p.PublicKey.E = int(i.Int64()) // int64 should be large enough - case "privateexponent": - p.D = new(big.Int).SetBytes(v1) - case "prime1": - p.Primes[0] = new(big.Int).SetBytes(v1) - case "prime2": - p.Primes[1] = new(big.Int).SetBytes(v1) - } - case "exponent1", "exponent2", "coefficient": - // not used in Go (yet) - case "created", "publish", "activate": - // not used in Go (yet) - } - } - return p, nil -} - -func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { - p := new(dsa.PrivateKey) - p.X = new(big.Int) - for k, v := range m { - switch k { - case "private_value(x)": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - p.X.SetBytes(v1) - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - -func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { - p := new(ecdsa.PrivateKey) - p.D = new(big.Int) - // TODO: validate that the required flags are present - for k, v := range m { - switch k { - case "privatekey": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - p.D.SetBytes(v1) - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - -func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) { - var p ed25519.PrivateKey - // TODO: validate that the required flags are present - for k, v := range m { - switch k { - case "privatekey": - p1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - if len(p1) != 
ed25519.SeedSize { - return nil, ErrPrivKey - } - p = ed25519.NewKeyFromSeed(p1) - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - -// parseKey reads a private key from r. It returns a map[string]string, -// with the key-value pairs, or an error when the file is not correct. -func parseKey(r io.Reader, file string) (map[string]string, error) { - m := make(map[string]string) - var k string - - c := newKLexer(r) - - for l, ok := c.Next(); ok; l, ok = c.Next() { - // It should alternate - switch l.value { - case zKey: - k = l.token - case zValue: - if k == "" { - return nil, &ParseError{file, "no private key seen", l} - } - - m[strings.ToLower(k)] = l.token - k = "" - } - } - - // Surface any read errors from r. - if err := c.Err(); err != nil { - return nil, &ParseError{file: file, err: err.Error()} - } - - return m, nil -} - -type klexer struct { - br io.ByteReader - - readErr error - - line int - column int - - key bool - - eol bool // end-of-line -} - -func newKLexer(r io.Reader) *klexer { - br, ok := r.(io.ByteReader) - if !ok { - br = bufio.NewReaderSize(r, 1024) - } - - return &klexer{ - br: br, - - line: 1, - - key: true, - } -} - -func (kl *klexer) Err() error { - if kl.readErr == io.EOF { - return nil - } - - return kl.readErr -} - -// readByte returns the next byte from the input -func (kl *klexer) readByte() (byte, bool) { - if kl.readErr != nil { - return 0, false - } - - c, err := kl.br.ReadByte() - if err != nil { - kl.readErr = err - return 0, false - } - - // delay the newline handling until the next token is delivered, - // fixes off-by-one errors when reporting a parse error. 
- if kl.eol { - kl.line++ - kl.column = 0 - kl.eol = false - } - - if c == '\n' { - kl.eol = true - } else { - kl.column++ - } - - return c, true -} - -func (kl *klexer) Next() (lex, bool) { - var ( - l lex - - str strings.Builder - - commt bool - ) - - for x, ok := kl.readByte(); ok; x, ok = kl.readByte() { - l.line, l.column = kl.line, kl.column - - switch x { - case ':': - if commt || !kl.key { - break - } - - kl.key = false - - // Next token is a space, eat it - kl.readByte() - - l.value = zKey - l.token = str.String() - return l, true - case ';': - commt = true - case '\n': - if commt { - // Reset a comment - commt = false - } - - if kl.key && str.Len() == 0 { - // ignore empty lines - break - } - - kl.key = true - - l.value = zValue - l.token = str.String() - return l, true - default: - if commt { - break - } - - str.WriteByte(x) - } - } - - if kl.readErr != nil && kl.readErr != io.EOF { - // Don't return any tokens after a read error occurs. - return lex{value: zEOF}, false - } - - if str.Len() > 0 { - // Send remainder - l.value = zValue - l.token = str.String() - return l, true - } - - return lex{value: zEOF}, false -} diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go deleted file mode 100644 index 4493c9d57..000000000 --- a/vendor/github.com/miekg/dns/dnssec_privkey.go +++ /dev/null @@ -1,94 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "math/big" - "strconv" - - "golang.org/x/crypto/ed25519" -) - -const format = "Private-key-format: v1.3\n" - -var bigIntOne = big.NewInt(1) - -// PrivateKeyString converts a PrivateKey to a string. This string has the same -// format as the private-key-file of BIND9 (Private-key-format: v1.3). 
-// It needs some info from the key (the algorithm), so its a method of the DNSKEY -// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey -func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { - algorithm := strconv.Itoa(int(r.Algorithm)) - algorithm += " (" + AlgorithmToString[r.Algorithm] + ")" - - switch p := p.(type) { - case *rsa.PrivateKey: - modulus := toBase64(p.PublicKey.N.Bytes()) - e := big.NewInt(int64(p.PublicKey.E)) - publicExponent := toBase64(e.Bytes()) - privateExponent := toBase64(p.D.Bytes()) - prime1 := toBase64(p.Primes[0].Bytes()) - prime2 := toBase64(p.Primes[1].Bytes()) - // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm - // and from: http://code.google.com/p/go/issues/detail?id=987 - p1 := new(big.Int).Sub(p.Primes[0], bigIntOne) - q1 := new(big.Int).Sub(p.Primes[1], bigIntOne) - exp1 := new(big.Int).Mod(p.D, p1) - exp2 := new(big.Int).Mod(p.D, q1) - coeff := new(big.Int).ModInverse(p.Primes[1], p.Primes[0]) - - exponent1 := toBase64(exp1.Bytes()) - exponent2 := toBase64(exp2.Bytes()) - coefficient := toBase64(coeff.Bytes()) - - return format + - "Algorithm: " + algorithm + "\n" + - "Modulus: " + modulus + "\n" + - "PublicExponent: " + publicExponent + "\n" + - "PrivateExponent: " + privateExponent + "\n" + - "Prime1: " + prime1 + "\n" + - "Prime2: " + prime2 + "\n" + - "Exponent1: " + exponent1 + "\n" + - "Exponent2: " + exponent2 + "\n" + - "Coefficient: " + coefficient + "\n" - - case *ecdsa.PrivateKey: - var intlen int - switch r.Algorithm { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - private := toBase64(intToBytes(p.D, intlen)) - return format + - "Algorithm: " + algorithm + "\n" + - "PrivateKey: " + private + "\n" - - case *dsa.PrivateKey: - T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) - prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) - subprime := 
toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) - base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) - priv := toBase64(intToBytes(p.X, 20)) - pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) - return format + - "Algorithm: " + algorithm + "\n" + - "Prime(p): " + prime + "\n" + - "Subprime(q): " + subprime + "\n" + - "Base(g): " + base + "\n" + - "Private_value(x): " + priv + "\n" + - "Public_value(y): " + pub + "\n" - - case ed25519.PrivateKey: - private := toBase64(p.Seed()) - return format + - "Algorithm: " + algorithm + "\n" + - "PrivateKey: " + private + "\n" - - default: - return "" - } -} diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go deleted file mode 100644 index d3d7cec9e..000000000 --- a/vendor/github.com/miekg/dns/doc.go +++ /dev/null @@ -1,269 +0,0 @@ -/* -Package dns implements a full featured interface to the Domain Name System. -Both server- and client-side programming is supported. The package allows -complete control over what is sent out to the DNS. The API follows the -less-is-more principle, by presenting a small, clean interface. - -It supports (asynchronous) querying/replying, incoming/outgoing zone transfers, -TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing. - -Note that domain names MUST be fully qualified before sending them, unqualified -names in a message will result in a packing failure. - -Resource records are native types. They are not stored in wire format. Basic -usage pattern for creating a new resource record: - - r := new(dns.MX) - r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600} - r.Preference = 10 - r.Mx = "mx.miek.nl." - -Or directly from a string: - - mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") - -Or when the default origin (.) 
and TTL (3600) and class (IN) suit you: - - mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl") - -Or even: - - mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") - -In the DNS messages are exchanged, these messages contain resource records -(sets). Use pattern for creating a message: - - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) - -Or when not certain if the domain name is fully qualified: - - m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) - -The message m is now a message with the question section set to ask the MX -records for the miek.nl. zone. - -The following is slightly more verbose, but more flexible: - - m1 := new(dns.Msg) - m1.Id = dns.Id() - m1.RecursionDesired = true - m1.Question = make([]dns.Question, 1) - m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} - -After creating a message it can be sent. Basic use pattern for synchronous -querying the DNS at a server configured on 127.0.0.1 and port 53: - - c := new(dns.Client) - in, rtt, err := c.Exchange(m1, "127.0.0.1:53") - -Suppressing multiple outstanding queries (with the same question, type and -class) is as easy as setting: - - c.SingleInflight = true - -More advanced options are available using a net.Dialer and the corresponding API. -For example it is possible to set a timeout, or to specify a source IP address -and port to use for the connection: - - c := new(dns.Client) - laddr := net.UDPAddr{ - IP: net.ParseIP("[::1]"), - Port: 12345, - Zone: "", - } - c.Dialer := &net.Dialer{ - Timeout: 200 * time.Millisecond, - LocalAddr: &laddr, - } - in, rtt, err := c.Exchange(m1, "8.8.8.8:53") - -If these "advanced" features are not needed, a simple UDP query can be sent, -with: - - in, err := dns.Exchange(m1, "127.0.0.1:53") - -When this functions returns you will get dns message. A dns message consists -out of four sections. -The question section: in.Question, the answer section: in.Answer, -the authority section: in.Ns and the additional section: in.Extra. 
- -Each of these sections (except the Question section) contain a []RR. Basic -use pattern for accessing the rdata of a TXT RR as the first RR in -the Answer section: - - if t, ok := in.Answer[0].(*dns.TXT); ok { - // do something with t.Txt - } - -Domain Name and TXT Character String Representations - -Both domain names and TXT character strings are converted to presentation form -both when unpacked and when converted to strings. - -For TXT character strings, tabs, carriage returns and line feeds will be -converted to \t, \r and \n respectively. Back slashes and quotations marks will -be escaped. Bytes below 32 and above 127 will be converted to \DDD form. - -For domain names, in addition to the above rules brackets, periods, spaces, -semicolons and the at symbol are escaped. - -DNSSEC - -DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses -public key cryptography to sign resource records. The public keys are stored in -DNSKEY records and the signatures in RRSIG records. - -Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) -bit to a request. - - m := new(dns.Msg) - m.SetEdns0(4096, true) - -Signature generation, signature verification and key generation are all supported. - -DYNAMIC UPDATES - -Dynamic updates reuses the DNS message format, but renames three of the -sections. Question is Zone, Answer is Prerequisite, Authority is Update, only -the Additional is not renamed. See RFC 2136 for the gory details. - -You can set a rather complex set of rules for the existence of absence of -certain resource records or names in a zone to specify if resource records -should be added or removed. The table from RFC 2136 supplemented with the Go -DNS function shows which functions exist to specify the prerequisites. 
- - 3.2.4 - Table Of Metavalues Used In Prerequisite Section - - CLASS TYPE RDATA Meaning Function - -------------------------------------------------------------- - ANY ANY empty Name is in use dns.NameUsed - ANY rrset empty RRset exists (value indep) dns.RRsetUsed - NONE ANY empty Name is not in use dns.NameNotUsed - NONE rrset empty RRset does not exist dns.RRsetNotUsed - zone rrset rr RRset exists (value dep) dns.Used - -The prerequisite section can also be left empty. If you have decided on the -prerequisites you can tell what RRs should be added or deleted. The next table -shows the options you have and what functions to call. - - 3.4.2.6 - Table Of Metavalues Used In Update Section - - CLASS TYPE RDATA Meaning Function - --------------------------------------------------------------- - ANY ANY empty Delete all RRsets from name dns.RemoveName - ANY rrset empty Delete an RRset dns.RemoveRRset - NONE rrset rr Delete an RR from RRset dns.Remove - zone rrset rr Add to an RRset dns.Insert - -TRANSACTION SIGNATURE - -An TSIG or transaction signature adds a HMAC TSIG record to each message sent. -The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512. - -Basic use pattern when querying with a TSIG name "axfr." (note that these key names -must be fully qualified - as they are domain names) and the base64 secret -"so6ZGir4GPAqINNh9U5c3A==": - -If an incoming message contains a TSIG record it MUST be the last record in -the additional section (RFC2845 3.2). This means that you should make the -call to SetTsig last, right before executing the query. If you make any -changes to the RRset after calling SetTsig() the signature will be incorrect. - - c := new(dns.Client) - c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) - m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) - ... 
- // When sending the TSIG RR is calculated and filled in before sending - -When requesting an zone transfer (almost all TSIG usage is when requesting zone -transfers), with TSIG, this is the basic use pattern. In this example we -request an AXFR for miek.nl. with TSIG key named "axfr." and secret -"so6ZGir4GPAqINNh9U5c3A==" and using the server 176.58.119.54: - - t := new(dns.Transfer) - m := new(dns.Msg) - t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - m.SetAxfr("miek.nl.") - m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) - c, err := t.In(m, "176.58.119.54:53") - for r := range c { ... } - -You can now read the records from the transfer as they come in. Each envelope -is checked with TSIG. If something is not correct an error is returned. - -Basic use pattern validating and replying to a message that has TSIG set. - - server := &dns.Server{Addr: ":53", Net: "udp"} - server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - go server.ListenAndServe() - dns.HandleFunc(".", handleRequest) - - func handleRequest(w dns.ResponseWriter, r *dns.Msg) { - m := new(dns.Msg) - m.SetReply(r) - if r.IsTsig() != nil { - if w.TsigStatus() == nil { - // *Msg r has an TSIG record and it was validated - m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) - } else { - // *Msg r has an TSIG records and it was not valided - } - } - w.WriteMsg(m) - } - -PRIVATE RRS - -RFC 6895 sets aside a range of type codes for private use. This range is 65,280 -- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these -can be used, before requesting an official type code from IANA. - -See https://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more -information. - -EDNS0 - -EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by -RFC 6891. It defines an new RR type, the OPT RR, which is then completely -abused. 
- -Basic use pattern for creating an (empty) OPT RR: - - o := new(dns.OPT) - o.Hdr.Name = "." // MUST be the root zone, per definition. - o.Hdr.Rrtype = dns.TypeOPT - -The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) interfaces. -Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and -EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note that these options -may be combined in an OPT RR. Basic use pattern for a server to check if (and -which) options are set: - - // o is a dns.OPT - for _, s := range o.Option { - switch e := s.(type) { - case *dns.EDNS0_NSID: - // do stuff with e.Nsid - case *dns.EDNS0_SUBNET: - // access e.Family, e.Address, etc. - } - } - -SIG(0) - -From RFC 2931: - - SIG(0) provides protection for DNS transactions and requests .... - ... protection for glue records, DNS requests, protection for message headers - on requests and responses, and protection of the overall integrity of a response. - -It works like TSIG, except that SIG(0) uses public key cryptography, instead of -the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256, -ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512. - -Signing subsequent messages in multi-message sessions is not implemented. -*/ -package dns diff --git a/vendor/github.com/miekg/dns/duplicate.go b/vendor/github.com/miekg/dns/duplicate.go deleted file mode 100644 index 00cda0aa2..000000000 --- a/vendor/github.com/miekg/dns/duplicate.go +++ /dev/null @@ -1,38 +0,0 @@ -package dns - -//go:generate go run duplicate_generate.go - -// IsDuplicate checks of r1 and r2 are duplicates of each other, excluding the TTL. -// So this means the header data is equal *and* the RDATA is the same. Return true -// is so, otherwise false. -// It's is a protocol violation to have identical RRs in a message. -func IsDuplicate(r1, r2 RR) bool { - // Check whether the record header is identical. 
- if !r1.Header().isDuplicate(r2.Header()) { - return false - } - - // Check whether the RDATA is identical. - return r1.isDuplicate(r2) -} - -func (r1 *RR_Header) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*RR_Header) - if !ok { - return false - } - if r1.Class != r2.Class { - return false - } - if r1.Rrtype != r2.Rrtype { - return false - } - if !isDuplicateName(r1.Name, r2.Name) { - return false - } - // ignore TTL - return true -} - -// isDuplicateName checks if the domain names s1 and s2 are equal. -func isDuplicateName(s1, s2 string) bool { return equal(s1, s2) } diff --git a/vendor/github.com/miekg/dns/duplicate_generate.go b/vendor/github.com/miekg/dns/duplicate_generate.go deleted file mode 100644 index 9b7a71b16..000000000 --- a/vendor/github.com/miekg/dns/duplicate_generate.go +++ /dev/null @@ -1,144 +0,0 @@ -//+build ignore - -// types_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate conversion tables (TypeToRR and TypeToString) and banal -// methods (len, Header, copy) based on the struct tags. The generated source is -// written to ztypes.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" -) - -var packageHdr = ` -// Code generated by "go run duplicate_generate.go"; DO NOT EDIT. 
- -package dns - -` - -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - - if name == "PrivateRR" || name == "OPT" { - continue - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate the duplicate check for each type. - fmt.Fprint(b, "// isDuplicate() functions\n\n") - for _, name := range namedTypes { - - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (r1 *%s) isDuplicate(_r2 RR) bool {\n", name) - fmt.Fprintf(b, "r2, ok := _r2.(*%s)\n", name) - fmt.Fprint(b, "if !ok { return false }\n") - fmt.Fprint(b, "_ = r2\n") - for i := 1; i < st.NumFields(); i++ { - field := st.Field(i).Name() - o2 := func(s string) { fmt.Fprintf(b, s+"\n", field, field) } - o3 := func(s string) { fmt.Fprintf(b, s+"\n", field, field, field) } - - // For some reason, a and aaaa don't pop up as *types.Slice here (mostly like because the are - // *indirectly* defined as a slice in the net package). 
- if _, ok := st.Field(i).Type().(*types.Slice); ok { - o2("if len(r1.%s) != len(r2.%s) {\nreturn false\n}") - - if st.Tag(i) == `dns:"cdomain-name"` || st.Tag(i) == `dns:"domain-name"` { - o3(`for i := 0; i < len(r1.%s); i++ { - if !isDuplicateName(r1.%s[i], r2.%s[i]) { - return false - } - }`) - - continue - } - - o3(`for i := 0; i < len(r1.%s); i++ { - if r1.%s[i] != r2.%s[i] { - return false - } - }`) - - continue - } - - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"a"`, `dns:"aaaa"`: - o2("if !r1.%s.Equal(r2.%s) {\nreturn false\n}") - case `dns:"cdomain-name"`, `dns:"domain-name"`: - o2("if !isDuplicateName(r1.%s, r2.%s) {\nreturn false\n}") - default: - o2("if r1.%s != r2.%s {\nreturn false\n}") - } - } - fmt.Fprintf(b, "return true\n}\n\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zduplicate.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go deleted file mode 100644 index ca8873e14..000000000 --- a/vendor/github.com/miekg/dns/edns.go +++ /dev/null @@ -1,659 +0,0 @@ -package dns - -import ( - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "net" - "strconv" -) - -// EDNS0 Option codes. 
-const ( - EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 - EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt - EDNS0NSID = 0x3 // nsid (See RFC 5001) - EDNS0DAU = 0x5 // DNSSEC Algorithm Understood - EDNS0DHU = 0x6 // DS Hash Understood - EDNS0N3U = 0x7 // NSEC3 Hash Understood - EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871) - EDNS0EXPIRE = 0x9 // EDNS0 expire - EDNS0COOKIE = 0xa // EDNS0 Cookie - EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828) - EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830) - EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891) - EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891) - _DO = 1 << 15 // DNSSEC OK -) - -// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. -// See RFC 6891. -type OPT struct { - Hdr RR_Header - Option []EDNS0 `dns:"opt"` -} - -func (rr *OPT) String() string { - s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " - if rr.Do() { - s += "flags: do; " - } else { - s += "flags: ; " - } - s += "udp: " + strconv.Itoa(int(rr.UDPSize())) - - for _, o := range rr.Option { - switch o.(type) { - case *EDNS0_NSID: - s += "\n; NSID: " + o.String() - h, e := o.pack() - var r string - if e == nil { - for _, c := range h { - r += "(" + string(c) + ")" - } - s += " " + r - } - case *EDNS0_SUBNET: - s += "\n; SUBNET: " + o.String() - case *EDNS0_COOKIE: - s += "\n; COOKIE: " + o.String() - case *EDNS0_UL: - s += "\n; UPDATE LEASE: " + o.String() - case *EDNS0_LLQ: - s += "\n; LONG LIVED QUERIES: " + o.String() - case *EDNS0_DAU: - s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() - case *EDNS0_DHU: - s += "\n; DS HASH UNDERSTOOD: " + o.String() - case *EDNS0_N3U: - s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() - case *EDNS0_LOCAL: - s += "\n; LOCAL OPT: " + o.String() - case *EDNS0_PADDING: 
- s += "\n; PADDING: " + o.String() - } - } - return s -} - -func (rr *OPT) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - for _, o := range rr.Option { - l += 4 // Account for 2-byte option code and 2-byte option length. - lo, _ := o.pack() - l += len(lo) - } - return l -} - -func (rr *OPT) parse(c *zlexer, origin, file string) *ParseError { - panic("dns: internal error: parse should never be called on OPT") -} - -func (r1 *OPT) isDuplicate(r2 RR) bool { return false } - -// return the old value -> delete SetVersion? - -// Version returns the EDNS version used. Only zero is defined. -func (rr *OPT) Version() uint8 { - return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16) -} - -// SetVersion sets the version of EDNS. This is usually zero. -func (rr *OPT) SetVersion(v uint8) { - rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | uint32(v)<<16 -} - -// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). -func (rr *OPT) ExtendedRcode() int { - return int(rr.Hdr.Ttl&0xFF000000>>24) << 4 -} - -// SetExtendedRcode sets the EDNS extended RCODE field. -// -// If the RCODE is not an extended RCODE, will reset the extended RCODE field to 0. -func (rr *OPT) SetExtendedRcode(v uint16) { - rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24 -} - -// UDPSize returns the UDP buffer size. -func (rr *OPT) UDPSize() uint16 { - return rr.Hdr.Class -} - -// SetUDPSize sets the UDP buffer size. -func (rr *OPT) SetUDPSize(size uint16) { - rr.Hdr.Class = size -} - -// Do returns the value of the DO (DNSSEC OK) bit. -func (rr *OPT) Do() bool { - return rr.Hdr.Ttl&_DO == _DO -} - -// SetDo sets the DO (DNSSEC OK) bit. -// If we pass an argument, set the DO bit to that value. -// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored. 
-func (rr *OPT) SetDo(do ...bool) { - if len(do) == 1 { - if do[0] { - rr.Hdr.Ttl |= _DO - } else { - rr.Hdr.Ttl &^= _DO - } - } else { - rr.Hdr.Ttl |= _DO - } -} - -// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. -type EDNS0 interface { - // Option returns the option code for the option. - Option() uint16 - // pack returns the bytes of the option data. - pack() ([]byte, error) - // unpack sets the data as found in the buffer. Is also sets - // the length of the slice as the length of the option data. - unpack([]byte) error - // String returns the string representation of the option. - String() string - // copy returns a deep-copy of the option. - copy() EDNS0 -} - -// EDNS0_NSID option is used to retrieve a nameserver -// identifier. When sending a request Nsid must be set to the empty string -// The identifier is an opaque string encoded as hex. -// Basic use pattern for creating an nsid option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_NSID) -// e.Code = dns.EDNS0NSID -// e.Nsid = "AA" -// o.Option = append(o.Option, e) -type EDNS0_NSID struct { - Code uint16 // Always EDNS0NSID - Nsid string // This string needs to be hex encoded -} - -func (e *EDNS0_NSID) pack() ([]byte, error) { - h, err := hex.DecodeString(e.Nsid) - if err != nil { - return nil, err - } - return h, nil -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code. -func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } -func (e *EDNS0_NSID) String() string { return e.Nsid } -func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} } - -// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver -// an idea of where the client lives. See RFC 7871. It can then give back a different -// answer depending on the location or network topology. 
-// Basic use pattern for creating an subnet option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_SUBNET) -// e.Code = dns.EDNS0SUBNET -// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 -// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 -// e.SourceScope = 0 -// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 -// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 -// o.Option = append(o.Option, e) -// -// This code will parse all the available bits when unpacking (up to optlen). -// When packing it will apply SourceNetmask. If you need more advanced logic, -// patches welcome and good luck. -type EDNS0_SUBNET struct { - Code uint16 // Always EDNS0SUBNET - Family uint16 // 1 for IP, 2 for IP6 - SourceNetmask uint8 - SourceScope uint8 - Address net.IP -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET } - -func (e *EDNS0_SUBNET) pack() ([]byte, error) { - b := make([]byte, 4) - binary.BigEndian.PutUint16(b[0:], e.Family) - b[2] = e.SourceNetmask - b[3] = e.SourceScope - switch e.Family { - case 0: - // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 - // We might don't need to complain either - if e.SourceNetmask != 0 { - return nil, errors.New("dns: bad address family") - } - case 1: - if e.SourceNetmask > net.IPv4len*8 { - return nil, errors.New("dns: bad netmask") - } - if len(e.Address.To4()) != net.IPv4len { - return nil, errors.New("dns: bad address") - } - ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) - needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up - b = append(b, ip[:needLength]...) 
- case 2: - if e.SourceNetmask > net.IPv6len*8 { - return nil, errors.New("dns: bad netmask") - } - if len(e.Address) != net.IPv6len { - return nil, errors.New("dns: bad address") - } - ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) - needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up - b = append(b, ip[:needLength]...) - default: - return nil, errors.New("dns: bad address family") - } - return b, nil -} - -func (e *EDNS0_SUBNET) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Family = binary.BigEndian.Uint16(b) - e.SourceNetmask = b[2] - e.SourceScope = b[3] - switch e.Family { - case 0: - // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 - // It's okay to accept such a packet - if e.SourceNetmask != 0 { - return errors.New("dns: bad address family") - } - e.Address = net.IPv4(0, 0, 0, 0) - case 1: - if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { - return errors.New("dns: bad netmask") - } - addr := make(net.IP, net.IPv4len) - copy(addr, b[4:]) - e.Address = addr.To16() - case 2: - if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { - return errors.New("dns: bad netmask") - } - addr := make(net.IP, net.IPv6len) - copy(addr, b[4:]) - e.Address = addr - default: - return errors.New("dns: bad address family") - } - return nil -} - -func (e *EDNS0_SUBNET) String() (s string) { - if e.Address == nil { - s = "" - } else if e.Address.To4() != nil { - s = e.Address.String() - } else { - s = "[" + e.Address.String() + "]" - } - s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) - return -} - -func (e *EDNS0_SUBNET) copy() EDNS0 { - return &EDNS0_SUBNET{ - e.Code, - e.Family, - e.SourceNetmask, - e.SourceScope, - e.Address, - } -} - -// The EDNS0_COOKIE option is used to add a DNS Cookie to a message. -// -// o := new(dns.OPT) -// o.Hdr.Name = "." 
-// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_COOKIE) -// e.Code = dns.EDNS0COOKIE -// e.Cookie = "24a5ac.." -// o.Option = append(o.Option, e) -// -// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is -// always 8 bytes. It may then optionally be followed by the server cookie. The server -// cookie is of variable length, 8 to a maximum of 32 bytes. In other words: -// -// cCookie := o.Cookie[:16] -// sCookie := o.Cookie[16:] -// -// There is no guarantee that the Cookie string has a specific length. -type EDNS0_COOKIE struct { - Code uint16 // Always EDNS0COOKIE - Cookie string // Hex-encoded cookie data -} - -func (e *EDNS0_COOKIE) pack() ([]byte, error) { - h, err := hex.DecodeString(e.Cookie) - if err != nil { - return nil, err - } - return h, nil -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } -func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } -func (e *EDNS0_COOKIE) String() string { return e.Cookie } -func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.Cookie} } - -// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set -// an expiration on an update RR. This is helpful for clients that cannot clean -// up after themselves. This is a draft RFC and more information can be found at -// http://files.dns-sd.org/draft-sekar-dns-ul.txt -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_UL) -// e.Code = dns.EDNS0UL -// e.Lease = 120 // in seconds -// o.Option = append(o.Option, e) -type EDNS0_UL struct { - Code uint16 // Always EDNS0UL - Lease uint32 -} - -// Option implements the EDNS0 interface. 
-func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } -func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } -func (e *EDNS0_UL) copy() EDNS0 { return &EDNS0_UL{e.Code, e.Lease} } - -// Copied: http://golang.org/src/pkg/net/dnsmsg.go -func (e *EDNS0_UL) pack() ([]byte, error) { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, e.Lease) - return b, nil -} - -func (e *EDNS0_UL) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Lease = binary.BigEndian.Uint32(b) - return nil -} - -// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 -// Implemented for completeness, as the EDNS0 type code is assigned. -type EDNS0_LLQ struct { - Code uint16 // Always EDNS0LLQ - Version uint16 - Opcode uint16 - Error uint16 - Id uint64 - LeaseLife uint32 -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } - -func (e *EDNS0_LLQ) pack() ([]byte, error) { - b := make([]byte, 18) - binary.BigEndian.PutUint16(b[0:], e.Version) - binary.BigEndian.PutUint16(b[2:], e.Opcode) - binary.BigEndian.PutUint16(b[4:], e.Error) - binary.BigEndian.PutUint64(b[6:], e.Id) - binary.BigEndian.PutUint32(b[14:], e.LeaseLife) - return b, nil -} - -func (e *EDNS0_LLQ) unpack(b []byte) error { - if len(b) < 18 { - return ErrBuf - } - e.Version = binary.BigEndian.Uint16(b[0:]) - e.Opcode = binary.BigEndian.Uint16(b[2:]) - e.Error = binary.BigEndian.Uint16(b[4:]) - e.Id = binary.BigEndian.Uint64(b[6:]) - e.LeaseLife = binary.BigEndian.Uint32(b[14:]) - return nil -} - -func (e *EDNS0_LLQ) String() string { - s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + - " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(e.Id, 10) + - " " + strconv.FormatUint(uint64(e.LeaseLife), 10) - return s -} -func (e *EDNS0_LLQ) copy() EDNS0 { - return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife} -} - 
-// EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. -type EDNS0_DAU struct { - Code uint16 // Always EDNS0DAU - AlgCode []uint8 -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } -func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_DAU) String() string { - s := "" - for _, alg := range e.AlgCode { - if a, ok := AlgorithmToString[alg]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(alg)) - } - } - return s -} -func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} } - -// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975. -type EDNS0_DHU struct { - Code uint16 // Always EDNS0DHU - AlgCode []uint8 -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } -func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_DHU) String() string { - s := "" - for _, alg := range e.AlgCode { - if a, ok := HashToString[alg]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(alg)) - } - } - return s -} -func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} } - -// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975. -type EDNS0_N3U struct { - Code uint16 // Always EDNS0N3U - AlgCode []uint8 -} - -// Option implements the EDNS0 interface. 
-func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } -func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_N3U) String() string { - // Re-use the hash map - s := "" - for _, alg := range e.AlgCode { - if a, ok := HashToString[alg]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(alg)) - } - } - return s -} -func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} } - -// EDNS0_EXPIRE implementes the EDNS0 option as described in RFC 7314. -type EDNS0_EXPIRE struct { - Code uint16 // Always EDNS0EXPIRE - Expire uint32 -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } -func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } -func (e *EDNS0_EXPIRE) copy() EDNS0 { return &EDNS0_EXPIRE{e.Code, e.Expire} } - -func (e *EDNS0_EXPIRE) pack() ([]byte, error) { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, e.Expire) - return b, nil -} - -func (e *EDNS0_EXPIRE) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Expire = binary.BigEndian.Uint32(b) - return nil -} - -// The EDNS0_LOCAL option is used for local/experimental purposes. The option -// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] -// (RFC6891), although any unassigned code can actually be used. The content of -// the option is made available in Data, unaltered. -// Basic use pattern for creating a local option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_LOCAL) -// e.Code = dns.EDNS0LOCALSTART -// e.Data = []byte{72, 82, 74} -// o.Option = append(o.Option, e) -type EDNS0_LOCAL struct { - Code uint16 - Data []byte -} - -// Option implements the EDNS0 interface. 
-func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } -func (e *EDNS0_LOCAL) String() string { - return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) -} -func (e *EDNS0_LOCAL) copy() EDNS0 { - b := make([]byte, len(e.Data)) - copy(b, e.Data) - return &EDNS0_LOCAL{e.Code, b} -} - -func (e *EDNS0_LOCAL) pack() ([]byte, error) { - b := make([]byte, len(e.Data)) - copied := copy(b, e.Data) - if copied != len(e.Data) { - return nil, ErrBuf - } - return b, nil -} - -func (e *EDNS0_LOCAL) unpack(b []byte) error { - e.Data = make([]byte, len(b)) - copied := copy(e.Data, b) - if copied != len(b) { - return ErrBuf - } - return nil -} - -// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep -// the TCP connection alive. See RFC 7828. -type EDNS0_TCP_KEEPALIVE struct { - Code uint16 // Always EDNSTCPKEEPALIVE - Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present; - Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order. -} - -// Option implements the EDNS0 interface. 
-func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE } - -func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) { - if e.Timeout != 0 && e.Length != 2 { - return nil, errors.New("dns: timeout specified but length is not 2") - } - if e.Timeout == 0 && e.Length != 0 { - return nil, errors.New("dns: timeout not specified but length is not 0") - } - b := make([]byte, 4+e.Length) - binary.BigEndian.PutUint16(b[0:], e.Code) - binary.BigEndian.PutUint16(b[2:], e.Length) - if e.Length == 2 { - binary.BigEndian.PutUint16(b[4:], e.Timeout) - } - return b, nil -} - -func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Length = binary.BigEndian.Uint16(b[2:4]) - if e.Length != 0 && e.Length != 2 { - return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10)) - } - if e.Length == 2 { - if len(b) < 6 { - return ErrBuf - } - e.Timeout = binary.BigEndian.Uint16(b[4:6]) - } - return nil -} - -func (e *EDNS0_TCP_KEEPALIVE) String() (s string) { - s = "use tcp keep-alive" - if e.Length == 0 { - s += ", timeout omitted" - } else { - s += fmt.Sprintf(", timeout %dms", e.Timeout*100) - } - return -} -func (e *EDNS0_TCP_KEEPALIVE) copy() EDNS0 { return &EDNS0_TCP_KEEPALIVE{e.Code, e.Length, e.Timeout} } - -// EDNS0_PADDING option is used to add padding to a request/response. The default -// value of padding SHOULD be 0x0 but other values MAY be used, for instance if -// compression is applied before encryption which may break signatures. -type EDNS0_PADDING struct { - Padding []byte -} - -// Option implements the EDNS0 interface. 
-func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING } -func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil } -func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil } -func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) } -func (e *EDNS0_PADDING) copy() EDNS0 { - b := make([]byte, len(e.Padding)) - copy(b, e.Padding) - return &EDNS0_PADDING{b} -} diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go deleted file mode 100644 index 0ec79f2fc..000000000 --- a/vendor/github.com/miekg/dns/format.go +++ /dev/null @@ -1,93 +0,0 @@ -package dns - -import ( - "net" - "reflect" - "strconv" -) - -// NumField returns the number of rdata fields r has. -func NumField(r RR) int { - return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header -} - -// Field returns the rdata field i as a string. Fields are indexed starting from 1. -// RR types that holds slice data, for instance the NSEC type bitmap will return a single -// string where the types are concatenated using a space. -// Accessing non existing fields will cause a panic. 
-func Field(r RR, i int) string { - if i == 0 { - return "" - } - d := reflect.ValueOf(r).Elem().Field(i) - switch d.Kind() { - case reflect.String: - return d.String() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(d.Int(), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return strconv.FormatUint(d.Uint(), 10) - case reflect.Slice: - switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { - case `dns:"a"`: - // TODO(miek): Hmm store this as 16 bytes - if d.Len() < net.IPv4len { - return "" - } - if d.Len() < net.IPv6len { - return net.IPv4(byte(d.Index(0).Uint()), - byte(d.Index(1).Uint()), - byte(d.Index(2).Uint()), - byte(d.Index(3).Uint())).String() - } - return net.IPv4(byte(d.Index(12).Uint()), - byte(d.Index(13).Uint()), - byte(d.Index(14).Uint()), - byte(d.Index(15).Uint())).String() - case `dns:"aaaa"`: - if d.Len() < net.IPv6len { - return "" - } - return net.IP{ - byte(d.Index(0).Uint()), - byte(d.Index(1).Uint()), - byte(d.Index(2).Uint()), - byte(d.Index(3).Uint()), - byte(d.Index(4).Uint()), - byte(d.Index(5).Uint()), - byte(d.Index(6).Uint()), - byte(d.Index(7).Uint()), - byte(d.Index(8).Uint()), - byte(d.Index(9).Uint()), - byte(d.Index(10).Uint()), - byte(d.Index(11).Uint()), - byte(d.Index(12).Uint()), - byte(d.Index(13).Uint()), - byte(d.Index(14).Uint()), - byte(d.Index(15).Uint()), - }.String() - case `dns:"nsec"`: - if d.Len() == 0 { - return "" - } - s := Type(d.Index(0).Uint()).String() - for i := 1; i < d.Len(); i++ { - s += " " + Type(d.Index(i).Uint()).String() - } - return s - default: - // if it does not have a tag its a string slice - fallthrough - case `dns:"txt"`: - if d.Len() == 0 { - return "" - } - s := d.Index(0).String() - for i := 1; i < d.Len(); i++ { - s += " " + d.Index(i).String() - } - return s - } - } - return "" -} diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go deleted file mode 
100644 index a8a09184d..000000000 --- a/vendor/github.com/miekg/dns/fuzz.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build fuzz - -package dns - -func Fuzz(data []byte) int { - msg := new(Msg) - - if err := msg.Unpack(data); err != nil { - return 0 - } - if _, err := msg.Pack(); err != nil { - return 0 - } - - return 1 -} - -func FuzzNewRR(data []byte) int { - if _, err := NewRR(string(data)); err != nil { - return 0 - } - return 1 -} diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go deleted file mode 100644 index 97bc39f58..000000000 --- a/vendor/github.com/miekg/dns/generate.go +++ /dev/null @@ -1,242 +0,0 @@ -package dns - -import ( - "bytes" - "fmt" - "io" - "strconv" - "strings" -) - -// Parse the $GENERATE statement as used in BIND9 zones. -// See http://www.zytrax.com/books/dns/ch8/generate.html for instance. -// We are called after '$GENERATE '. After which we expect: -// * the range (12-24/2) -// * lhs (ownername) -// * [[ttl][class]] -// * type -// * rhs (rdata) -// But we are lazy here, only the range is parsed *all* occurrences -// of $ after that are interpreted. 
-func (zp *ZoneParser) generate(l lex) (RR, bool) { - token := l.token - step := 1 - if i := strings.IndexByte(token, '/'); i >= 0 { - if i+1 == len(token) { - return zp.setParseError("bad step in $GENERATE range", l) - } - - s, err := strconv.Atoi(token[i+1:]) - if err != nil || s <= 0 { - return zp.setParseError("bad step in $GENERATE range", l) - } - - step = s - token = token[:i] - } - - sx := strings.SplitN(token, "-", 2) - if len(sx) != 2 { - return zp.setParseError("bad start-stop in $GENERATE range", l) - } - - start, err := strconv.Atoi(sx[0]) - if err != nil { - return zp.setParseError("bad start in $GENERATE range", l) - } - - end, err := strconv.Atoi(sx[1]) - if err != nil { - return zp.setParseError("bad stop in $GENERATE range", l) - } - if end < 0 || start < 0 || end < start { - return zp.setParseError("bad range in $GENERATE range", l) - } - - zp.c.Next() // _BLANK - - // Create a complete new string, which we then parse again. - var s string - for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() { - if l.err { - return zp.setParseError("bad data in $GENERATE directive", l) - } - if l.value == zNewline { - break - } - - s += l.token - } - - r := &generateReader{ - s: s, - - cur: start, - start: start, - end: end, - step: step, - - file: zp.file, - lex: &l, - } - zp.sub = NewZoneParser(r, zp.origin, zp.file) - zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed - zp.sub.SetDefaultTTL(defaultTtl) - return zp.subNext() -} - -type generateReader struct { - s string - si int - - cur int - start int - end int - step int - - mod bytes.Buffer - - escape bool - - eof bool - - file string - lex *lex -} - -func (r *generateReader) parseError(msg string, end int) *ParseError { - r.eof = true // Make errors sticky. 
- - l := *r.lex - l.token = r.s[r.si-1 : end] - l.column += r.si // l.column starts one zBLANK before r.s - - return &ParseError{r.file, msg, l} -} - -func (r *generateReader) Read(p []byte) (int, error) { - // NewZLexer, through NewZoneParser, should use ReadByte and - // not end up here. - - panic("not implemented") -} - -func (r *generateReader) ReadByte() (byte, error) { - if r.eof { - return 0, io.EOF - } - if r.mod.Len() > 0 { - return r.mod.ReadByte() - } - - if r.si >= len(r.s) { - r.si = 0 - r.cur += r.step - - r.eof = r.cur > r.end || r.cur < 0 - return '\n', nil - } - - si := r.si - r.si++ - - switch r.s[si] { - case '\\': - if r.escape { - r.escape = false - return '\\', nil - } - - r.escape = true - return r.ReadByte() - case '$': - if r.escape { - r.escape = false - return '$', nil - } - - mod := "%d" - - if si >= len(r.s)-1 { - // End of the string - fmt.Fprintf(&r.mod, mod, r.cur) - return r.mod.ReadByte() - } - - if r.s[si+1] == '$' { - r.si++ - return '$', nil - } - - var offset int - - // Search for { and } - if r.s[si+1] == '{' { - // Modifier block - sep := strings.Index(r.s[si+2:], "}") - if sep < 0 { - return 0, r.parseError("bad modifier in $GENERATE", len(r.s)) - } - - var errMsg string - mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep]) - if errMsg != "" { - return 0, r.parseError(errMsg, si+3+sep) - } - if r.start+offset < 0 || r.end+offset > 1<<31-1 { - return 0, r.parseError("bad offset in $GENERATE", si+3+sep) - } - - r.si += 2 + sep // Jump to it - } - - fmt.Fprintf(&r.mod, mod, r.cur+offset) - return r.mod.ReadByte() - default: - if r.escape { // Pretty useless here - r.escape = false - return r.ReadByte() - } - - return r.s[si], nil - } -} - -// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. -func modToPrintf(s string) (string, int, string) { - // Modifier is { offset [ ,width [ ,base ] ] } - provide default - // values for optional width and type, if necessary. 
- var offStr, widthStr, base string - switch xs := strings.Split(s, ","); len(xs) { - case 1: - offStr, widthStr, base = xs[0], "0", "d" - case 2: - offStr, widthStr, base = xs[0], xs[1], "d" - case 3: - offStr, widthStr, base = xs[0], xs[1], xs[2] - default: - return "", 0, "bad modifier in $GENERATE" - } - - switch base { - case "o", "d", "x", "X": - default: - return "", 0, "bad base in $GENERATE" - } - - offset, err := strconv.Atoi(offStr) - if err != nil { - return "", 0, "bad offset in $GENERATE" - } - - width, err := strconv.Atoi(widthStr) - if err != nil || width < 0 || width > 255 { - return "", 0, "bad width in $GENERATE" - } - - if width == 0 { - return "%" + base, offset, "" - } - - return "%0" + widthStr + base, offset, "" -} diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go deleted file mode 100644 index e32d2a1d2..000000000 --- a/vendor/github.com/miekg/dns/labels.go +++ /dev/null @@ -1,188 +0,0 @@ -package dns - -// Holds a bunch of helper functions for dealing with labels. - -// SplitDomainName splits a name string into it's labels. -// www.miek.nl. returns []string{"www", "miek", "nl"} -// .www.miek.nl. returns []string{"", "www", "miek", "nl"}, -// The root label (.) returns nil. Note that using -// strings.Split(s) will work in most cases, but does not handle -// escaped dots (\.) for instance. -// s must be a syntactically valid domain name, see IsDomainName. -func SplitDomainName(s string) (labels []string) { - if len(s) == 0 { - return nil - } - fqdnEnd := 0 // offset of the final '.' 
or the length of the name - idx := Split(s) - begin := 0 - if IsFqdn(s) { - fqdnEnd = len(s) - 1 - } else { - fqdnEnd = len(s) - } - - switch len(idx) { - case 0: - return nil - case 1: - // no-op - default: - for _, end := range idx[1:] { - labels = append(labels, s[begin:end-1]) - begin = end - } - } - - return append(labels, s[begin:fqdnEnd]) -} - -// CompareDomainName compares the names s1 and s2 and -// returns how many labels they have in common starting from the *right*. -// The comparison stops at the first inequality. The names are downcased -// before the comparison. -// -// www.miek.nl. and miek.nl. have two labels in common: miek and nl -// www.miek.nl. and www.bla.nl. have one label in common: nl -// -// s1 and s2 must be syntactically valid domain names. -func CompareDomainName(s1, s2 string) (n int) { - // the first check: root label - if s1 == "." || s2 == "." { - return 0 - } - - l1 := Split(s1) - l2 := Split(s2) - - j1 := len(l1) - 1 // end - i1 := len(l1) - 2 // start - j2 := len(l2) - 1 - i2 := len(l2) - 2 - // the second check can be done here: last/only label - // before we fall through into the for-loop below - if equal(s1[l1[j1]:], s2[l2[j2]:]) { - n++ - } else { - return - } - for { - if i1 < 0 || i2 < 0 { - break - } - if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) { - n++ - } else { - break - } - j1-- - i1-- - j2-- - i2-- - } - return -} - -// CountLabel counts the the number of labels in the string s. -// s must be a syntactically valid domain name. -func CountLabel(s string) (labels int) { - if s == "." { - return - } - off := 0 - end := false - for { - off, end = NextLabel(s, off) - labels++ - if end { - return - } - } -} - -// Split splits a name s into its label indexes. -// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}. -// The root name (.) returns nil. Also see SplitDomainName. -// s must be a syntactically valid domain name. -func Split(s string) []int { - if s == "." 
{ - return nil - } - idx := make([]int, 1, 3) - off := 0 - end := false - - for { - off, end = NextLabel(s, off) - if end { - return idx - } - idx = append(idx, off) - } -} - -// NextLabel returns the index of the start of the next label in the -// string s starting at offset. -// The bool end is true when the end of the string has been reached. -// Also see PrevLabel. -func NextLabel(s string, offset int) (i int, end bool) { - quote := false - for i = offset; i < len(s)-1; i++ { - switch s[i] { - case '\\': - quote = !quote - default: - quote = false - case '.': - if quote { - quote = !quote - continue - } - return i + 1, false - } - } - return i + 1, true -} - -// PrevLabel returns the index of the label when starting from the right and -// jumping n labels to the left. -// The bool start is true when the start of the string has been overshot. -// Also see NextLabel. -func PrevLabel(s string, n int) (i int, start bool) { - if n == 0 { - return len(s), false - } - lab := Split(s) - if lab == nil { - return 0, true - } - if n > len(lab) { - return 0, true - } - return lab[len(lab)-n], false -} - -// equal compares a and b while ignoring case. It returns true when equal otherwise false. -func equal(a, b string) bool { - // might be lifted into API function. 
- la := len(a) - lb := len(b) - if la != lb { - return false - } - - for i := la - 1; i >= 0; i-- { - ai := a[i] - bi := b[i] - if ai >= 'A' && ai <= 'Z' { - ai |= 'a' - 'A' - } - if bi >= 'A' && bi <= 'Z' { - bi |= 'a' - 'A' - } - if ai != bi { - return false - } - } - return true -} diff --git a/vendor/github.com/miekg/dns/listen_go111.go b/vendor/github.com/miekg/dns/listen_go111.go deleted file mode 100644 index fad195cfe..000000000 --- a/vendor/github.com/miekg/dns/listen_go111.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build go1.11 -// +build aix darwin dragonfly freebsd linux netbsd openbsd - -package dns - -import ( - "context" - "net" - "syscall" - - "golang.org/x/sys/unix" -) - -const supportsReusePort = true - -func reuseportControl(network, address string, c syscall.RawConn) error { - var opErr error - err := c.Control(func(fd uintptr) { - opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1) - }) - if err != nil { - return err - } - - return opErr -} - -func listenTCP(network, addr string, reuseport bool) (net.Listener, error) { - var lc net.ListenConfig - if reuseport { - lc.Control = reuseportControl - } - - return lc.Listen(context.Background(), network, addr) -} - -func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) { - var lc net.ListenConfig - if reuseport { - lc.Control = reuseportControl - } - - return lc.ListenPacket(context.Background(), network, addr) -} diff --git a/vendor/github.com/miekg/dns/listen_go_not111.go b/vendor/github.com/miekg/dns/listen_go_not111.go deleted file mode 100644 index b9201417a..000000000 --- a/vendor/github.com/miekg/dns/listen_go_not111.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd - -package dns - -import "net" - -const supportsReusePort = false - -func listenTCP(network, addr string, reuseport bool) (net.Listener, error) { - if reuseport { - // TODO(tmthrgd): return an error? 
- } - - return net.Listen(network, addr) -} - -func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) { - if reuseport { - // TODO(tmthrgd): return an error? - } - - return net.ListenPacket(network, addr) -} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go deleted file mode 100644 index e04fb5d77..000000000 --- a/vendor/github.com/miekg/dns/msg.go +++ /dev/null @@ -1,1228 +0,0 @@ -// DNS packet assembly, see RFC 1035. Converting from - Unpack() - -// and to - Pack() - wire format. -// All the packers and unpackers take a (msg []byte, off int) -// and return (off1 int, ok bool). If they return ok==false, they -// also return off1==len(msg), so that the next unpacker will -// also fail. This lets us avoid checks of ok until the end of a -// packing sequence. - -package dns - -//go:generate go run msg_generate.go - -import ( - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/big" - "math/rand" - "strconv" - "strings" - "sync" -) - -const ( - maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer - maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4 - - // This is the maximum number of compression pointers that should occur in a - // semantically valid message. Each label in a domain name must be at least one - // octet and is separated by a period. The root label won't be represented by a - // compression pointer to a compression pointer, hence the -2 to exclude the - // smallest valid root label. - // - // It is possible to construct a valid message that has more compression pointers - // than this, and still doesn't loop, by pointing to a previous pointer. This is - // not something a well written implementation should ever do, so we leave them - // to trip the maximum compression pointer check. - maxCompressionPointers = (maxDomainNameWireOctets+1)/2 - 2 - - // This is the maximum length of a domain name in presentation format. 
The - // maximum wire length of a domain name is 255 octets (see above), with the - // maximum label length being 63. The wire format requires one extra byte over - // the presentation format, reducing the number of octets by 1. Each label in - // the name will be separated by a single period, with each octet in the label - // expanding to at most 4 bytes (\DDD). If all other labels are of the maximum - // length, then the final label can only be 61 octets long to not exceed the - // maximum allowed wire length. - maxDomainNamePresentationLength = 61*4 + 1 + 63*4 + 1 + 63*4 + 1 + 63*4 + 1 -) - -// Errors defined in this package. -var ( - ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm. - ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication. - ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message. - ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized. - ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ... - ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot. - ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID. - ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid. 
- ErrKey error = &Error{err: "bad key"} - ErrKeySize error = &Error{err: "bad key size"} - ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)} - ErrNoSig error = &Error{err: "no signature found"} - ErrPrivKey error = &Error{err: "bad private key"} - ErrRcode error = &Error{err: "bad rcode"} - ErrRdata error = &Error{err: "bad rdata"} - ErrRRset error = &Error{err: "bad rrset"} - ErrSecret error = &Error{err: "no secrets defined"} - ErrShortRead error = &Error{err: "short read"} - ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. - ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. - ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. -) - -// Id by default, returns a 16 bits random number to be used as a -// message id. The random provided should be good enough. This being a -// variable the function can be reassigned to a custom function. -// For instance, to make it return a static value: -// -// dns.Id = func() uint16 { return 3 } -var Id = id - -var ( - idLock sync.Mutex - idRand *rand.Rand -) - -// id returns a 16 bits random number to be used as a -// message id. The random provided should be good enough. -func id() uint16 { - idLock.Lock() - - if idRand == nil { - // This (partially) works around - // https://github.com/golang/go/issues/11833 by only - // seeding idRand upon the first call to id. - - var seed int64 - var buf [8]byte - - if _, err := crand.Read(buf[:]); err == nil { - seed = int64(binary.LittleEndian.Uint64(buf[:])) - } else { - seed = rand.Int63() - } - - idRand = rand.New(rand.NewSource(seed)) - } - - // The call to idRand.Uint32 must be within the - // mutex lock because *rand.Rand is not safe for - // concurrent use. 
- // - // There is no added performance overhead to calling - // idRand.Uint32 inside a mutex lock over just - // calling rand.Uint32 as the global math/rand rng - // is internally protected by a sync.Mutex. - id := uint16(idRand.Uint32()) - - idLock.Unlock() - return id -} - -// MsgHdr is a a manually-unpacked version of (id, bits). -type MsgHdr struct { - Id uint16 - Response bool - Opcode int - Authoritative bool - Truncated bool - RecursionDesired bool - RecursionAvailable bool - Zero bool - AuthenticatedData bool - CheckingDisabled bool - Rcode int -} - -// Msg contains the layout of a DNS message. -type Msg struct { - MsgHdr - Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. - Question []Question // Holds the RR(s) of the question section. - Answer []RR // Holds the RR(s) of the answer section. - Ns []RR // Holds the RR(s) of the authority section. - Extra []RR // Holds the RR(s) of the additional section. -} - -// ClassToString is a maps Classes to strings for each CLASS wire type. -var ClassToString = map[uint16]string{ - ClassINET: "IN", - ClassCSNET: "CS", - ClassCHAOS: "CH", - ClassHESIOD: "HS", - ClassNONE: "NONE", - ClassANY: "ANY", -} - -// OpcodeToString maps Opcodes to strings. -var OpcodeToString = map[int]string{ - OpcodeQuery: "QUERY", - OpcodeIQuery: "IQUERY", - OpcodeStatus: "STATUS", - OpcodeNotify: "NOTIFY", - OpcodeUpdate: "UPDATE", -} - -// RcodeToString maps Rcodes to strings. 
-var RcodeToString = map[int]string{ - RcodeSuccess: "NOERROR", - RcodeFormatError: "FORMERR", - RcodeServerFailure: "SERVFAIL", - RcodeNameError: "NXDOMAIN", - RcodeNotImplemented: "NOTIMP", - RcodeRefused: "REFUSED", - RcodeYXDomain: "YXDOMAIN", // See RFC 2136 - RcodeYXRrset: "YXRRSET", - RcodeNXRrset: "NXRRSET", - RcodeNotAuth: "NOTAUTH", - RcodeNotZone: "NOTZONE", - RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891 - // RcodeBadVers: "BADVERS", - RcodeBadKey: "BADKEY", - RcodeBadTime: "BADTIME", - RcodeBadMode: "BADMODE", - RcodeBadName: "BADNAME", - RcodeBadAlg: "BADALG", - RcodeBadTrunc: "BADTRUNC", - RcodeBadCookie: "BADCOOKIE", -} - -// compressionMap is used to allow a more efficient compression map -// to be used for internal packDomainName calls without changing the -// signature or functionality of public API. -// -// In particular, map[string]uint16 uses 25% less per-entry memory -// than does map[string]int. -type compressionMap struct { - ext map[string]int // external callers - int map[string]uint16 // internal callers -} - -func (m compressionMap) valid() bool { - return m.int != nil || m.ext != nil -} - -func (m compressionMap) insert(s string, pos int) { - if m.ext != nil { - m.ext[s] = pos - } else { - m.int[s] = uint16(pos) - } -} - -func (m compressionMap) find(s string) (int, bool) { - if m.ext != nil { - pos, ok := m.ext[s] - return pos, ok - } - - pos, ok := m.int[s] - return int(pos), ok -} - -// Domain names are a sequence of counted strings -// split at the dots. They end with a zero-length string. - -// PackDomainName packs a domain name s into msg[off:]. -// If compression is wanted compress must be true and the compression -// map needs to hold a mapping between domain names and offsets -// pointing into msg. 
-func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - return packDomainName(s, msg, off, compressionMap{ext: compression}, compress) -} - -func packDomainName(s string, msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - // XXX: A logical copy of this function exists in IsDomainName and - // should be kept in sync with this function. - - ls := len(s) - if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. - return off, nil - } - - // If not fully qualified, error out. - if !IsFqdn(s) { - return len(msg), ErrFqdn - } - - // Each dot ends a segment of the name. - // We trade each dot byte for a length byte. - // Except for escaped dots (\.), which are normal dots. - // There is also a trailing zero. - - // Compression - pointer := -1 - - // Emit sequence of counted strings, chopping at dots. - var ( - begin int - compBegin int - compOff int - bs []byte - wasDot bool - ) -loop: - for i := 0; i < ls; i++ { - var c byte - if bs == nil { - c = s[i] - } else { - c = bs[i] - } - - switch c { - case '\\': - if off+1 > len(msg) { - return len(msg), ErrBuf - } - - if bs == nil { - bs = []byte(s) - } - - // check for \DDD - if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) { - bs[i] = dddToByte(bs[i+1:]) - copy(bs[i+1:ls-3], bs[i+4:]) - ls -= 3 - compOff += 3 - } else { - copy(bs[i:ls-1], bs[i+1:]) - ls-- - compOff++ - } - - wasDot = false - case '.': - if wasDot { - // two dots back to back is not legal - return len(msg), ErrRdata - } - wasDot = true - - labelLen := i - begin - if labelLen >= 1<<6 { // top two bits of length must be clear - return len(msg), ErrRdata - } - - // off can already (we're in a loop) be bigger than len(msg) - // this happens when a name isn't fully qualified - if off+1+labelLen > len(msg) { - return len(msg), ErrBuf - } - - // Don't try to compress '.' 
- // We should only compress when compress is true, but we should also still pick - // up names that can be used for *future* compression(s). - if compression.valid() && !isRootLabel(s, bs, begin, ls) { - if p, ok := compression.find(s[compBegin:]); ok { - // The first hit is the longest matching dname - // keep the pointer offset we get back and store - // the offset of the current name, because that's - // where we need to insert the pointer later - - // If compress is true, we're allowed to compress this dname - if compress { - pointer = p // Where to point to - break loop - } - } else if off < maxCompressionOffset { - // Only offsets smaller than maxCompressionOffset can be used. - compression.insert(s[compBegin:], off) - } - } - - // The following is covered by the length check above. - msg[off] = byte(labelLen) - - if bs == nil { - copy(msg[off+1:], s[begin:i]) - } else { - copy(msg[off+1:], bs[begin:i]) - } - off += 1 + labelLen - - begin = i + 1 - compBegin = begin + compOff - default: - wasDot = false - } - } - - // Root label is special - if isRootLabel(s, bs, 0, ls) { - return off, nil - } - - // If we did compression and we find something add the pointer here - if pointer != -1 { - // We have two bytes (14 bits) to put the pointer in - binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000)) - return off + 2, nil - } - - if off < len(msg) { - msg[off] = 0 - } - - return off + 1, nil -} - -// isRootLabel returns whether s or bs, from off to end, is the root -// label ".". -// -// If bs is nil, s will be checked, otherwise bs will be checked. -func isRootLabel(s string, bs []byte, off, end int) bool { - if bs == nil { - return s[off:end] == "." - } - - return end-off == 1 && bs[off] == '.' -} - -// Unpack a domain name. -// In addition to the simple sequences of counted strings above, -// domain names are allowed to refer to strings elsewhere in the -// packet, to avoid repeating common suffixes when returning -// many entries in a single domain. 
The pointers are marked -// by a length byte with the top two bits set. Ignoring those -// two bits, that byte and the next give a 14 bit offset from msg[0] -// where we should pick up the trail. -// Note that if we jump elsewhere in the packet, -// we return off1 == the offset after the first pointer we found, -// which is where the next record will start. -// In theory, the pointers are only allowed to jump backward. -// We let them jump anywhere and stop jumping after a while. - -// UnpackDomainName unpacks a domain name into a string. It returns -// the name, the new offset into msg and any error that occurred. -// -// When an error is encountered, the unpacked name will be discarded -// and len(msg) will be returned as the offset. -func UnpackDomainName(msg []byte, off int) (string, int, error) { - s := make([]byte, 0, maxDomainNamePresentationLength) - off1 := 0 - lenmsg := len(msg) - budget := maxDomainNameWireOctets - ptr := 0 // number of pointers followed -Loop: - for { - if off >= lenmsg { - return "", lenmsg, ErrBuf - } - c := int(msg[off]) - off++ - switch c & 0xC0 { - case 0x00: - if c == 0x00 { - // end of name - break Loop - } - // literal string - if off+c > lenmsg { - return "", lenmsg, ErrBuf - } - budget -= c + 1 // +1 for the label separator - if budget <= 0 { - return "", lenmsg, ErrLongDomain - } - for _, b := range msg[off : off+c] { - switch b { - case '.', '(', ')', ';', ' ', '@': - fallthrough - case '"', '\\': - s = append(s, '\\', b) - default: - if b < ' ' || b > '~' { // unprintable, use \DDD - s = append(s, escapeByte(b)...) - } else { - s = append(s, b) - } - } - } - s = append(s, '.') - off += c - case 0xC0: - // pointer to somewhere else in msg. - // remember location after first ptr, - // since that's how many bytes we consumed. - // also, don't follow too many pointers -- - // maybe there's a loop. 
- if off >= lenmsg { - return "", lenmsg, ErrBuf - } - c1 := msg[off] - off++ - if ptr == 0 { - off1 = off - } - if ptr++; ptr > maxCompressionPointers { - return "", lenmsg, &Error{err: "too many compression pointers"} - } - // pointer should guarantee that it advances and points forwards at least - // but the condition on previous three lines guarantees that it's - // at least loop-free - off = (c^0xC0)<<8 | int(c1) - default: - // 0x80 and 0x40 are reserved - return "", lenmsg, ErrRdata - } - } - if ptr == 0 { - off1 = off - } - if len(s) == 0 { - return ".", off1, nil - } - return string(s), off1, nil -} - -func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { - if len(txt) == 0 { - if offset >= len(msg) { - return offset, ErrBuf - } - msg[offset] = 0 - return offset, nil - } - var err error - for _, s := range txt { - if len(s) > len(tmp) { - return offset, ErrBuf - } - offset, err = packTxtString(s, msg, offset, tmp) - if err != nil { - return offset, err - } - } - return offset, nil -} - -func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { - lenByteOffset := offset - if offset >= len(msg) || len(s) > len(tmp) { - return offset, ErrBuf - } - offset++ - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { - if len(msg) <= offset { - return offset, ErrBuf - } - if bs[i] == '\\' { - i++ - if i == len(bs) { - break - } - // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) - i += 2 - } else { - msg[offset] = bs[i] - } - } else { - msg[offset] = bs[i] - } - offset++ - } - l := offset - lenByteOffset - 1 - if l > 255 { - return offset, &Error{err: "string exceeded 255 bytes in txt"} - } - msg[lenByteOffset] = byte(l) - return offset, nil -} - -func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { - if offset >= len(msg) || len(s) > len(tmp) { - return offset, ErrBuf - } - bs := tmp[:len(s)] - 
copy(bs, s) - for i := 0; i < len(bs); i++ { - if len(msg) <= offset { - return offset, ErrBuf - } - if bs[i] == '\\' { - i++ - if i == len(bs) { - break - } - // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) - i += 2 - } else { - msg[offset] = bs[i] - } - } else { - msg[offset] = bs[i] - } - offset++ - } - return offset, nil -} - -func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { - off = off0 - var s string - for off < len(msg) && err == nil { - s, off, err = unpackString(msg, off) - if err == nil { - ss = append(ss, s) - } - } - return -} - -// Helpers for dealing with escaped bytes -func isDigit(b byte) bool { return b >= '0' && b <= '9' } - -func dddToByte(s []byte) byte { - _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 - return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) -} - -func dddStringToByte(s string) byte { - _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 - return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) -} - -// Helper function for packing and unpacking -func intToBytes(i *big.Int, length int) []byte { - buf := i.Bytes() - if len(buf) < length { - b := make([]byte, length) - copy(b[length-len(buf):], buf) - return b - } - return buf -} - -// PackRR packs a resource record rr into msg[off:]. -// See PackDomainName for documentation about the compression. -func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - headerEnd, off1, err := packRR(rr, msg, off, compressionMap{ext: compression}, compress) - if err == nil { - // packRR no longer sets the Rdlength field on the rr, but - // callers might be expecting it so we set it here. 
- rr.Header().Rdlength = uint16(off1 - headerEnd) - } - return off1, err -} - -func packRR(rr RR, msg []byte, off int, compression compressionMap, compress bool) (headerEnd int, off1 int, err error) { - if rr == nil { - return len(msg), len(msg), &Error{err: "nil rr"} - } - - headerEnd, err = rr.Header().packHeader(msg, off, compression, compress) - if err != nil { - return headerEnd, len(msg), err - } - - off1, err = rr.pack(msg, headerEnd, compression, compress) - if err != nil { - return headerEnd, len(msg), err - } - - rdlength := off1 - headerEnd - if int(uint16(rdlength)) != rdlength { // overflow - return headerEnd, len(msg), ErrRdata - } - - // The RDLENGTH field is the last field in the header and we set it here. - binary.BigEndian.PutUint16(msg[headerEnd-2:], uint16(rdlength)) - return headerEnd, off1, nil -} - -// UnpackRR unpacks msg[off:] into an RR. -func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { - h, off, msg, err := unpackHeader(msg, off) - if err != nil { - return nil, len(msg), err - } - - return UnpackRRWithHeader(h, msg, off) -} - -// UnpackRRWithHeader unpacks the record type specific payload given an existing -// RR_Header. -func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) { - if newFn, ok := TypeToRR[h.Rrtype]; ok { - rr = newFn() - *rr.Header() = h - } else { - rr = &RFC3597{Hdr: h} - } - - if noRdata(h) { - return rr, off, nil - } - - end := off + int(h.Rdlength) - - off, err = rr.unpack(msg, off) - if err != nil { - return nil, end, err - } - if off != end { - return &h, end, &Error{err: "bad rdlength"} - } - - return rr, off, nil -} - -// unpackRRslice unpacks msg[off:] into an []RR. 
-// If we cannot unpack the whole array, then it will return nil -func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) { - var r RR - // Don't pre-allocate, l may be under attacker control - var dst []RR - for i := 0; i < l; i++ { - off1 := off - r, off, err = UnpackRR(msg, off) - if err != nil { - off = len(msg) - break - } - // If offset does not increase anymore, l is a lie - if off1 == off { - l = i - break - } - dst = append(dst, r) - } - if err != nil && off == len(msg) { - dst = nil - } - return dst, off, err -} - -// Convert a MsgHdr to a string, with dig-like headers: -// -//;; opcode: QUERY, status: NOERROR, id: 48404 -// -//;; flags: qr aa rd ra; -func (h *MsgHdr) String() string { - if h == nil { - return " MsgHdr" - } - - s := ";; opcode: " + OpcodeToString[h.Opcode] - s += ", status: " + RcodeToString[h.Rcode] - s += ", id: " + strconv.Itoa(int(h.Id)) + "\n" - - s += ";; flags:" - if h.Response { - s += " qr" - } - if h.Authoritative { - s += " aa" - } - if h.Truncated { - s += " tc" - } - if h.RecursionDesired { - s += " rd" - } - if h.RecursionAvailable { - s += " ra" - } - if h.Zero { // Hmm - s += " z" - } - if h.AuthenticatedData { - s += " ad" - } - if h.CheckingDisabled { - s += " cd" - } - - s += ";" - return s -} - -// Pack packs a Msg: it is converted to to wire format. -// If the dns.Compress is true the message will be in compressed wire format. -func (dns *Msg) Pack() (msg []byte, err error) { - return dns.PackBuffer(nil) -} - -// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated. -func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { - // If this message can't be compressed, avoid filling the - // compression map and creating garbage. - if dns.Compress && dns.isCompressible() { - compression := make(map[string]uint16) // Compression pointer mappings. 
- return dns.packBufferWithCompressionMap(buf, compressionMap{int: compression}, true) - } - - return dns.packBufferWithCompressionMap(buf, compressionMap{}, false) -} - -// packBufferWithCompressionMap packs a Msg, using the given buffer buf. -func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression compressionMap, compress bool) (msg []byte, err error) { - if dns.Rcode < 0 || dns.Rcode > 0xFFF { - return nil, ErrRcode - } - - // Set extended rcode unconditionally if we have an opt, this will allow - // reseting the extended rcode bits if they need to. - if opt := dns.IsEdns0(); opt != nil { - opt.SetExtendedRcode(uint16(dns.Rcode)) - } else if dns.Rcode > 0xF { - // If Rcode is an extended one and opt is nil, error out. - return nil, ErrExtendedRcode - } - - // Convert convenient Msg into wire-like Header. - var dh Header - dh.Id = dns.Id - dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF) - if dns.Response { - dh.Bits |= _QR - } - if dns.Authoritative { - dh.Bits |= _AA - } - if dns.Truncated { - dh.Bits |= _TC - } - if dns.RecursionDesired { - dh.Bits |= _RD - } - if dns.RecursionAvailable { - dh.Bits |= _RA - } - if dns.Zero { - dh.Bits |= _Z - } - if dns.AuthenticatedData { - dh.Bits |= _AD - } - if dns.CheckingDisabled { - dh.Bits |= _CD - } - - dh.Qdcount = uint16(len(dns.Question)) - dh.Ancount = uint16(len(dns.Answer)) - dh.Nscount = uint16(len(dns.Ns)) - dh.Arcount = uint16(len(dns.Extra)) - - // We need the uncompressed length here, because we first pack it and then compress it. - msg = buf - uncompressedLen := msgLenWithCompressionMap(dns, nil) - if packLen := uncompressedLen + 1; len(msg) < packLen { - msg = make([]byte, packLen) - } - - // Pack it in: header and then the pieces. 
- off := 0 - off, err = dh.pack(msg, off, compression, compress) - if err != nil { - return nil, err - } - for _, r := range dns.Question { - off, err = r.pack(msg, off, compression, compress) - if err != nil { - return nil, err - } - } - for _, r := range dns.Answer { - _, off, err = packRR(r, msg, off, compression, compress) - if err != nil { - return nil, err - } - } - for _, r := range dns.Ns { - _, off, err = packRR(r, msg, off, compression, compress) - if err != nil { - return nil, err - } - } - for _, r := range dns.Extra { - _, off, err = packRR(r, msg, off, compression, compress) - if err != nil { - return nil, err - } - } - return msg[:off], nil -} - -func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) { - // If we are at the end of the message we should return *just* the - // header. This can still be useful to the caller. 9.9.9.9 sends these - // when responding with REFUSED for instance. - if off == len(msg) { - // reset sections before returning - dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil - return nil - } - - // Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are - // attacker controlled. This means we can't use them to pre-allocate - // slices. - dns.Question = nil - for i := 0; i < int(dh.Qdcount); i++ { - off1 := off - var q Question - q, off, err = unpackQuestion(msg, off) - if err != nil { - return err - } - if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie! 
- dh.Qdcount = uint16(i) - break - } - dns.Question = append(dns.Question, q) - } - - dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off) - // The header counts might have been wrong so we need to update it - dh.Ancount = uint16(len(dns.Answer)) - if err == nil { - dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off) - } - // The header counts might have been wrong so we need to update it - dh.Nscount = uint16(len(dns.Ns)) - if err == nil { - dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) - } - // The header counts might have been wrong so we need to update it - dh.Arcount = uint16(len(dns.Extra)) - - // Set extended Rcode - if opt := dns.IsEdns0(); opt != nil { - dns.Rcode |= opt.ExtendedRcode() - } - - if off != len(msg) { - // TODO(miek) make this an error? - // use PackOpt to let people tell how detailed the error reporting should be? - // println("dns: extra bytes in dns packet", off, "<", len(msg)) - } - return err - -} - -// Unpack unpacks a binary message to a Msg structure. -func (dns *Msg) Unpack(msg []byte) (err error) { - dh, off, err := unpackMsgHdr(msg, 0) - if err != nil { - return err - } - - dns.setHdr(dh) - return dns.unpack(dh, msg, off) -} - -// Convert a complete message to a string with dig-like output. 
-func (dns *Msg) String() string { - if dns == nil { - return " MsgHdr" - } - s := dns.MsgHdr.String() + " " - s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " - s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " - s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " - s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" - if len(dns.Question) > 0 { - s += "\n;; QUESTION SECTION:\n" - for _, r := range dns.Question { - s += r.String() + "\n" - } - } - if len(dns.Answer) > 0 { - s += "\n;; ANSWER SECTION:\n" - for _, r := range dns.Answer { - if r != nil { - s += r.String() + "\n" - } - } - } - if len(dns.Ns) > 0 { - s += "\n;; AUTHORITY SECTION:\n" - for _, r := range dns.Ns { - if r != nil { - s += r.String() + "\n" - } - } - } - if len(dns.Extra) > 0 { - s += "\n;; ADDITIONAL SECTION:\n" - for _, r := range dns.Extra { - if r != nil { - s += r.String() + "\n" - } - } - } - return s -} - -// isCompressible returns whether the msg may be compressible. -func (dns *Msg) isCompressible() bool { - // If we only have one question, there is nothing we can ever compress. - return len(dns.Question) > 1 || len(dns.Answer) > 0 || - len(dns.Ns) > 0 || len(dns.Extra) > 0 -} - -// Len returns the message length when in (un)compressed wire format. -// If dns.Compress is true compression it is taken into account. Len() -// is provided to be a faster way to get the size of the resulting packet, -// than packing it, measuring the size and discarding the buffer. -func (dns *Msg) Len() int { - // If this message can't be compressed, avoid filling the - // compression map and creating garbage. 
- if dns.Compress && dns.isCompressible() { - compression := make(map[string]struct{}) - return msgLenWithCompressionMap(dns, compression) - } - - return msgLenWithCompressionMap(dns, nil) -} - -func msgLenWithCompressionMap(dns *Msg, compression map[string]struct{}) int { - l := headerSize - - for _, r := range dns.Question { - l += r.len(l, compression) - } - for _, r := range dns.Answer { - if r != nil { - l += r.len(l, compression) - } - } - for _, r := range dns.Ns { - if r != nil { - l += r.len(l, compression) - } - } - for _, r := range dns.Extra { - if r != nil { - l += r.len(l, compression) - } - } - - return l -} - -func domainNameLen(s string, off int, compression map[string]struct{}, compress bool) int { - if s == "" || s == "." { - return 1 - } - - escaped := strings.Contains(s, "\\") - - if compression != nil && (compress || off < maxCompressionOffset) { - // compressionLenSearch will insert the entry into the compression - // map if it doesn't contain it. - if l, ok := compressionLenSearch(compression, s, off); ok && compress { - if escaped { - return escapedNameLen(s[:l]) + 2 - } - - return l + 2 - } - } - - if escaped { - return escapedNameLen(s) + 1 - } - - return len(s) + 1 -} - -func escapedNameLen(s string) int { - nameLen := len(s) - for i := 0; i < len(s); i++ { - if s[i] != '\\' { - continue - } - - if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { - nameLen -= 3 - i += 3 - } else { - nameLen-- - i++ - } - } - - return nameLen -} - -func compressionLenSearch(c map[string]struct{}, s string, msgOff int) (int, bool) { - for off, end := 0, false; !end; off, end = NextLabel(s, off) { - if _, ok := c[s[off:]]; ok { - return off, true - } - - if msgOff+off < maxCompressionOffset { - c[s[off:]] = struct{}{} - } - } - - return 0, false -} - -// Copy returns a new RR which is a deep-copy of r. -func Copy(r RR) RR { return r.copy() } - -// Len returns the length (in octets) of the uncompressed RR in wire format. 
-func Len(r RR) int { return r.len(0, nil) } - -// Copy returns a new *Msg which is a deep-copy of dns. -func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } - -// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. -func (dns *Msg) CopyTo(r1 *Msg) *Msg { - r1.MsgHdr = dns.MsgHdr - r1.Compress = dns.Compress - - if len(dns.Question) > 0 { - r1.Question = make([]Question, len(dns.Question)) - copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy - } - - rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) - r1.Answer, rrArr = rrArr[:0:len(dns.Answer)], rrArr[len(dns.Answer):] - r1.Ns, rrArr = rrArr[:0:len(dns.Ns)], rrArr[len(dns.Ns):] - r1.Extra = rrArr[:0:len(dns.Extra)] - - for _, r := range dns.Answer { - r1.Answer = append(r1.Answer, r.copy()) - } - - for _, r := range dns.Ns { - r1.Ns = append(r1.Ns, r.copy()) - } - - for _, r := range dns.Extra { - r1.Extra = append(r1.Extra, r.copy()) - } - - return r1 -} - -func (q *Question) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) { - off, err := packDomainName(q.Name, msg, off, compression, compress) - if err != nil { - return off, err - } - off, err = packUint16(q.Qtype, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(q.Qclass, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func unpackQuestion(msg []byte, off int) (Question, int, error) { - var ( - q Question - err error - ) - q.Name, off, err = UnpackDomainName(msg, off) - if err != nil { - return q, off, err - } - if off == len(msg) { - return q, off, nil - } - q.Qtype, off, err = unpackUint16(msg, off) - if err != nil { - return q, off, err - } - if off == len(msg) { - return q, off, nil - } - q.Qclass, off, err = unpackUint16(msg, off) - if off == len(msg) { - return q, off, nil - } - return q, off, err -} - -func (dh *Header) pack(msg []byte, off int, compression 
compressionMap, compress bool) (int, error) { - off, err := packUint16(dh.Id, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Bits, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Qdcount, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Ancount, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Nscount, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Arcount, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func unpackMsgHdr(msg []byte, off int) (Header, int, error) { - var ( - dh Header - err error - ) - dh.Id, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Bits, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Qdcount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Ancount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Nscount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Arcount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - return dh, off, nil -} - -// setHdr set the header in the dns using the binary data in dh. -func (dns *Msg) setHdr(dh Header) { - dns.Id = dh.Id - dns.Response = dh.Bits&_QR != 0 - dns.Opcode = int(dh.Bits>>11) & 0xF - dns.Authoritative = dh.Bits&_AA != 0 - dns.Truncated = dh.Bits&_TC != 0 - dns.RecursionDesired = dh.Bits&_RD != 0 - dns.RecursionAvailable = dh.Bits&_RA != 0 - dns.Zero = dh.Bits&_Z != 0 // _Z covers the zero bit, which should be zero; not sure why we set it to the opposite. 
- dns.AuthenticatedData = dh.Bits&_AD != 0 - dns.CheckingDisabled = dh.Bits&_CD != 0 - dns.Rcode = int(dh.Bits & 0xF) -} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go deleted file mode 100644 index 721a0fce3..000000000 --- a/vendor/github.com/miekg/dns/msg_generate.go +++ /dev/null @@ -1,328 +0,0 @@ -//+build ignore - -// msg_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate pack/unpack methods based on the struct tags. The generated source is -// written to zmsg.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" -) - -var packageHdr = ` -// Code generated by "go run msg_generate.go"; DO NOT EDIT. - -package dns - -` - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. 
-func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - fmt.Fprint(b, "// pack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {\n", name) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("off, err = packStringTxt(rr.%s, msg, off)\n") - case `dns:"opt"`: - o("off, err = packDataOpt(rr.%s, msg, off)\n") - case `dns:"nsec"`: - o("off, err = packDataNsec(rr.%s, msg, off)\n") - case `dns:"domain-name"`: - o("off, err = packDataDomainNames(rr.%s, msg, off, compression, false)\n") - default: - log.Fatalln(name, 
st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - o("off, err = packDomainName(rr.%s, msg, off, compression, compress)\n") - case st.Tag(i) == `dns:"domain-name"`: - o("off, err = packDomainName(rr.%s, msg, off, compression, false)\n") - case st.Tag(i) == `dns:"a"`: - o("off, err = packDataA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"aaaa"`: - o("off, err = packDataAAAA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"uint48"`: - o("off, err = packUint48(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"txt"`: - o("off, err = packString(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 - fallthrough - case st.Tag(i) == `dns:"base32"`: - o("off, err = packStringBase32(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("off, err = packStringBase64(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): - // directly write instead of using o() so we get the error check in the correct place - field := st.Field(i).Name() - fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. 
empty -if rr.%s != "-" { - off, err = packStringHex(rr.%s, msg, off) - if err != nil { - return off, err - } -} -`, field, field) - continue - case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("off, err = packStringHex(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"any"`: - o("off, err = packStringAny(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"octet"`: - o("off, err = packStringOctet(rr.%s, msg, off)\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("off, err = packUint8(rr.%s, msg, off)\n") - case types.Uint16: - o("off, err = packUint16(rr.%s, msg, off)\n") - case types.Uint32: - o("off, err = packUint32(rr.%s, msg, off)\n") - case types.Uint64: - o("off, err = packUint64(rr.%s, msg, off)\n") - case types.String: - o("off, err = packString(rr.%s, msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintln(b, "return off, nil }\n") - } - - fmt.Fprint(b, "// unpack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func (rr *%s) unpack(msg []byte, off int) (off1 int, err error) {\n", name) - fmt.Fprint(b, `rdStart := off -_ = rdStart - -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - } - - // size-* are special, because they reference a struct member we should use for the length. 
- if strings.HasPrefix(st.Tag(i), `dns:"size-`) { - structMember := structMember(st.Tag(i)) - structTag := structTag(st.Tag(i)) - switch structTag { - case "hex": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base32": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base64": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - fmt.Fprint(b, `if err != nil { -return off, err -} -`) - continue - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("rr.%s, off, err = unpackStringTxt(msg, off)\n") - case `dns:"opt"`: - o("rr.%s, off, err = unpackDataOpt(msg, off)\n") - case `dns:"nsec"`: - o("rr.%s, off, err = unpackDataNsec(msg, off)\n") - case `dns:"domain-name"`: - o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"cdomain-name"`: - fallthrough - case `dns:"domain-name"`: - o("rr.%s, off, err = UnpackDomainName(msg, off)\n") - case `dns:"a"`: - o("rr.%s, off, err = unpackDataA(msg, off)\n") - case `dns:"aaaa"`: - o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") - case `dns:"uint48"`: - o("rr.%s, off, err = unpackUint48(msg, off)\n") - case `dns:"txt"`: - o("rr.%s, off, err = unpackString(msg, off)\n") - case `dns:"base32"`: - o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"base64"`: - o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"hex"`: - o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + 
int(rr.Hdr.Rdlength))\n") - case `dns:"any"`: - o("rr.%s, off, err = unpackStringAny(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"octet"`: - o("rr.%s, off, err = unpackStringOctet(msg, off)\n") - case "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("rr.%s, off, err = unpackUint8(msg, off)\n") - case types.Uint16: - o("rr.%s, off, err = unpackUint16(msg, off)\n") - case types.Uint32: - o("rr.%s, off, err = unpackUint32(msg, off)\n") - case types.Uint64: - o("rr.%s, off, err = unpackUint64(msg, off)\n") - case types.String: - o("rr.%s, off, err = unpackString(msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - // If we've hit len(msg) we return without error. - if i < st.NumFields()-1 { - fmt.Fprintf(b, `if off == len(msg) { -return off, nil - } -`) - } - } - fmt.Fprintf(b, "return off, nil }\n\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zmsg.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. -func structMember(s string) string { - fields := strings.Split(s, ":") - if len(fields) == 0 { - return "" - } - f := fields[len(fields)-1] - // f should have a closing " - if len(f) > 1 { - return f[:len(f)-1] - } - return f -} - -// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. 
-func structTag(s string) string { - fields := strings.Split(s, ":") - if len(fields) < 2 { - return "" - } - return fields[1][len("\"size-"):] -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go deleted file mode 100644 index ecd9280f4..000000000 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ /dev/null @@ -1,648 +0,0 @@ -package dns - -import ( - "encoding/base32" - "encoding/base64" - "encoding/binary" - "encoding/hex" - "net" - "strings" -) - -// helper functions called from the generated zmsg.go - -// These function are named after the tag to help pack/unpack, if there is no tag it is the name -// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or -// packDataDomainName. - -func unpackDataA(msg []byte, off int) (net.IP, int, error) { - if off+net.IPv4len > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking a"} - } - a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) - off += net.IPv4len - return a, off, nil -} - -func packDataA(a net.IP, msg []byte, off int) (int, error) { - switch len(a) { - case net.IPv4len, net.IPv6len: - // It must be a slice of 4, even if it is 16, we encode only the first 4 - if off+net.IPv4len > len(msg) { - return len(msg), &Error{err: "overflow packing a"} - } - - copy(msg[off:], a.To4()) - off += net.IPv4len - case 0: - // Allowed, for dynamic updates. - default: - return len(msg), &Error{err: "overflow packing a"} - } - return off, nil -} - -func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { - if off+net.IPv6len > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking aaaa"} - } - aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) 
- off += net.IPv6len - return aaaa, off, nil -} - -func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { - switch len(aaaa) { - case net.IPv6len: - if off+net.IPv6len > len(msg) { - return len(msg), &Error{err: "overflow packing aaaa"} - } - - copy(msg[off:], aaaa) - off += net.IPv6len - case 0: - // Allowed, dynamic updates. - default: - return len(msg), &Error{err: "overflow packing aaaa"} - } - return off, nil -} - -// unpackHeader unpacks an RR header, returning the offset to the end of the header and a -// re-sliced msg according to the expected length of the RR. -func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) { - hdr := RR_Header{} - if off == len(msg) { - return hdr, off, msg, nil - } - - hdr.Name, off, err = UnpackDomainName(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Rrtype, off, err = unpackUint16(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Class, off, err = unpackUint16(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Ttl, off, err = unpackUint32(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Rdlength, off, err = unpackUint16(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength) - return hdr, off, msg, err -} - -// packHeader packs an RR header, returning the offset to the end of the header. -// See PackDomainName for documentation about the compression. 
-func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) { - if off == len(msg) { - return off, nil - } - - off, err := packDomainName(hdr.Name, msg, off, compression, compress) - if err != nil { - return len(msg), err - } - off, err = packUint16(hdr.Rrtype, msg, off) - if err != nil { - return len(msg), err - } - off, err = packUint16(hdr.Class, msg, off) - if err != nil { - return len(msg), err - } - off, err = packUint32(hdr.Ttl, msg, off) - if err != nil { - return len(msg), err - } - off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR. - if err != nil { - return len(msg), err - } - return off, nil -} - -// helper helper functions. - -// truncateMsgFromRdLength truncates msg to match the expected length of the RR. -// Returns an error if msg is smaller than the expected size. -func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) { - lenrd := off + int(rdlength) - if lenrd > len(msg) { - return msg, &Error{err: "overflowing header size"} - } - return msg[:lenrd], nil -} - -var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) - -func fromBase32(s []byte) (buf []byte, err error) { - for i, b := range s { - if b >= 'a' && b <= 'z' { - s[i] = b - 32 - } - } - buflen := base32HexNoPadEncoding.DecodedLen(len(s)) - buf = make([]byte, buflen) - n, err := base32HexNoPadEncoding.Decode(buf, s) - buf = buf[:n] - return -} - -func toBase32(b []byte) string { - return base32HexNoPadEncoding.EncodeToString(b) -} - -func fromBase64(s []byte) (buf []byte, err error) { - buflen := base64.StdEncoding.DecodedLen(len(s)) - buf = make([]byte, buflen) - n, err := base64.StdEncoding.Decode(buf, s) - buf = buf[:n] - return -} - -func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) } - -// dynamicUpdate returns true if the Rdlength is zero. 
-func noRdata(h RR_Header) bool { return h.Rdlength == 0 } - -func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { - if off+1 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint8"} - } - return msg[off], off + 1, nil -} - -func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { - if off+1 > len(msg) { - return len(msg), &Error{err: "overflow packing uint8"} - } - msg[off] = i - return off + 1, nil -} - -func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) { - if off+2 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint16"} - } - return binary.BigEndian.Uint16(msg[off:]), off + 2, nil -} - -func packUint16(i uint16, msg []byte, off int) (off1 int, err error) { - if off+2 > len(msg) { - return len(msg), &Error{err: "overflow packing uint16"} - } - binary.BigEndian.PutUint16(msg[off:], i) - return off + 2, nil -} - -func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) { - if off+4 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint32"} - } - return binary.BigEndian.Uint32(msg[off:]), off + 4, nil -} - -func packUint32(i uint32, msg []byte, off int) (off1 int, err error) { - if off+4 > len(msg) { - return len(msg), &Error{err: "overflow packing uint32"} - } - binary.BigEndian.PutUint32(msg[off:], i) - return off + 4, nil -} - -func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { - if off+6 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} - } - // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) - i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | - uint64(msg[off+4])<<8 | uint64(msg[off+5]) - off += 6 - return i, off, nil -} - -func packUint48(i uint64, msg []byte, off int) (off1 int, err error) { - if off+6 > len(msg) { - return len(msg), &Error{err: "overflow packing uint64 as uint48"} - } - 
msg[off] = byte(i >> 40) - msg[off+1] = byte(i >> 32) - msg[off+2] = byte(i >> 24) - msg[off+3] = byte(i >> 16) - msg[off+4] = byte(i >> 8) - msg[off+5] = byte(i) - off += 6 - return off, nil -} - -func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) { - if off+8 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint64"} - } - return binary.BigEndian.Uint64(msg[off:]), off + 8, nil -} - -func packUint64(i uint64, msg []byte, off int) (off1 int, err error) { - if off+8 > len(msg) { - return len(msg), &Error{err: "overflow packing uint64"} - } - binary.BigEndian.PutUint64(msg[off:], i) - off += 8 - return off, nil -} - -func unpackString(msg []byte, off int) (string, int, error) { - if off+1 > len(msg) { - return "", off, &Error{err: "overflow unpacking txt"} - } - l := int(msg[off]) - if off+l+1 > len(msg) { - return "", off, &Error{err: "overflow unpacking txt"} - } - var s strings.Builder - s.Grow(l) - for _, b := range msg[off+1 : off+1+l] { - switch { - case b == '"' || b == '\\': - s.WriteByte('\\') - s.WriteByte(b) - case b < ' ' || b > '~': // unprintable - s.WriteString(escapeByte(b)) - default: - s.WriteByte(b) - } - } - off += 1 + l - return s.String(), off, nil -} - -func packString(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packTxtString(s, msg, off, txtTmp) - if err != nil { - return len(msg), err - } - return off, nil -} - -func unpackStringBase32(msg []byte, off, end int) (string, int, error) { - if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking base32"} - } - s := toBase32(msg[off:end]) - return s, end, nil -} - -func packStringBase32(s string, msg []byte, off int) (int, error) { - b32, err := fromBase32([]byte(s)) - if err != nil { - return len(msg), err - } - if off+len(b32) > len(msg) { - return len(msg), &Error{err: "overflow packing base32"} - } - copy(msg[off:off+len(b32)], b32) - off += len(b32) - return off, nil -} - -func 
unpackStringBase64(msg []byte, off, end int) (string, int, error) { - // Rest of the RR is base64 encoded value, so we don't need an explicit length - // to be set. Thus far all RR's that have base64 encoded fields have those as their - // last one. What we do need is the end of the RR! - if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking base64"} - } - s := toBase64(msg[off:end]) - return s, end, nil -} - -func packStringBase64(s string, msg []byte, off int) (int, error) { - b64, err := fromBase64([]byte(s)) - if err != nil { - return len(msg), err - } - if off+len(b64) > len(msg) { - return len(msg), &Error{err: "overflow packing base64"} - } - copy(msg[off:off+len(b64)], b64) - off += len(b64) - return off, nil -} - -func unpackStringHex(msg []byte, off, end int) (string, int, error) { - // Rest of the RR is hex encoded value, so we don't need an explicit length - // to be set. NSEC and TSIG have hex fields with a length field. - // What we do need is the end of the RR! 
- if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking hex"} - } - - s := hex.EncodeToString(msg[off:end]) - return s, end, nil -} - -func packStringHex(s string, msg []byte, off int) (int, error) { - h, err := hex.DecodeString(s) - if err != nil { - return len(msg), err - } - if off+len(h) > len(msg) { - return len(msg), &Error{err: "overflow packing hex"} - } - copy(msg[off:off+len(h)], h) - off += len(h) - return off, nil -} - -func unpackStringAny(msg []byte, off, end int) (string, int, error) { - if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking anything"} - } - return string(msg[off:end]), end, nil -} - -func packStringAny(s string, msg []byte, off int) (int, error) { - if off+len(s) > len(msg) { - return len(msg), &Error{err: "overflow packing anything"} - } - copy(msg[off:off+len(s)], s) - off += len(s) - return off, nil -} - -func unpackStringTxt(msg []byte, off int) ([]string, int, error) { - txt, off, err := unpackTxt(msg, off) - if err != nil { - return nil, len(msg), err - } - return txt, off, nil -} - -func packStringTxt(s []string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
- off, err := packTxt(s, msg, off, txtTmp) - if err != nil { - return len(msg), err - } - return off, nil -} - -func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { - var edns []EDNS0 -Option: - var code uint16 - if off+4 > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking opt"} - } - code = binary.BigEndian.Uint16(msg[off:]) - off += 2 - optlen := binary.BigEndian.Uint16(msg[off:]) - off += 2 - if off+int(optlen) > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking opt"} - } - switch code { - case EDNS0NSID: - e := new(EDNS0_NSID) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0SUBNET: - e := new(EDNS0_SUBNET) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0COOKIE: - e := new(EDNS0_COOKIE) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0UL: - e := new(EDNS0_UL) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0LLQ: - e := new(EDNS0_LLQ) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0DAU: - e := new(EDNS0_DAU) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0DHU: - e := new(EDNS0_DHU) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0N3U: - e := new(EDNS0_N3U) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case 
EDNS0PADDING: - e := new(EDNS0_PADDING) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - default: - e := new(EDNS0_LOCAL) - e.Code = code - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - } - - if off < len(msg) { - goto Option - } - - return edns, off, nil -} - -func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { - for _, el := range options { - b, err := el.pack() - if err != nil || off+3 > len(msg) { - return len(msg), &Error{err: "overflow packing opt"} - } - binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code - binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length - off += 4 - if off+len(b) > len(msg) { - copy(msg[off:], b) - off = len(msg) - continue - } - // Actual data - copy(msg[off:off+len(b)], b) - off += len(b) - } - return off, nil -} - -func unpackStringOctet(msg []byte, off int) (string, int, error) { - s := string(msg[off:]) - return s, len(msg), nil -} - -func packStringOctet(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packOctetString(s, msg, off, txtTmp) - if err != nil { - return len(msg), err - } - return off, nil -} - -func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { - var nsec []uint16 - length, window, lastwindow := 0, 0, -1 - for off < len(msg) { - if off+2 > len(msg) { - return nsec, len(msg), &Error{err: "overflow unpacking nsecx"} - } - window = int(msg[off]) - length = int(msg[off+1]) - off += 2 - if window <= lastwindow { - // RFC 4034: Blocks are present in the NSEC RR RDATA in - // increasing numerical order. - return nsec, len(msg), &Error{err: "out of order NSEC block"} - } - if length == 0 { - // RFC 4034: Blocks with no types present MUST NOT be included. 
- return nsec, len(msg), &Error{err: "empty NSEC block"} - } - if length > 32 { - return nsec, len(msg), &Error{err: "NSEC block too long"} - } - if off+length > len(msg) { - return nsec, len(msg), &Error{err: "overflowing NSEC block"} - } - - // Walk the bytes in the window and extract the type bits - for j, b := range msg[off : off+length] { - // Check the bits one by one, and set the type - if b&0x80 == 0x80 { - nsec = append(nsec, uint16(window*256+j*8+0)) - } - if b&0x40 == 0x40 { - nsec = append(nsec, uint16(window*256+j*8+1)) - } - if b&0x20 == 0x20 { - nsec = append(nsec, uint16(window*256+j*8+2)) - } - if b&0x10 == 0x10 { - nsec = append(nsec, uint16(window*256+j*8+3)) - } - if b&0x8 == 0x8 { - nsec = append(nsec, uint16(window*256+j*8+4)) - } - if b&0x4 == 0x4 { - nsec = append(nsec, uint16(window*256+j*8+5)) - } - if b&0x2 == 0x2 { - nsec = append(nsec, uint16(window*256+j*8+6)) - } - if b&0x1 == 0x1 { - nsec = append(nsec, uint16(window*256+j*8+7)) - } - } - off += length - lastwindow = window - } - return nsec, off, nil -} - -func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { - if len(bitmap) == 0 { - return off, nil - } - var lastwindow, lastlength uint16 - for _, t := range bitmap { - window := t / 256 - length := (t-window*256)/8 + 1 - if window > lastwindow && lastlength != 0 { // New window, jump to the new offset - off += int(lastlength) + 2 - lastlength = 0 - } - if window < lastwindow || length < lastlength { - return len(msg), &Error{err: "nsec bits out of order"} - } - if off+2+int(length) > len(msg) { - return len(msg), &Error{err: "overflow packing nsec"} - } - // Setting the window # - msg[off] = byte(window) - // Setting the octets length - msg[off+1] = byte(length) - // Setting the bit value for the type in the right octet - msg[off+1+int(length)] |= byte(1 << (7 - t%8)) - lastwindow, lastlength = window, length - } - off += int(lastlength) + 2 - return off, nil -} - -func unpackDataDomainNames(msg []byte, off, end 
int) ([]string, int, error) { - var ( - servers []string - s string - err error - ) - if end > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking domain names"} - } - for off < end { - s, off, err = UnpackDomainName(msg, off) - if err != nil { - return servers, len(msg), err - } - servers = append(servers, s) - } - return servers, off, nil -} - -func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) { - var err error - for _, name := range names { - off, err = packDomainName(name, msg, off, compression, compress) - if err != nil { - return len(msg), err - } - } - return off, nil -} diff --git a/vendor/github.com/miekg/dns/msg_truncate.go b/vendor/github.com/miekg/dns/msg_truncate.go deleted file mode 100644 index 4763fc610..000000000 --- a/vendor/github.com/miekg/dns/msg_truncate.go +++ /dev/null @@ -1,106 +0,0 @@ -package dns - -// Truncate ensures the reply message will fit into the requested buffer -// size by removing records that exceed the requested size. -// -// It will first check if the reply fits without compression and then with -// compression. If it won't fit with compression, Scrub then walks the -// record adding as many records as possible without exceeding the -// requested buffer size. -// -// The TC bit will be set if any answer records were excluded from the -// message. This indicates to that the client should retry over TCP. -// -// The appropriate buffer size can be retrieved from the requests OPT -// record, if present, and is transport specific otherwise. dns.MinMsgSize -// should be used for UDP requests without an OPT record, and -// dns.MaxMsgSize for TCP requests without an OPT record. -func (dns *Msg) Truncate(size int) { - if dns.IsTsig() != nil { - // To simplify this implementation, we don't perform - // truncation on responses with a TSIG record. 
- return - } - - // RFC 6891 mandates that the payload size in an OPT record - // less than 512 bytes must be treated as equal to 512 bytes. - // - // For ease of use, we impose that restriction here. - if size < 512 { - size = 512 - } - - l := msgLenWithCompressionMap(dns, nil) // uncompressed length - if l <= size { - // Don't waste effort compressing this message. - dns.Compress = false - return - } - - dns.Compress = true - - edns0 := dns.popEdns0() - if edns0 != nil { - // Account for the OPT record that gets added at the end, - // by subtracting that length from our budget. - // - // The EDNS(0) OPT record must have the root domain and - // it's length is thus unaffected by compression. - size -= Len(edns0) - } - - compression := make(map[string]struct{}) - - l = headerSize - for _, r := range dns.Question { - l += r.len(l, compression) - } - - var numAnswer int - if l < size { - l, numAnswer = truncateLoop(dns.Answer, size, l, compression) - } - - var numNS int - if l < size { - l, numNS = truncateLoop(dns.Ns, size, l, compression) - } - - var numExtra int - if l < size { - l, numExtra = truncateLoop(dns.Extra, size, l, compression) - } - - // According to RFC 2181, the TC bit should only be set if not all - // of the answer RRs can be included in the response. - dns.Truncated = len(dns.Answer) > numAnswer - - dns.Answer = dns.Answer[:numAnswer] - dns.Ns = dns.Ns[:numNS] - dns.Extra = dns.Extra[:numExtra] - - if edns0 != nil { - // Add the OPT record back onto the additional section. - dns.Extra = append(dns.Extra, edns0) - } -} - -func truncateLoop(rrs []RR, size, l int, compression map[string]struct{}) (int, int) { - for i, r := range rrs { - if r == nil { - continue - } - - l += r.len(l, compression) - if l > size { - // Return size, rather than l prior to this record, - // to prevent any further records being added. 
- return size, i - } - if l == size { - return l, i + 1 - } - } - - return l, len(rrs) -} diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go deleted file mode 100644 index 8f071a473..000000000 --- a/vendor/github.com/miekg/dns/nsecx.go +++ /dev/null @@ -1,95 +0,0 @@ -package dns - -import ( - "crypto/sha1" - "encoding/hex" - "strings" -) - -// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. -func HashName(label string, ha uint8, iter uint16, salt string) string { - if ha != SHA1 { - return "" - } - - wireSalt := make([]byte, hex.DecodedLen(len(salt))) - n, err := packStringHex(salt, wireSalt, 0) - if err != nil { - return "" - } - wireSalt = wireSalt[:n] - - name := make([]byte, 255) - off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) - if err != nil { - return "" - } - name = name[:off] - - s := sha1.New() - // k = 0 - s.Write(name) - s.Write(wireSalt) - nsec3 := s.Sum(nil) - - // k > 0 - for k := uint16(0); k < iter; k++ { - s.Reset() - s.Write(nsec3) - s.Write(wireSalt) - nsec3 = s.Sum(nsec3[:0]) - } - - return toBase32(nsec3) -} - -// Cover returns true if a name is covered by the NSEC3 record -func (rr *NSEC3) Cover(name string) bool { - nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) - owner := strings.ToUpper(rr.Hdr.Name) - labelIndices := Split(owner) - if len(labelIndices) < 2 { - return false - } - ownerHash := owner[:labelIndices[1]-1] - ownerZone := owner[labelIndices[1]:] - if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone - return false - } - - nextHash := rr.NextDomain - - // if empty interval found, try cover wildcard hashes so nameHash shouldn't match with ownerHash - if ownerHash == nextHash && nameHash != ownerHash { // empty interval - return true - } - if ownerHash > nextHash { // end of zone - if nameHash > ownerHash { // covered since there is nothing after ownerHash - return true - } - 
return nameHash < nextHash // if nameHash is before beginning of zone it is covered - } - if nameHash < ownerHash { // nameHash is before ownerHash, not covered - return false - } - return nameHash < nextHash // if nameHash is before nextHash is it covered (between ownerHash and nextHash) -} - -// Match returns true if a name matches the NSEC3 record -func (rr *NSEC3) Match(name string) bool { - nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) - owner := strings.ToUpper(rr.Hdr.Name) - labelIndices := Split(owner) - if len(labelIndices) < 2 { - return false - } - ownerHash := owner[:labelIndices[1]-1] - ownerZone := owner[labelIndices[1]:] - if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone - return false - } - if ownerHash == nameHash { - return true - } - return false -} diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go deleted file mode 100644 index d9c0d2677..000000000 --- a/vendor/github.com/miekg/dns/privaterr.go +++ /dev/null @@ -1,132 +0,0 @@ -package dns - -import ( - "fmt" - "strings" -) - -// PrivateRdata is an interface used for implementing "Private Use" RR types, see -// RFC 6895. This allows one to experiment with new RR types, without requesting an -// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove. -type PrivateRdata interface { - // String returns the text presentaton of the Rdata of the Private RR. - String() string - // Parse parses the Rdata of the private RR. - Parse([]string) error - // Pack is used when packing a private RR into a buffer. - Pack([]byte) (int, error) - // Unpack is used when unpacking a private RR from a buffer. - // TODO(miek): diff. signature than Pack, see edns0.go for instance. - Unpack([]byte) (int, error) - // Copy copies the Rdata. - Copy(PrivateRdata) error - // Len returns the length in octets of the Rdata. - Len() int -} - -// PrivateRR represents an RR that uses a PrivateRdata user-defined type. 
-// It mocks normal RRs and implements dns.RR interface. -type PrivateRR struct { - Hdr RR_Header - Data PrivateRdata -} - -func mkPrivateRR(rrtype uint16) *PrivateRR { - // Panics if RR is not an instance of PrivateRR. - rrfunc, ok := TypeToRR[rrtype] - if !ok { - panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype)) - } - - anyrr := rrfunc() - rr, ok := anyrr.(*PrivateRR) - if !ok { - panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr)) - } - - return rr -} - -// Header return the RR header of r. -func (r *PrivateRR) Header() *RR_Header { return &r.Hdr } - -func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } - -// Private len and copy parts to satisfy RR interface. -func (r *PrivateRR) len(off int, compression map[string]struct{}) int { - l := r.Hdr.len(off, compression) - l += r.Data.Len() - return l -} - -func (r *PrivateRR) copy() RR { - // make new RR like this: - rr := mkPrivateRR(r.Hdr.Rrtype) - rr.Hdr = r.Hdr - - err := r.Data.Copy(rr.Data) - if err != nil { - panic("dns: got value that could not be used to copy Private rdata") - } - return rr -} - -func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) { - n, err := r.Data.Pack(msg[off:]) - if err != nil { - return len(msg), err - } - off += n - return off, nil -} - -func (r *PrivateRR) unpack(msg []byte, off int) (int, error) { - off1, err := r.Data.Unpack(msg[off:]) - off += off1 - return off, err -} - -func (r *PrivateRR) parse(c *zlexer, origin, file string) *ParseError { - var l lex - text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 -Fetch: - for { - // TODO(miek): we could also be returning _QUOTE, this might or might not - // be an issue (basically parsing TXT becomes hard) - switch l, _ = c.Next(); l.value { - case zNewline, zEOF: - break Fetch - case zString: - text = append(text, l.token) - } - } - - err := r.Data.Parse(text) 
- if err != nil { - return &ParseError{file, err.Error(), l} - } - - return nil -} - -func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false } - -// PrivateHandle registers a private resource record type. It requires -// string and numeric representation of private RR type and generator function as argument. -func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { - rtypestr = strings.ToUpper(rtypestr) - - TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} } - TypeToString[rtype] = rtypestr - StringToType[rtypestr] = rtype -} - -// PrivateHandleRemove removes definitions required to support private RR type. -func PrivateHandleRemove(rtype uint16) { - rtypestr, ok := TypeToString[rtype] - if ok { - delete(TypeToRR, rtype) - delete(TypeToString, rtype) - delete(StringToType, rtypestr) - } -} diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go deleted file mode 100644 index 28151af83..000000000 --- a/vendor/github.com/miekg/dns/reverse.go +++ /dev/null @@ -1,52 +0,0 @@ -package dns - -// StringToType is the reverse of TypeToString, needed for string parsing. -var StringToType = reverseInt16(TypeToString) - -// StringToClass is the reverse of ClassToString, needed for string parsing. -var StringToClass = reverseInt16(ClassToString) - -// StringToOpcode is a map of opcodes to strings. -var StringToOpcode = reverseInt(OpcodeToString) - -// StringToRcode is a map of rcodes to strings. -var StringToRcode = reverseInt(RcodeToString) - -func init() { - // Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733. - StringToRcode["NOTIMPL"] = RcodeNotImplemented -} - -// StringToAlgorithm is the reverse of AlgorithmToString. -var StringToAlgorithm = reverseInt8(AlgorithmToString) - -// StringToHash is a map of names to hash IDs. -var StringToHash = reverseInt8(HashToString) - -// StringToCertType is the reverseof CertTypeToString. 
-var StringToCertType = reverseInt16(CertTypeToString) - -// Reverse a map -func reverseInt8(m map[uint8]string) map[string]uint8 { - n := make(map[string]uint8, len(m)) - for u, s := range m { - n[s] = u - } - return n -} - -func reverseInt16(m map[uint16]string) map[string]uint16 { - n := make(map[string]uint16, len(m)) - for u, s := range m { - n[s] = u - } - return n -} - -func reverseInt(m map[int]string) map[string]int { - n := make(map[string]int, len(m)) - for u, s := range m { - n[s] = u - } - return n -} diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go deleted file mode 100644 index a638e862e..000000000 --- a/vendor/github.com/miekg/dns/sanitize.go +++ /dev/null @@ -1,86 +0,0 @@ -package dns - -// Dedup removes identical RRs from rrs. It preserves the original ordering. -// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies -// rrs. -// m is used to store the RRs temporary. If it is nil a new map will be allocated. -func Dedup(rrs []RR, m map[string]RR) []RR { - - if m == nil { - m = make(map[string]RR) - } - // Save the keys, so we don't have to call normalizedString twice. - keys := make([]*string, 0, len(rrs)) - - for _, r := range rrs { - key := normalizedString(r) - keys = append(keys, &key) - if mr, ok := m[key]; ok { - // Shortest TTL wins. - rh, mrh := r.Header(), mr.Header() - if mrh.Ttl > rh.Ttl { - mrh.Ttl = rh.Ttl - } - continue - } - - m[key] = r - } - // If the length of the result map equals the amount of RRs we got, - // it means they were all different. We can then just return the original rrset. - if len(m) == len(rrs) { - return rrs - } - - j := 0 - for i, r := range rrs { - // If keys[i] lives in the map, we should copy and remove it. - if _, ok := m[*keys[i]]; ok { - delete(m, *keys[i]) - rrs[j] = r - j++ - } - - if len(m) == 0 { - break - } - } - - return rrs[:j] -} - -// normalizedString returns a normalized string from r. 
The TTL -// is removed and the domain name is lowercased. We go from this: -// DomainNameTTLCLASSTYPERDATA to: -// lowercasenameCLASSTYPE... -func normalizedString(r RR) string { - // A string Go DNS makes has: domainnameTTL... - b := []byte(r.String()) - - // find the first non-escaped tab, then another, so we capture where the TTL lives. - esc := false - ttlStart, ttlEnd := 0, 0 - for i := 0; i < len(b) && ttlEnd == 0; i++ { - switch { - case b[i] == '\\': - esc = !esc - case b[i] == '\t' && !esc: - if ttlStart == 0 { - ttlStart = i - continue - } - if ttlEnd == 0 { - ttlEnd = i - } - case b[i] >= 'A' && b[i] <= 'Z' && !esc: - b[i] += 32 - default: - esc = false - } - } - - // remove TTL. - copy(b[ttlStart:], b[ttlEnd:]) - cut := ttlEnd - ttlStart - return string(b[:len(b)-cut]) -} diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go deleted file mode 100644 index a8691bca7..000000000 --- a/vendor/github.com/miekg/dns/scan.go +++ /dev/null @@ -1,1337 +0,0 @@ -package dns - -import ( - "bufio" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" -) - -const maxTok = 2048 // Largest token we can return. - -// The maximum depth of $INCLUDE directives supported by the -// ZoneParser API. -const maxIncludeDepth = 7 - -// Tokinize a RFC 1035 zone file. The tokenizer will normalize it: -// * Add ownernames if they are left blank; -// * Suppress sequences of spaces; -// * Make each RR fit on one line (_NEWLINE is send as last) -// * Handle comments: ; -// * Handle braces - anywhere. 
-const ( - // Zonefile - zEOF = iota - zString - zBlank - zQuote - zNewline - zRrtpe - zOwner - zClass - zDirOrigin // $ORIGIN - zDirTTL // $TTL - zDirInclude // $INCLUDE - zDirGenerate // $GENERATE - - // Privatekey file - zValue - zKey - - zExpectOwnerDir // Ownername - zExpectOwnerBl // Whitespace after the ownername - zExpectAny // Expect rrtype, ttl or class - zExpectAnyNoClass // Expect rrtype or ttl - zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS - zExpectAnyNoTTL // Expect rrtype or class - zExpectAnyNoTTLBl // Whitespace after _EXPECT_ANY_NOTTL - zExpectRrtype // Expect rrtype - zExpectRrtypeBl // Whitespace BEFORE rrtype - zExpectRdata // The first element of the rdata - zExpectDirTTLBl // Space after directive $TTL - zExpectDirTTL // Directive $TTL - zExpectDirOriginBl // Space after directive $ORIGIN - zExpectDirOrigin // Directive $ORIGIN - zExpectDirIncludeBl // Space after directive $INCLUDE - zExpectDirInclude // Directive $INCLUDE - zExpectDirGenerate // Directive $GENERATE - zExpectDirGenerateBl // Space after directive $GENERATE -) - -// ParseError is a parsing error. It contains the parse error and the location in the io.Reader -// where the error occurred. -type ParseError struct { - file string - err string - lex lex -} - -func (e *ParseError) Error() (s string) { - if e.file != "" { - s = e.file + ": " - } - s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + - strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) - return -} - -type lex struct { - token string // text of the token - err bool // when true, token text has lexer error - value uint8 // value: zString, _BLANK, etc. - torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar - line int // line in the file - column int // column in the file -} - -// Token holds the token that are returned when a zone file is parsed. -type Token struct { - // The scanned resource record when error is not nil. 
- RR - // When an error occurred, this has the error specifics. - Error *ParseError - // A potential comment positioned after the RR and on the same line. - Comment string -} - -// ttlState describes the state necessary to fill in an omitted RR TTL -type ttlState struct { - ttl uint32 // ttl is the current default TTL - isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive -} - -// NewRR reads the RR contained in the string s. Only the first RR is -// returned. If s contains no records, NewRR will return nil with no -// error. -// -// The class defaults to IN and TTL defaults to 3600. The full zone -// file syntax like $TTL, $ORIGIN, etc. is supported. -// -// All fields of the returned RR are set, except RR.Header().Rdlength -// which is set to 0. -func NewRR(s string) (RR, error) { - if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline - return ReadRR(strings.NewReader(s+"\n"), "") - } - return ReadRR(strings.NewReader(s), "") -} - -// ReadRR reads the RR contained in r. -// -// The string file is used in error reporting and to resolve relative -// $INCLUDE directives. -// -// See NewRR for more documentation. -func ReadRR(r io.Reader, file string) (RR, error) { - zp := NewZoneParser(r, ".", file) - zp.SetDefaultTTL(defaultTtl) - zp.SetIncludeAllowed(true) - rr, _ := zp.Next() - return rr, zp.Err() -} - -// ParseZone reads a RFC 1035 style zonefile from r. It returns -// *Tokens on the returned channel, each consisting of either a -// parsed RR and optional comment or a nil RR and an error. The -// channel is closed by ParseZone when the end of r is reached. -// -// The string file is used in error reporting and to resolve relative -// $INCLUDE directives. The string origin is used as the initial -// origin, as if the file would start with an $ORIGIN directive. -// -// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all -// supported. 
-// -// Basic usage pattern when reading from a string (z) containing the -// zone data: -// -// for x := range dns.ParseZone(strings.NewReader(z), "", "") { -// if x.Error != nil { -// // log.Println(x.Error) -// } else { -// // Do something with x.RR -// } -// } -// -// Comments specified after an RR (and on the same line!) are -// returned too: -// -// foo. IN A 10.0.0.1 ; this is a comment -// -// The text "; this is comment" is returned in Token.Comment. -// Comments inside the RR are returned concatenated along with the -// RR. Comments on a line by themselves are discarded. -// -// To prevent memory leaks it is important to always fully drain the -// returned channel. If an error occurs, it will always be the last -// Token sent on the channel. -// -// Deprecated: New users should prefer the ZoneParser API. -func ParseZone(r io.Reader, origin, file string) chan *Token { - t := make(chan *Token, 10000) - go parseZone(r, origin, file, t) - return t -} - -func parseZone(r io.Reader, origin, file string, t chan *Token) { - defer close(t) - - zp := NewZoneParser(r, origin, file) - zp.SetIncludeAllowed(true) - - for rr, ok := zp.Next(); ok; rr, ok = zp.Next() { - t <- &Token{RR: rr, Comment: zp.Comment()} - } - - if err := zp.Err(); err != nil { - pe, ok := err.(*ParseError) - if !ok { - pe = &ParseError{file: file, err: err.Error()} - } - - t <- &Token{Error: pe} - } -} - -// ZoneParser is a parser for an RFC 1035 style zonefile. -// -// Each parsed RR in the zone is returned sequentially from Next. An -// optional comment can be retrieved with Comment. -// -// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all -// supported. Although $INCLUDE is disabled by default. 
-// -// Basic usage pattern when reading from a string (z) containing the -// zone data: -// -// zp := NewZoneParser(strings.NewReader(z), "", "") -// -// for rr, ok := zp.Next(); ok; rr, ok = zp.Next() { -// // Do something with rr -// } -// -// if err := zp.Err(); err != nil { -// // log.Println(err) -// } -// -// Comments specified after an RR (and on the same line!) are -// returned too: -// -// foo. IN A 10.0.0.1 ; this is a comment -// -// The text "; this is comment" is returned from Comment. Comments inside -// the RR are returned concatenated along with the RR. Comments on a line -// by themselves are discarded. -type ZoneParser struct { - c *zlexer - - parseErr *ParseError - - origin string - file string - - defttl *ttlState - - h RR_Header - - // sub is used to parse $INCLUDE files and $GENERATE directives. - // Next, by calling subNext, forwards the resulting RRs from this - // sub parser to the calling code. - sub *ZoneParser - osFile *os.File - - includeDepth uint8 - - includeAllowed bool -} - -// NewZoneParser returns an RFC 1035 style zonefile parser that reads -// from r. -// -// The string file is used in error reporting and to resolve relative -// $INCLUDE directives. The string origin is used as the initial -// origin, as if the file would start with an $ORIGIN directive. -func NewZoneParser(r io.Reader, origin, file string) *ZoneParser { - var pe *ParseError - if origin != "" { - origin = Fqdn(origin) - if _, ok := IsDomainName(origin); !ok { - pe = &ParseError{file, "bad initial origin name", lex{}} - } - } - - return &ZoneParser{ - c: newZLexer(r), - - parseErr: pe, - - origin: origin, - file: file, - } -} - -// SetDefaultTTL sets the parsers default TTL to ttl. -func (zp *ZoneParser) SetDefaultTTL(ttl uint32) { - zp.defttl = &ttlState{ttl, false} -} - -// SetIncludeAllowed controls whether $INCLUDE directives are -// allowed. $INCLUDE directives are not supported by default. 
-// -// The $INCLUDE directive will open and read from a user controlled -// file on the system. Even if the file is not a valid zonefile, the -// contents of the file may be revealed in error messages, such as: -// -// /etc/passwd: dns: not a TTL: "root:x:0:0:root:/root:/bin/bash" at line: 1:31 -// /etc/shadow: dns: not a TTL: "root:$6$::0:99999:7:::" at line: 1:125 -func (zp *ZoneParser) SetIncludeAllowed(v bool) { - zp.includeAllowed = v -} - -// Err returns the first non-EOF error that was encountered by the -// ZoneParser. -func (zp *ZoneParser) Err() error { - if zp.parseErr != nil { - return zp.parseErr - } - - if zp.sub != nil { - if err := zp.sub.Err(); err != nil { - return err - } - } - - return zp.c.Err() -} - -func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) { - zp.parseErr = &ParseError{zp.file, err, l} - return nil, false -} - -// Comment returns an optional text comment that occurred alongside -// the RR. -func (zp *ZoneParser) Comment() string { - if zp.parseErr != nil { - return "" - } - - if zp.sub != nil { - return zp.sub.Comment() - } - - return zp.c.Comment() -} - -func (zp *ZoneParser) subNext() (RR, bool) { - if rr, ok := zp.sub.Next(); ok { - return rr, true - } - - if zp.sub.osFile != nil { - zp.sub.osFile.Close() - zp.sub.osFile = nil - } - - if zp.sub.Err() != nil { - // We have errors to surface. - return nil, false - } - - zp.sub = nil - return zp.Next() -} - -// Next advances the parser to the next RR in the zonefile and -// returns the (RR, true). It will return (nil, false) when the -// parsing stops, either by reaching the end of the input or an -// error. After Next returns (nil, false), the Err method will return -// any error that occurred during parsing. -func (zp *ZoneParser) Next() (RR, bool) { - if zp.parseErr != nil { - return nil, false - } - if zp.sub != nil { - return zp.subNext() - } - - // 6 possible beginnings of a line (_ is a space): - // - // 0. zRRTYPE -> all omitted until the rrtype - // 1. 
zOwner _ zRrtype -> class/ttl omitted - // 2. zOwner _ zString _ zRrtype -> class omitted - // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class - // 4. zOwner _ zClass _ zRrtype -> ttl omitted - // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed) - // - // After detecting these, we know the zRrtype so we can jump to functions - // handling the rdata for each of these types. - - st := zExpectOwnerDir // initial state - h := &zp.h - - for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() { - // zlexer spotted an error already - if l.err { - return zp.setParseError(l.token, l) - } - - switch st { - case zExpectOwnerDir: - // We can also expect a directive, like $TTL or $ORIGIN - if zp.defttl != nil { - h.Ttl = zp.defttl.ttl - } - - h.Class = ClassINET - - switch l.value { - case zNewline: - st = zExpectOwnerDir - case zOwner: - name, ok := toAbsoluteName(l.token, zp.origin) - if !ok { - return zp.setParseError("bad owner name", l) - } - - h.Name = name - - st = zExpectOwnerBl - case zDirTTL: - st = zExpectDirTTLBl - case zDirOrigin: - st = zExpectDirOriginBl - case zDirInclude: - st = zExpectDirIncludeBl - case zDirGenerate: - st = zExpectDirGenerateBl - case zRrtpe: - h.Rrtype = l.torc - - st = zExpectRdata - case zClass: - h.Class = l.torc - - st = zExpectAnyNoClassBl - case zBlank: - // Discard, can happen when there is nothing on the - // line except the RR type - case zString: - ttl, ok := stringToTTL(l.token) - if !ok { - return zp.setParseError("not a TTL", l) - } - - h.Ttl = ttl - - if zp.defttl == nil || !zp.defttl.isByDirective { - zp.defttl = &ttlState{ttl, false} - } - - st = zExpectAnyNoTTLBl - default: - return zp.setParseError("syntax error at beginning", l) - } - case zExpectDirIncludeBl: - if l.value != zBlank { - return zp.setParseError("no blank after $INCLUDE-directive", l) - } - - st = zExpectDirInclude - case zExpectDirInclude: - if l.value != zString { - return zp.setParseError("expecting $INCLUDE value, not this...", l) - } - - 
neworigin := zp.origin // There may be optionally a new origin set after the filename, if not use current one - switch l, _ := zp.c.Next(); l.value { - case zBlank: - l, _ := zp.c.Next() - if l.value == zString { - name, ok := toAbsoluteName(l.token, zp.origin) - if !ok { - return zp.setParseError("bad origin name", l) - } - - neworigin = name - } - case zNewline, zEOF: - // Ok - default: - return zp.setParseError("garbage after $INCLUDE", l) - } - - if !zp.includeAllowed { - return zp.setParseError("$INCLUDE directive not allowed", l) - } - if zp.includeDepth >= maxIncludeDepth { - return zp.setParseError("too deeply nested $INCLUDE", l) - } - - // Start with the new file - includePath := l.token - if !filepath.IsAbs(includePath) { - includePath = filepath.Join(filepath.Dir(zp.file), includePath) - } - - r1, e1 := os.Open(includePath) - if e1 != nil { - var as string - if !filepath.IsAbs(l.token) { - as = fmt.Sprintf(" as `%s'", includePath) - } - - msg := fmt.Sprintf("failed to open `%s'%s: %v", l.token, as, e1) - return zp.setParseError(msg, l) - } - - zp.sub = NewZoneParser(r1, neworigin, includePath) - zp.sub.defttl, zp.sub.includeDepth, zp.sub.osFile = zp.defttl, zp.includeDepth+1, r1 - zp.sub.SetIncludeAllowed(true) - return zp.subNext() - case zExpectDirTTLBl: - if l.value != zBlank { - return zp.setParseError("no blank after $TTL-directive", l) - } - - st = zExpectDirTTL - case zExpectDirTTL: - if l.value != zString { - return zp.setParseError("expecting $TTL value, not this...", l) - } - - if e := slurpRemainder(zp.c, zp.file); e != nil { - zp.parseErr = e - return nil, false - } - - ttl, ok := stringToTTL(l.token) - if !ok { - return zp.setParseError("expecting $TTL value, not this...", l) - } - - zp.defttl = &ttlState{ttl, true} - - st = zExpectOwnerDir - case zExpectDirOriginBl: - if l.value != zBlank { - return zp.setParseError("no blank after $ORIGIN-directive", l) - } - - st = zExpectDirOrigin - case zExpectDirOrigin: - if l.value != zString { - 
return zp.setParseError("expecting $ORIGIN value, not this...", l) - } - - if e := slurpRemainder(zp.c, zp.file); e != nil { - zp.parseErr = e - return nil, false - } - - name, ok := toAbsoluteName(l.token, zp.origin) - if !ok { - return zp.setParseError("bad origin name", l) - } - - zp.origin = name - - st = zExpectOwnerDir - case zExpectDirGenerateBl: - if l.value != zBlank { - return zp.setParseError("no blank after $GENERATE-directive", l) - } - - st = zExpectDirGenerate - case zExpectDirGenerate: - if l.value != zString { - return zp.setParseError("expecting $GENERATE value, not this...", l) - } - - return zp.generate(l) - case zExpectOwnerBl: - if l.value != zBlank { - return zp.setParseError("no blank after owner", l) - } - - st = zExpectAny - case zExpectAny: - switch l.value { - case zRrtpe: - if zp.defttl == nil { - return zp.setParseError("missing TTL with no previous value", l) - } - - h.Rrtype = l.torc - - st = zExpectRdata - case zClass: - h.Class = l.torc - - st = zExpectAnyNoClassBl - case zString: - ttl, ok := stringToTTL(l.token) - if !ok { - return zp.setParseError("not a TTL", l) - } - - h.Ttl = ttl - - if zp.defttl == nil || !zp.defttl.isByDirective { - zp.defttl = &ttlState{ttl, false} - } - - st = zExpectAnyNoTTLBl - default: - return zp.setParseError("expecting RR type, TTL or class, not this...", l) - } - case zExpectAnyNoClassBl: - if l.value != zBlank { - return zp.setParseError("no blank before class", l) - } - - st = zExpectAnyNoClass - case zExpectAnyNoTTLBl: - if l.value != zBlank { - return zp.setParseError("no blank before TTL", l) - } - - st = zExpectAnyNoTTL - case zExpectAnyNoTTL: - switch l.value { - case zClass: - h.Class = l.torc - - st = zExpectRrtypeBl - case zRrtpe: - h.Rrtype = l.torc - - st = zExpectRdata - default: - return zp.setParseError("expecting RR type or class, not this...", l) - } - case zExpectAnyNoClass: - switch l.value { - case zString: - ttl, ok := stringToTTL(l.token) - if !ok { - return 
zp.setParseError("not a TTL", l) - } - - h.Ttl = ttl - - if zp.defttl == nil || !zp.defttl.isByDirective { - zp.defttl = &ttlState{ttl, false} - } - - st = zExpectRrtypeBl - case zRrtpe: - h.Rrtype = l.torc - - st = zExpectRdata - default: - return zp.setParseError("expecting RR type or TTL, not this...", l) - } - case zExpectRrtypeBl: - if l.value != zBlank { - return zp.setParseError("no blank before RR type", l) - } - - st = zExpectRrtype - case zExpectRrtype: - if l.value != zRrtpe { - return zp.setParseError("unknown RR type", l) - } - - h.Rrtype = l.torc - - st = zExpectRdata - case zExpectRdata: - r, e := setRR(*h, zp.c, zp.origin, zp.file) - if e != nil { - // If e.lex is nil than we have encounter a unknown RR type - // in that case we substitute our current lex token - if e.lex.token == "" && e.lex.value == 0 { - e.lex = l // Uh, dirty - } - - zp.parseErr = e - return nil, false - } - - return r, true - } - } - - // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this - // is not an error, because an empty zone file is still a zone file. 
- return nil, false -} - -type zlexer struct { - br io.ByteReader - - readErr error - - line int - column int - - comBuf string - comment string - - l lex - - brace int - quote bool - space bool - commt bool - rrtype bool - owner bool - - nextL bool - - eol bool // end-of-line -} - -func newZLexer(r io.Reader) *zlexer { - br, ok := r.(io.ByteReader) - if !ok { - br = bufio.NewReaderSize(r, 1024) - } - - return &zlexer{ - br: br, - - line: 1, - - owner: true, - } -} - -func (zl *zlexer) Err() error { - if zl.readErr == io.EOF { - return nil - } - - return zl.readErr -} - -// readByte returns the next byte from the input -func (zl *zlexer) readByte() (byte, bool) { - if zl.readErr != nil { - return 0, false - } - - c, err := zl.br.ReadByte() - if err != nil { - zl.readErr = err - return 0, false - } - - // delay the newline handling until the next token is delivered, - // fixes off-by-one errors when reporting a parse error. - if zl.eol { - zl.line++ - zl.column = 0 - zl.eol = false - } - - if c == '\n' { - zl.eol = true - } else { - zl.column++ - } - - return c, true -} - -func (zl *zlexer) Next() (lex, bool) { - l := &zl.l - if zl.nextL { - zl.nextL = false - return *l, true - } - if l.err { - // Parsing errors should be sticky. 
- return lex{value: zEOF}, false - } - - var ( - str [maxTok]byte // Hold string text - com [maxTok]byte // Hold comment text - - stri int // Offset in str (0 means empty) - comi int // Offset in com (0 means empty) - - escape bool - ) - - if zl.comBuf != "" { - comi = copy(com[:], zl.comBuf) - zl.comBuf = "" - } - - zl.comment = "" - - for x, ok := zl.readByte(); ok; x, ok = zl.readByte() { - l.line, l.column = zl.line, zl.column - - if stri >= len(str) { - l.token = "token length insufficient for parsing" - l.err = true - return *l, true - } - if comi >= len(com) { - l.token = "comment length insufficient for parsing" - l.err = true - return *l, true - } - - switch x { - case ' ', '\t': - if escape || zl.quote { - // Inside quotes or escaped this is legal. - str[stri] = x - stri++ - - escape = false - break - } - - if zl.commt { - com[comi] = x - comi++ - break - } - - var retL lex - if stri == 0 { - // Space directly in the beginning, handled in the grammar - } else if zl.owner { - // If we have a string and its the first, make it an owner - l.value = zOwner - l.token = string(str[:stri]) - - // escape $... 
start with a \ not a $, so this will work - switch strings.ToUpper(l.token) { - case "$TTL": - l.value = zDirTTL - case "$ORIGIN": - l.value = zDirOrigin - case "$INCLUDE": - l.value = zDirInclude - case "$GENERATE": - l.value = zDirGenerate - } - - retL = *l - } else { - l.value = zString - l.token = string(str[:stri]) - - if !zl.rrtype { - tokenUpper := strings.ToUpper(l.token) - if t, ok := StringToType[tokenUpper]; ok { - l.value = zRrtpe - l.torc = t - - zl.rrtype = true - } else if strings.HasPrefix(tokenUpper, "TYPE") { - t, ok := typeToInt(l.token) - if !ok { - l.token = "unknown RR type" - l.err = true - return *l, true - } - - l.value = zRrtpe - l.torc = t - - zl.rrtype = true - } - - if t, ok := StringToClass[tokenUpper]; ok { - l.value = zClass - l.torc = t - } else if strings.HasPrefix(tokenUpper, "CLASS") { - t, ok := classToInt(l.token) - if !ok { - l.token = "unknown class" - l.err = true - return *l, true - } - - l.value = zClass - l.torc = t - } - } - - retL = *l - } - - zl.owner = false - - if !zl.space { - zl.space = true - - l.value = zBlank - l.token = " " - - if retL == (lex{}) { - return *l, true - } - - zl.nextL = true - } - - if retL != (lex{}) { - return retL, true - } - case ';': - if escape || zl.quote { - // Inside quotes or escaped this is legal. - str[stri] = x - stri++ - - escape = false - break - } - - zl.commt = true - zl.comBuf = "" - - if comi > 1 { - // A newline was previously seen inside a comment that - // was inside braces and we delayed adding it until now. 
- com[comi] = ' ' // convert newline to space - comi++ - } - - com[comi] = ';' - comi++ - - if stri > 0 { - zl.comBuf = string(com[:comi]) - - l.value = zString - l.token = string(str[:stri]) - return *l, true - } - case '\r': - escape = false - - if zl.quote { - str[stri] = x - stri++ - } - - // discard if outside of quotes - case '\n': - escape = false - - // Escaped newline - if zl.quote { - str[stri] = x - stri++ - break - } - - if zl.commt { - // Reset a comment - zl.commt = false - zl.rrtype = false - - // If not in a brace this ends the comment AND the RR - if zl.brace == 0 { - zl.owner = true - - l.value = zNewline - l.token = "\n" - zl.comment = string(com[:comi]) - return *l, true - } - - zl.comBuf = string(com[:comi]) - break - } - - if zl.brace == 0 { - // If there is previous text, we should output it here - var retL lex - if stri != 0 { - l.value = zString - l.token = string(str[:stri]) - - if !zl.rrtype { - tokenUpper := strings.ToUpper(l.token) - if t, ok := StringToType[tokenUpper]; ok { - zl.rrtype = true - - l.value = zRrtpe - l.torc = t - } - } - - retL = *l - } - - l.value = zNewline - l.token = "\n" - - zl.comment = zl.comBuf - zl.comBuf = "" - zl.rrtype = false - zl.owner = true - - if retL != (lex{}) { - zl.nextL = true - return retL, true - } - - return *l, true - } - case '\\': - // comments do not get escaped chars, everything is copied - if zl.commt { - com[comi] = x - comi++ - break - } - - // something already escaped must be in string - if escape { - str[stri] = x - stri++ - - escape = false - break - } - - // something escaped outside of string gets added to string - str[stri] = x - stri++ - - escape = true - case '"': - if zl.commt { - com[comi] = x - comi++ - break - } - - if escape { - str[stri] = x - stri++ - - escape = false - break - } - - zl.space = false - - // send previous gathered text and the quote - var retL lex - if stri != 0 { - l.value = zString - l.token = string(str[:stri]) - - retL = *l - } - - // send quote itself 
as separate token - l.value = zQuote - l.token = "\"" - - zl.quote = !zl.quote - - if retL != (lex{}) { - zl.nextL = true - return retL, true - } - - return *l, true - case '(', ')': - if zl.commt { - com[comi] = x - comi++ - break - } - - if escape || zl.quote { - // Inside quotes or escaped this is legal. - str[stri] = x - stri++ - - escape = false - break - } - - switch x { - case ')': - zl.brace-- - - if zl.brace < 0 { - l.token = "extra closing brace" - l.err = true - return *l, true - } - case '(': - zl.brace++ - } - default: - escape = false - - if zl.commt { - com[comi] = x - comi++ - break - } - - str[stri] = x - stri++ - - zl.space = false - } - } - - if zl.readErr != nil && zl.readErr != io.EOF { - // Don't return any tokens after a read error occurs. - return lex{value: zEOF}, false - } - - var retL lex - if stri > 0 { - // Send remainder of str - l.value = zString - l.token = string(str[:stri]) - retL = *l - - if comi <= 0 { - return retL, true - } - } - - if comi > 0 { - // Send remainder of com - l.value = zNewline - l.token = "\n" - zl.comment = string(com[:comi]) - - if retL != (lex{}) { - zl.nextL = true - return retL, true - } - - return *l, true - } - - if zl.brace != 0 { - l.token = "unbalanced brace" - l.err = true - return *l, true - } - - return lex{value: zEOF}, false -} - -func (zl *zlexer) Comment() string { - if zl.l.err { - return "" - } - - return zl.comment -} - -// Extract the class number from CLASSxx -func classToInt(token string) (uint16, bool) { - offset := 5 - if len(token) < offset+1 { - return 0, false - } - class, err := strconv.ParseUint(token[offset:], 10, 16) - if err != nil { - return 0, false - } - return uint16(class), true -} - -// Extract the rr number from TYPExxx -func typeToInt(token string) (uint16, bool) { - offset := 4 - if len(token) < offset+1 { - return 0, false - } - typ, err := strconv.ParseUint(token[offset:], 10, 16) - if err != nil { - return 0, false - } - return uint16(typ), true -} - -// stringToTTL 
parses things like 2w, 2m, etc, and returns the time in seconds. -func stringToTTL(token string) (uint32, bool) { - var s, i uint32 - for _, c := range token { - switch c { - case 's', 'S': - s += i - i = 0 - case 'm', 'M': - s += i * 60 - i = 0 - case 'h', 'H': - s += i * 60 * 60 - i = 0 - case 'd', 'D': - s += i * 60 * 60 * 24 - i = 0 - case 'w', 'W': - s += i * 60 * 60 * 24 * 7 - i = 0 - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - i *= 10 - i += uint32(c) - '0' - default: - return 0, false - } - } - return s + i, true -} - -// Parse LOC records' [.][mM] into a -// mantissa exponent format. Token should contain the entire -// string (i.e. no spaces allowed) -func stringToCm(token string) (e, m uint8, ok bool) { - if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { - token = token[0 : len(token)-1] - } - s := strings.SplitN(token, ".", 2) - var meters, cmeters, val int - var err error - switch len(s) { - case 2: - if cmeters, err = strconv.Atoi(s[1]); err != nil { - return - } - fallthrough - case 1: - if meters, err = strconv.Atoi(s[0]); err != nil { - return - } - case 0: - // huh? - return 0, 0, false - } - ok = true - if meters > 0 { - e = 2 - val = meters - } else { - e = 0 - val = cmeters - } - for val > 10 { - e++ - val /= 10 - } - if e > 9 { - ok = false - } - m = uint8(val) - return -} - -func toAbsoluteName(name, origin string) (absolute string, ok bool) { - // check for an explicit origin reference - if name == "@" { - // require a nonempty origin - if origin == "" { - return "", false - } - return origin, true - } - - // require a valid domain name - _, ok = IsDomainName(name) - if !ok || name == "" { - return "", false - } - - // check if name is already absolute - if IsFqdn(name) { - return name, true - } - - // require a nonempty origin - if origin == "" { - return "", false - } - return appendOrigin(name, origin), true -} - -func appendOrigin(name, origin string) string { - if origin == "." 
{ - return name + origin - } - return name + "." + origin -} - -// LOC record helper function -func locCheckNorth(token string, latitude uint32) (uint32, bool) { - switch token { - case "n", "N": - return LOC_EQUATOR + latitude, true - case "s", "S": - return LOC_EQUATOR - latitude, true - } - return latitude, false -} - -// LOC record helper function -func locCheckEast(token string, longitude uint32) (uint32, bool) { - switch token { - case "e", "E": - return LOC_EQUATOR + longitude, true - case "w", "W": - return LOC_EQUATOR - longitude, true - } - return longitude, false -} - -// "Eat" the rest of the "line" -func slurpRemainder(c *zlexer, f string) *ParseError { - l, _ := c.Next() - switch l.value { - case zBlank: - l, _ = c.Next() - if l.value != zNewline && l.value != zEOF { - return &ParseError{f, "garbage after rdata", l} - } - case zNewline: - case zEOF: - default: - return &ParseError{f, "garbage after rdata", l} - } - return nil -} - -// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" -// Used for NID and L64 record. -func stringToNodeID(l lex) (uint64, *ParseError) { - if len(l.token) < 19 { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} - } - // There must be three colons at fixes postitions, if not its a parse error - if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} - } - s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] - u, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} - } - return u, nil -} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go deleted file mode 100644 index 6096f9b0c..000000000 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ /dev/null @@ -1,1945 +0,0 @@ -package dns - -import ( - "encoding/base64" - "net" - "strconv" - "strings" -) - -// Parse the rdata of each rrtype. 
-// All data from the channel c is either zString or zBlank. -// After the rdata there may come a zBlank and then a zNewline -// or immediately a zNewline. If this is not the case we flag -// an *ParseError: garbage after rdata. -func setRR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError) { - var rr RR - if newFn, ok := TypeToRR[h.Rrtype]; ok && canParseAsRR(h.Rrtype) { - rr = newFn() - *rr.Header() = h - } else { - rr = &RFC3597{Hdr: h} - } - - err := rr.parse(c, o, f) - if err != nil { - return nil, err - } - - return rr, nil -} - -// canParseAsRR returns true if the record type can be parsed as a -// concrete RR. It blacklists certain record types that must be parsed -// according to RFC 3597 because they lack a presentation format. -func canParseAsRR(rrtype uint16) bool { - switch rrtype { - case TypeANY, TypeNULL, TypeOPT, TypeTSIG: - return false - default: - return true - } -} - -// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) -// or an error -func endingToString(c *zlexer, errstr, f string) (string, *ParseError) { - var s string - l, _ := c.Next() // zString - for l.value != zNewline && l.value != zEOF { - if l.err { - return s, &ParseError{f, errstr, l} - } - switch l.value { - case zString: - s += l.token - case zBlank: // Ok - default: - return "", &ParseError{f, errstr, l} - } - l, _ = c.Next() - } - - return s, nil -} - -// A remainder of the rdata with embedded spaces, split on unquoted whitespace -// and return the parsed string slice or an error -func endingToTxtSlice(c *zlexer, errstr, f string) ([]string, *ParseError) { - // Get the remaining data until we see a zNewline - l, _ := c.Next() - if l.err { - return nil, &ParseError{f, errstr, l} - } - - // Build the slice - s := make([]string, 0) - quote := false - empty := false - for l.value != zNewline && l.value != zEOF { - if l.err { - return nil, &ParseError{f, errstr, l} - } - switch l.value { - case zString: - empty = false - if len(l.token) > 
255 { - // split up tokens that are larger than 255 into 255-chunks - sx := []string{} - p, i := 0, 255 - for { - if i <= len(l.token) { - sx = append(sx, l.token[p:i]) - } else { - sx = append(sx, l.token[p:]) - break - - } - p, i = p+255, i+255 - } - s = append(s, sx...) - break - } - - s = append(s, l.token) - case zBlank: - if quote { - // zBlank can only be seen in between txt parts. - return nil, &ParseError{f, errstr, l} - } - case zQuote: - if empty && quote { - s = append(s, "") - } - quote = !quote - empty = true - default: - return nil, &ParseError{f, errstr, l} - } - l, _ = c.Next() - } - - if quote { - return nil, &ParseError{f, errstr, l} - } - - return s, nil -} - -func (rr *A) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - rr.A = net.ParseIP(l.token) - // IPv4 addresses cannot include ":". - // We do this rather than use net.IP's To4() because - // To4() treats IPv4-mapped IPv6 addresses as being - // IPv4. - isIPv4 := !strings.Contains(l.token, ":") - if rr.A == nil || !isIPv4 || l.err { - return &ParseError{f, "bad A A", l} - } - return slurpRemainder(c, f) -} - -func (rr *AAAA) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - rr.AAAA = net.ParseIP(l.token) - // IPv6 addresses must include ":", and IPv4 - // addresses cannot include ":". - isIPv6 := strings.Contains(l.token, ":") - if rr.AAAA == nil || !isIPv6 || l.err { - return &ParseError{f, "bad AAAA AAAA", l} - } - return slurpRemainder(c, f) -} - -func (rr *NS) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Ns = l.token - if len(l.token) == 0 { // dynamic update rr. 
- return slurpRemainder(c, f) - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad NS Ns", l} - } - rr.Ns = name - return slurpRemainder(c, f) -} - -func (rr *PTR) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Ptr = l.token - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad PTR Ptr", l} - } - rr.Ptr = name - return slurpRemainder(c, f) -} - -func (rr *NSAPPTR) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Ptr = l.token - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad NSAP-PTR Ptr", l} - } - rr.Ptr = name - return slurpRemainder(c, f) -} - -func (rr *RP) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Mbox = l.token - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - mbox, mboxOk := toAbsoluteName(l.token, o) - if l.err || !mboxOk { - return &ParseError{f, "bad RP Mbox", l} - } - rr.Mbox = mbox - - c.Next() // zBlank - l, _ = c.Next() - rr.Txt = l.token - - txt, txtOk := toAbsoluteName(l.token, o) - if l.err || !txtOk { - return &ParseError{f, "bad RP Txt", l} - } - rr.Txt = txt - - return slurpRemainder(c, f) -} - -func (rr *MR) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Mr = l.token - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad MR Mr", l} - } - rr.Mr = name - return slurpRemainder(c, f) -} - -func (rr *MB) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Mb = l.token - if len(l.token) == 0 { // dynamic update rr. 
- return slurpRemainder(c, f) - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad MB Mb", l} - } - rr.Mb = name - return slurpRemainder(c, f) -} - -func (rr *MG) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Mg = l.token - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad MG Mg", l} - } - rr.Mg = name - return slurpRemainder(c, f) -} - -func (rr *HINFO) parse(c *zlexer, o, f string) *ParseError { - chunks, e := endingToTxtSlice(c, "bad HINFO Fields", f) - if e != nil { - return e - } - - if ln := len(chunks); ln == 0 { - return nil - } else if ln == 1 { - // Can we split it? - if out := strings.Fields(chunks[0]); len(out) > 1 { - chunks = out - } else { - chunks = append(chunks, "") - } - } - - rr.Cpu = chunks[0] - rr.Os = strings.Join(chunks[1:], " ") - - return nil -} - -func (rr *MINFO) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Rmail = l.token - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - rmail, rmailOk := toAbsoluteName(l.token, o) - if l.err || !rmailOk { - return &ParseError{f, "bad MINFO Rmail", l} - } - rr.Rmail = rmail - - c.Next() // zBlank - l, _ = c.Next() - rr.Email = l.token - - email, emailOk := toAbsoluteName(l.token, o) - if l.err || !emailOk { - return &ParseError{f, "bad MINFO Email", l} - } - rr.Email = email - - return slurpRemainder(c, f) -} - -func (rr *MF) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Mf = l.token - if len(l.token) == 0 { // dynamic update rr. 
- return slurpRemainder(c, f) - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad MF Mf", l} - } - rr.Mf = name - return slurpRemainder(c, f) -} - -func (rr *MD) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Md = l.token - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad MD Md", l} - } - rr.Md = name - return slurpRemainder(c, f) -} - -func (rr *MX) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad MX Pref", l} - } - rr.Preference = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Mx = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad MX Mx", l} - } - rr.Mx = name - - return slurpRemainder(c, f) -} - -func (rr *RT) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil { - return &ParseError{f, "bad RT Preference", l} - } - rr.Preference = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Host = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad RT Host", l} - } - rr.Host = name - - return slurpRemainder(c, f) -} - -func (rr *AFSDB) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad AFSDB Subtype", l} - } - rr.Subtype = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Hostname = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad AFSDB Hostname", l} - } - rr.Hostname = name - return slurpRemainder(c, f) -} - -func (rr *X25) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - if l.err { - return &ParseError{f, "bad X25 PSDNAddress", l} - } - rr.PSDNAddress = l.token - return slurpRemainder(c, f) -} - -func (rr *KX) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad KX Pref", l} - } - rr.Preference = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Exchanger = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad KX Exchanger", l} - } - rr.Exchanger = name - return slurpRemainder(c, f) -} - -func (rr *CNAME) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Target = l.token - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad CNAME Target", l} - } - rr.Target = name - return slurpRemainder(c, f) -} - -func (rr *DNAME) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Target = l.token - if len(l.token) == 0 { // dynamic update rr. 
- return slurpRemainder(c, f) - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad DNAME Target", l} - } - rr.Target = name - return slurpRemainder(c, f) -} - -func (rr *SOA) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.Ns = l.token - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - ns, nsOk := toAbsoluteName(l.token, o) - if l.err || !nsOk { - return &ParseError{f, "bad SOA Ns", l} - } - rr.Ns = ns - - c.Next() // zBlank - l, _ = c.Next() - rr.Mbox = l.token - - mbox, mboxOk := toAbsoluteName(l.token, o) - if l.err || !mboxOk { - return &ParseError{f, "bad SOA Mbox", l} - } - rr.Mbox = mbox - - c.Next() // zBlank - - var ( - v uint32 - ok bool - ) - for i := 0; i < 5; i++ { - l, _ = c.Next() - if l.err { - return &ParseError{f, "bad SOA zone parameter", l} - } - if j, e := strconv.ParseUint(l.token, 10, 32); e != nil { - if i == 0 { - // Serial must be a number - return &ParseError{f, "bad SOA zone parameter", l} - } - // We allow other fields to be unitful duration strings - if v, ok = stringToTTL(l.token); !ok { - return &ParseError{f, "bad SOA zone parameter", l} - - } - } else { - v = uint32(j) - } - switch i { - case 0: - rr.Serial = v - c.Next() // zBlank - case 1: - rr.Refresh = v - c.Next() // zBlank - case 2: - rr.Retry = v - c.Next() // zBlank - case 3: - rr.Expire = v - c.Next() // zBlank - case 4: - rr.Minttl = v - } - } - return slurpRemainder(c, f) -} - -func (rr *SRV) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad SRV Priority", l} - } - rr.Priority = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad SRV Weight", l} - } - rr.Weight = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad SRV Port", l} - } - rr.Port = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Target = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad SRV Target", l} - } - rr.Target = name - return slurpRemainder(c, f) -} - -func (rr *NAPTR) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad NAPTR Order", l} - } - rr.Order = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad NAPTR Preference", l} - } - rr.Preference = uint16(i) - - // Flags - c.Next() // zBlank - l, _ = c.Next() // _QUOTE - if l.value != zQuote { - return &ParseError{f, "bad NAPTR Flags", l} - } - l, _ = c.Next() // Either String or Quote - if l.value == zString { - rr.Flags = l.token - l, _ = c.Next() // _QUOTE - if l.value != zQuote { - return &ParseError{f, "bad NAPTR Flags", l} - } - } else if l.value == zQuote { - rr.Flags = "" - } else { - return &ParseError{f, "bad NAPTR Flags", l} - } - - // Service - c.Next() // zBlank - l, _ = c.Next() // _QUOTE - if l.value != zQuote { - return &ParseError{f, "bad NAPTR Service", l} - } - l, _ = c.Next() // Either String or Quote - if l.value == zString { - rr.Service = l.token 
- l, _ = c.Next() // _QUOTE - if l.value != zQuote { - return &ParseError{f, "bad NAPTR Service", l} - } - } else if l.value == zQuote { - rr.Service = "" - } else { - return &ParseError{f, "bad NAPTR Service", l} - } - - // Regexp - c.Next() // zBlank - l, _ = c.Next() // _QUOTE - if l.value != zQuote { - return &ParseError{f, "bad NAPTR Regexp", l} - } - l, _ = c.Next() // Either String or Quote - if l.value == zString { - rr.Regexp = l.token - l, _ = c.Next() // _QUOTE - if l.value != zQuote { - return &ParseError{f, "bad NAPTR Regexp", l} - } - } else if l.value == zQuote { - rr.Regexp = "" - } else { - return &ParseError{f, "bad NAPTR Regexp", l} - } - - // After quote no space?? - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Replacement = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad NAPTR Replacement", l} - } - rr.Replacement = name - return slurpRemainder(c, f) -} - -func (rr *TALINK) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.PreviousName = l.token - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - previousName, previousNameOk := toAbsoluteName(l.token, o) - if l.err || !previousNameOk { - return &ParseError{f, "bad TALINK PreviousName", l} - } - rr.PreviousName = previousName - - c.Next() // zBlank - l, _ = c.Next() - rr.NextName = l.token - - nextName, nextNameOk := toAbsoluteName(l.token, o) - if l.err || !nextNameOk { - return &ParseError{f, "bad TALINK NextName", l} - } - rr.NextName = nextName - - return slurpRemainder(c, f) -} - -func (rr *LOC) parse(c *zlexer, o, f string) *ParseError { - // Non zero defaults for LOC record, see RFC 1876, Section 3. - rr.HorizPre = 165 // 10000 - rr.VertPre = 162 // 10 - rr.Size = 18 // 1 - ok := false - - // North - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return nil - } - i, e := strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { - return &ParseError{f, "bad LOC Latitude", l} - } - rr.Latitude = 1000 * 60 * 60 * uint32(i) - - c.Next() // zBlank - // Either number, 'N' or 'S' - l, _ = c.Next() - if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { - goto East - } - i, e = strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { - return &ParseError{f, "bad LOC Latitude minutes", l} - } - rr.Latitude += 1000 * 60 * uint32(i) - - c.Next() // zBlank - l, _ = c.Next() - if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { - return &ParseError{f, "bad LOC Latitude seconds", l} - } else { - rr.Latitude += uint32(1000 * i) - } - c.Next() // zBlank - // Either number, 'N' or 'S' - l, _ = c.Next() - if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { - goto East - } - // If still alive, flag an error - return &ParseError{f, "bad LOC Latitude North/South", l} - -East: - // East - c.Next() // zBlank - l, _ = c.Next() - if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { - return &ParseError{f, "bad LOC Longitude", l} - } else { - rr.Longitude = 1000 * 60 * 60 * uint32(i) - } - c.Next() // zBlank - // Either number, 'E' or 'W' - l, _ = c.Next() - if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { - goto Altitude - } - if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { - return &ParseError{f, "bad LOC Longitude minutes", l} - } else { - rr.Longitude += 1000 * 60 * uint32(i) - } - c.Next() // zBlank - l, _ = c.Next() - if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { - return &ParseError{f, "bad LOC Longitude seconds", l} - } else { - rr.Longitude += uint32(1000 * i) - } - c.Next() // zBlank - // Either number, 'E' or 'W' - l, _ = c.Next() - if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { - goto Altitude - } - // If still alive, flag an error - return &ParseError{f, "bad LOC Longitude East/West", l} - -Altitude: 
- c.Next() // zBlank - l, _ = c.Next() - if len(l.token) == 0 || l.err { - return &ParseError{f, "bad LOC Altitude", l} - } - if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { - l.token = l.token[0 : len(l.token)-1] - } - if i, e := strconv.ParseFloat(l.token, 32); e != nil { - return &ParseError{f, "bad LOC Altitude", l} - } else { - rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) - } - - // And now optionally the other values - l, _ = c.Next() - count := 0 - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zString: - switch count { - case 0: // Size - e, m, ok := stringToCm(l.token) - if !ok { - return &ParseError{f, "bad LOC Size", l} - } - rr.Size = e&0x0f | m<<4&0xf0 - case 1: // HorizPre - e, m, ok := stringToCm(l.token) - if !ok { - return &ParseError{f, "bad LOC HorizPre", l} - } - rr.HorizPre = e&0x0f | m<<4&0xf0 - case 2: // VertPre - e, m, ok := stringToCm(l.token) - if !ok { - return &ParseError{f, "bad LOC VertPre", l} - } - rr.VertPre = e&0x0f | m<<4&0xf0 - } - count++ - case zBlank: - // Ok - default: - return &ParseError{f, "bad LOC Size, HorizPre or VertPre", l} - } - l, _ = c.Next() - } - return nil -} - -func (rr *HIP) parse(c *zlexer, o, f string) *ParseError { - // HitLength is not represented - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return nil - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad HIP PublicKeyAlgorithm", l} - } - rr.PublicKeyAlgorithm = uint8(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - if len(l.token) == 0 || l.err { - return &ParseError{f, "bad HIP Hit", l} - } - rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. 
- rr.HitLength = uint8(len(rr.Hit)) / 2 - - c.Next() // zBlank - l, _ = c.Next() // zString - if len(l.token) == 0 || l.err { - return &ParseError{f, "bad HIP PublicKey", l} - } - rr.PublicKey = l.token // This cannot contain spaces - rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) - - // RendezvousServers (if any) - l, _ = c.Next() - var xs []string - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zString: - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad HIP RendezvousServers", l} - } - xs = append(xs, name) - case zBlank: - // Ok - default: - return &ParseError{f, "bad HIP RendezvousServers", l} - } - l, _ = c.Next() - } - - rr.RendezvousServers = xs - return nil -} - -func (rr *CERT) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return nil - } - - if v, ok := StringToCertType[l.token]; ok { - rr.Type = v - } else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil { - return &ParseError{f, "bad CERT Type", l} - } else { - rr.Type = uint16(i) - } - c.Next() // zBlank - l, _ = c.Next() // zString - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad CERT KeyTag", l} - } - rr.KeyTag = uint16(i) - c.Next() // zBlank - l, _ = c.Next() // zString - if v, ok := StringToAlgorithm[l.token]; ok { - rr.Algorithm = v - } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { - return &ParseError{f, "bad CERT Algorithm", l} - } else { - rr.Algorithm = uint8(i) - } - s, e1 := endingToString(c, "bad CERT Certificate", f) - if e1 != nil { - return e1 - } - rr.Certificate = s - return nil -} - -func (rr *OPENPGPKEY) parse(c *zlexer, o, f string) *ParseError { - s, e := endingToString(c, "bad OPENPGPKEY PublicKey", f) - if e != nil { - return e - } - rr.PublicKey = s - return nil -} - -func (rr *CSYNC) parse(c *zlexer, o, f string) *ParseError { - l, _ := 
c.Next() - if len(l.token) == 0 { // dynamic update rr. - return nil - } - j, e := strconv.ParseUint(l.token, 10, 32) - if e != nil { - // Serial must be a number - return &ParseError{f, "bad CSYNC serial", l} - } - rr.Serial = uint32(j) - - c.Next() // zBlank - - l, _ = c.Next() - j, e = strconv.ParseUint(l.token, 10, 16) - if e != nil { - // Serial must be a number - return &ParseError{f, "bad CSYNC flags", l} - } - rr.Flags = uint16(j) - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l, _ = c.Next() - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - tokenUpper := strings.ToUpper(l.token) - if k, ok = StringToType[tokenUpper]; !ok { - if k, ok = typeToInt(l.token); !ok { - return &ParseError{f, "bad CSYNC TypeBitMap", l} - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return &ParseError{f, "bad CSYNC TypeBitMap", l} - } - l, _ = c.Next() - } - return nil -} - -func (rr *SIG) parse(c *zlexer, o, f string) *ParseError { - return rr.RRSIG.parse(c, o, f) -} - -func (rr *RRSIG) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return nil - } - - tokenUpper := strings.ToUpper(l.token) - if t, ok := StringToType[tokenUpper]; !ok { - if strings.HasPrefix(tokenUpper, "TYPE") { - t, ok = typeToInt(l.token) - if !ok { - return &ParseError{f, "bad RRSIG Typecovered", l} - } - rr.TypeCovered = t - } else { - return &ParseError{f, "bad RRSIG Typecovered", l} - } - } else { - rr.TypeCovered = t - } - - c.Next() // zBlank - l, _ = c.Next() - i, err := strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return &ParseError{f, "bad RRSIG Algorithm", l} - } - rr.Algorithm = uint8(i) - - c.Next() // zBlank - l, _ = c.Next() - i, err = strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return &ParseError{f, "bad RRSIG Labels", l} - } - rr.Labels = uint8(i) - - c.Next() // zBlank - l, _ = c.Next() - i, err = strconv.ParseUint(l.token, 10, 32) - if err != nil || l.err { - return &ParseError{f, "bad RRSIG OrigTtl", l} - } - rr.OrigTtl = uint32(i) - - c.Next() // zBlank - l, _ = c.Next() - if i, err := StringToTime(l.token); err != nil { - // Try to see if all numeric and use it as epoch - if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { - // TODO(miek): error out on > MAX_UINT32, same below - rr.Expiration = uint32(i) - } else { - return &ParseError{f, "bad RRSIG Expiration", l} - } - } else { - rr.Expiration = i - } - - c.Next() // zBlank - l, _ = c.Next() - if i, err := StringToTime(l.token); err != nil { - if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { - rr.Inception = uint32(i) - } else { - return &ParseError{f, "bad RRSIG Inception", l} - } - } else { - rr.Inception = i - } - - c.Next() // zBlank - l, _ = c.Next() - i, err = strconv.ParseUint(l.token, 10, 16) - if err != nil || l.err { - return &ParseError{f, "bad RRSIG KeyTag", l} - } - rr.KeyTag = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() - rr.SignerName = l.token - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad RRSIG SignerName", l} - } - 
rr.SignerName = name - - s, e := endingToString(c, "bad RRSIG Signature", f) - if e != nil { - return e - } - rr.Signature = s - - return nil -} - -func (rr *NSEC) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - rr.NextDomain = l.token - if len(l.token) == 0 { // dynamic update rr. - return nil - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad NSEC NextDomain", l} - } - rr.NextDomain = name - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l, _ = c.Next() - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - tokenUpper := strings.ToUpper(l.token) - if k, ok = StringToType[tokenUpper]; !ok { - if k, ok = typeToInt(l.token); !ok { - return &ParseError{f, "bad NSEC TypeBitMap", l} - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return &ParseError{f, "bad NSEC TypeBitMap", l} - } - l, _ = c.Next() - } - return nil -} - -func (rr *NSEC3) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return nil - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad NSEC3 Hash", l} - } - rr.Hash = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad NSEC3 Flags", l} - } - rr.Flags = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad NSEC3 Iterations", l} - } - rr.Iterations = uint16(i) - c.Next() - l, _ = c.Next() - if len(l.token) == 0 || l.err { - return &ParseError{f, "bad NSEC3 Salt", l} - } - if l.token != "-" { - rr.SaltLength = uint8(len(l.token)) / 2 - rr.Salt = l.token - } - - c.Next() - l, _ = c.Next() - if len(l.token) == 0 || l.err { - return &ParseError{f, "bad NSEC3 NextDomain", l} - } - rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) - rr.NextDomain = l.token - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l, _ = c.Next() - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - tokenUpper := strings.ToUpper(l.token) - if k, ok = StringToType[tokenUpper]; !ok { - if k, ok = typeToInt(l.token); !ok { - return &ParseError{f, "bad NSEC3 TypeBitMap", l} - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return &ParseError{f, "bad NSEC3 TypeBitMap", l} - } - l, _ = c.Next() - } - return nil -} - -func (rr *NSEC3PARAM) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad NSEC3PARAM Hash", l} - } - rr.Hash = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad NSEC3PARAM Flags", l} - } - rr.Flags = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad NSEC3PARAM Iterations", l} - } - rr.Iterations = uint16(i) - c.Next() - l, _ = c.Next() - if l.token != "-" { - rr.SaltLength = uint8(len(l.token)) - rr.Salt = l.token - } - return slurpRemainder(c, f) -} - -func (rr *EUI48) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - if len(l.token) != 17 || l.err { - return &ParseError{f, "bad EUI48 Address", l} - } - addr := make([]byte, 12) - dash := 0 - for i := 0; i < 10; i += 2 { - addr[i] = l.token[i+dash] - addr[i+1] = l.token[i+1+dash] - dash++ - if l.token[i+1+dash] != '-' { - return &ParseError{f, "bad EUI48 Address", l} - } - } - addr[10] = l.token[15] - addr[11] = l.token[16] - - i, e := strconv.ParseUint(string(addr), 16, 48) - if e != nil { - return &ParseError{f, "bad EUI48 Address", l} - } - rr.Address = i - return slurpRemainder(c, f) -} - -func (rr *EUI64) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return slurpRemainder(c, f) - } - - if len(l.token) != 23 || l.err { - return &ParseError{f, "bad EUI64 Address", l} - } - addr := make([]byte, 16) - dash := 0 - for i := 0; i < 14; i += 2 { - addr[i] = l.token[i+dash] - addr[i+1] = l.token[i+1+dash] - dash++ - if l.token[i+1+dash] != '-' { - return &ParseError{f, "bad EUI64 Address", l} - } - } - addr[14] = l.token[21] - addr[15] = l.token[22] - - i, e := strconv.ParseUint(string(addr), 16, 64) - if e != nil { - return &ParseError{f, "bad EUI68 Address", l} - } - rr.Address = i - return slurpRemainder(c, f) -} - -func (rr *SSHFP) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return nil - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad SSHFP Algorithm", l} - } - rr.Algorithm = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad SSHFP Type", l} - } - rr.Type = uint8(i) - c.Next() // zBlank - s, e1 := endingToString(c, "bad SSHFP Fingerprint", f) - if e1 != nil { - return e1 - } - rr.FingerPrint = s - return nil -} - -func (rr *DNSKEY) parseDNSKEY(c *zlexer, o, f, typ string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return nil - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad " + typ + " Flags", l} - } - rr.Flags = uint16(i) - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad " + typ + " Protocol", l} - } - rr.Protocol = uint8(i) - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad " + typ + " Algorithm", l} - } - rr.Algorithm = uint8(i) - s, e1 := endingToString(c, "bad "+typ+" PublicKey", f) - if e1 != nil { - return e1 - } - rr.PublicKey = s - return nil -} - -func (rr *DNSKEY) parse(c *zlexer, o, f string) *ParseError { - return rr.parseDNSKEY(c, o, f, "DNSKEY") -} - -func (rr *KEY) parse(c *zlexer, o, f string) *ParseError { - return rr.parseDNSKEY(c, o, f, "KEY") -} - -func (rr *CDNSKEY) parse(c *zlexer, o, f string) *ParseError { - return rr.parseDNSKEY(c, o, f, "CDNSKEY") -} - -func (rr *RKEY) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return nil - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad RKEY Flags", l} - } - rr.Flags = uint16(i) - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad RKEY Protocol", l} - } - rr.Protocol = uint8(i) - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad RKEY Algorithm", l} - } - rr.Algorithm = uint8(i) - s, e1 := endingToString(c, "bad RKEY PublicKey", f) - if e1 != nil { - return e1 - } - rr.PublicKey = s - return nil -} - -func (rr *EID) parse(c *zlexer, o, f string) *ParseError { - s, e := endingToString(c, "bad EID Endpoint", f) - if e != nil { - return e - } - rr.Endpoint = s - return nil -} - -func (rr *NIMLOC) parse(c *zlexer, o, f string) *ParseError { - s, e := endingToString(c, "bad NIMLOC Locator", f) - if e != nil { - return e - } - rr.Locator = s - return nil -} - -func (rr *GPOS) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - _, e := strconv.ParseFloat(l.token, 64) - if e != nil || l.err { - return &ParseError{f, "bad GPOS Longitude", l} - } - rr.Longitude = l.token - c.Next() // zBlank - l, _ = c.Next() - _, e = strconv.ParseFloat(l.token, 64) - if e != nil || l.err { - return &ParseError{f, "bad GPOS Latitude", l} - } - rr.Latitude = l.token - c.Next() // zBlank - l, _ = c.Next() - _, e = strconv.ParseFloat(l.token, 64) - if e != nil || l.err { - return &ParseError{f, "bad GPOS Altitude", l} - } - rr.Altitude = l.token - return slurpRemainder(c, f) -} - -func (rr *DS) parseDS(c *zlexer, o, f, typ string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return nil - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad " + typ + " KeyTag", l} - } - rr.KeyTag = uint16(i) - c.Next() // zBlank - l, _ = c.Next() - if i, e = strconv.ParseUint(l.token, 10, 8); e != nil { - tokenUpper := strings.ToUpper(l.token) - i, ok := StringToAlgorithm[tokenUpper] - if !ok || l.err { - return &ParseError{f, "bad " + typ + " Algorithm", l} - } - rr.Algorithm = i - } else { - rr.Algorithm = uint8(i) - } - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad " + typ + " DigestType", l} - } - rr.DigestType = uint8(i) - s, e1 := endingToString(c, "bad "+typ+" Digest", f) - if e1 != nil { - return e1 - } - rr.Digest = s - return nil -} - -func (rr *DS) parse(c *zlexer, o, f string) *ParseError { - return rr.parseDS(c, o, f, "DS") -} - -func (rr *DLV) parse(c *zlexer, o, f string) *ParseError { - return rr.parseDS(c, o, f, "DLV") -} - -func (rr *CDS) parse(c *zlexer, o, f string) *ParseError { - return rr.parseDS(c, o, f, "CDS") -} - -func (rr *TA) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return nil - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad TA KeyTag", l} - } - rr.KeyTag = uint16(i) - c.Next() // zBlank - l, _ = c.Next() - if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { - tokenUpper := strings.ToUpper(l.token) - i, ok := StringToAlgorithm[tokenUpper] - if !ok || l.err { - return &ParseError{f, "bad TA Algorithm", l} - } - rr.Algorithm = i - } else { - rr.Algorithm = uint8(i) - } - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad TA DigestType", l} - } - rr.DigestType = uint8(i) - s, err := endingToString(c, "bad TA Digest", f) - if err != nil { - return err - } - rr.Digest = s - return nil -} - -func (rr *TLSA) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return nil - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad TLSA Usage", l} - } - rr.Usage = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad TLSA Selector", l} - } - rr.Selector = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad TLSA MatchingType", l} - } - rr.MatchingType = uint8(i) - // So this needs be e2 (i.e. different than e), because...??t - s, e2 := endingToString(c, "bad TLSA Certificate", f) - if e2 != nil { - return e2 - } - rr.Certificate = s - return nil -} - -func (rr *SMIMEA) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return nil - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad SMIMEA Usage", l} - } - rr.Usage = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad SMIMEA Selector", l} - } - rr.Selector = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{f, "bad SMIMEA MatchingType", l} - } - rr.MatchingType = uint8(i) - // So this needs be e2 (i.e. different than e), because...??t - s, e2 := endingToString(c, "bad SMIMEA Certificate", f) - if e2 != nil { - return e2 - } - rr.Certificate = s - return nil -} - -func (rr *RFC3597) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if l.token != "\\#" { - return &ParseError{f, "bad RFC3597 Rdata", l} - } - - c.Next() // zBlank - l, _ = c.Next() - rdlength, e := strconv.Atoi(l.token) - if e != nil || l.err { - return &ParseError{f, "bad RFC3597 Rdata ", l} - } - - s, e1 := endingToString(c, "bad RFC3597 Rdata", f) - if e1 != nil { - return e1 - } - if rdlength*2 != len(s) { - return &ParseError{f, "bad RFC3597 Rdata", l} - } - rr.Rdata = s - return nil -} - -func (rr *SPF) parse(c *zlexer, o, f string) *ParseError { - s, e := endingToTxtSlice(c, "bad SPF Txt", f) - if e != nil { - return e - } - rr.Txt = s - return nil -} - -func (rr *AVC) parse(c *zlexer, o, f string) *ParseError { - s, e := endingToTxtSlice(c, "bad AVC Txt", f) - if e != nil { - return e - } - rr.Txt = s - return nil -} - -func (rr *TXT) parse(c *zlexer, o, f string) *ParseError { - // no zBlank reading here, because all this rdata is TXT - s, e := endingToTxtSlice(c, "bad TXT Txt", f) - if e != nil { - return e - } - rr.Txt = s - return nil -} - -// identical to setTXT -func (rr *NINFO) parse(c *zlexer, o, f string) *ParseError { - s, e := endingToTxtSlice(c, "bad NINFO ZSData", f) - if e != nil { - return e - } - 
rr.ZSData = s - return nil -} - -func (rr *URI) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return nil - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad URI Priority", l} - } - rr.Priority = uint16(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad URI Weight", l} - } - rr.Weight = uint16(i) - - c.Next() // zBlank - s, err := endingToTxtSlice(c, "bad URI Target", f) - if err != nil { - return err - } - if len(s) != 1 { - return &ParseError{f, "bad URI Target", l} - } - rr.Target = s[0] - return nil -} - -func (rr *DHCID) parse(c *zlexer, o, f string) *ParseError { - // awesome record to parse! - s, e := endingToString(c, "bad DHCID Digest", f) - if e != nil { - return e - } - rr.Digest = s - return nil -} - -func (rr *NID) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad NID Preference", l} - } - rr.Preference = uint16(i) - c.Next() // zBlank - l, _ = c.Next() // zString - u, err := stringToNodeID(l) - if err != nil || l.err { - return err - } - rr.NodeID = u - return slurpRemainder(c, f) -} - -func (rr *L32) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad L32 Preference", l} - } - rr.Preference = uint16(i) - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Locator32 = net.ParseIP(l.token) - if rr.Locator32 == nil || l.err { - return &ParseError{f, "bad L32 Locator", l} - } - return slurpRemainder(c, f) -} - -func (rr *LP) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad LP Preference", l} - } - rr.Preference = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Fqdn = l.token - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return &ParseError{f, "bad LP Fqdn", l} - } - rr.Fqdn = name - - return slurpRemainder(c, f) -} - -func (rr *L64) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad L64 Preference", l} - } - rr.Preference = uint16(i) - c.Next() // zBlank - l, _ = c.Next() // zString - u, err := stringToNodeID(l) - if err != nil || l.err { - return err - } - rr.Locator64 = u - return slurpRemainder(c, f) -} - -func (rr *UID) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { - return &ParseError{f, "bad UID Uid", l} - } - rr.Uid = uint32(i) - return slurpRemainder(c, f) -} - -func (rr *GID) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { - return &ParseError{f, "bad GID Gid", l} - } - rr.Gid = uint32(i) - return slurpRemainder(c, f) -} - -func (rr *UINFO) parse(c *zlexer, o, f string) *ParseError { - s, e := endingToTxtSlice(c, "bad UINFO Uinfo", f) - if e != nil { - return e - } - if ln := len(s); ln == 0 { - return nil - } - rr.Uinfo = s[0] // silently discard anything after the first character-string - return nil -} - -func (rr *PX) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return slurpRemainder(c, f) - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return &ParseError{f, "bad PX Preference", l} - } - rr.Preference = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Map822 = l.token - map822, map822Ok := toAbsoluteName(l.token, o) - if l.err || !map822Ok { - return &ParseError{f, "bad PX Map822", l} - } - rr.Map822 = map822 - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Mapx400 = l.token - mapx400, mapx400Ok := toAbsoluteName(l.token, o) - if l.err || !mapx400Ok { - return &ParseError{f, "bad PX Mapx400", l} - } - rr.Mapx400 = mapx400 - - return slurpRemainder(c, f) -} - -func (rr *CAA) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return nil - } - - i, err := strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return &ParseError{f, "bad CAA Flag", l} - } - rr.Flag = uint8(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - if l.value != zString { - return &ParseError{f, "bad CAA Tag", l} - } - rr.Tag = l.token - - c.Next() // zBlank - s, e := endingToTxtSlice(c, "bad CAA Value", f) - if e != nil { - return e - } - if len(s) != 1 { - return &ParseError{f, "bad CAA Value", l} - } - rr.Value = s[0] - return nil -} - -func (rr *TKEY) parse(c *zlexer, o, f string) *ParseError { - l, _ := c.Next() - - // Algorithm - if l.value != zString { - return &ParseError{f, "bad TKEY algorithm", l} - } - rr.Algorithm = l.token - c.Next() // zBlank - - // Get the key length and key values - l, _ = c.Next() - i, err := strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return &ParseError{f, "bad TKEY key length", l} - } - rr.KeySize = uint16(i) - c.Next() // zBlank - l, _ = c.Next() - if l.value != zString { - return &ParseError{f, "bad TKEY key", l} - } - rr.Key = l.token - c.Next() // zBlank - - // Get the otherdata length and string data - l, _ = c.Next() - i, err = strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return &ParseError{f, "bad TKEY otherdata length", l} - } - rr.OtherLen = uint16(i) - c.Next() // zBlank - l, _ = c.Next() - if l.value != zString { - return &ParseError{f, "bad TKEY otherday", l} - } - rr.OtherData = l.token - - return nil -} diff --git a/vendor/github.com/miekg/dns/serve_mux.go b/vendor/github.com/miekg/dns/serve_mux.go deleted file mode 100644 index ae304db53..000000000 --- a/vendor/github.com/miekg/dns/serve_mux.go +++ /dev/null @@ -1,147 +0,0 @@ -package dns - -import ( - "strings" - "sync" -) - -// ServeMux is an DNS request multiplexer. It matches the zone name of -// each incoming request against a list of registered patterns add calls -// the handler for the pattern that most closely matches the zone name. 
-// -// ServeMux is DNSSEC aware, meaning that queries for the DS record are -// redirected to the parent zone (if that is also registered), otherwise -// the child gets the query. -// -// ServeMux is also safe for concurrent access from multiple goroutines. -// -// The zero ServeMux is empty and ready for use. -type ServeMux struct { - z map[string]Handler - m sync.RWMutex -} - -// NewServeMux allocates and returns a new ServeMux. -func NewServeMux() *ServeMux { - return new(ServeMux) -} - -// DefaultServeMux is the default ServeMux used by Serve. -var DefaultServeMux = NewServeMux() - -func (mux *ServeMux) match(q string, t uint16) Handler { - mux.m.RLock() - defer mux.m.RUnlock() - if mux.z == nil { - return nil - } - - var handler Handler - - // TODO(tmthrgd): Once https://go-review.googlesource.com/c/go/+/137575 - // lands in a go release, replace the following with strings.ToLower. - var sb strings.Builder - for i := 0; i < len(q); i++ { - c := q[i] - if !(c >= 'A' && c <= 'Z') { - continue - } - - sb.Grow(len(q)) - sb.WriteString(q[:i]) - - for ; i < len(q); i++ { - c := q[i] - if c >= 'A' && c <= 'Z' { - c += 'a' - 'A' - } - - sb.WriteByte(c) - } - - q = sb.String() - break - } - - for off, end := 0, false; !end; off, end = NextLabel(q, off) { - if h, ok := mux.z[q[off:]]; ok { - if t != TypeDS { - return h - } - // Continue for DS to see if we have a parent too, if so delegate to the parent - handler = h - } - } - - // Wildcard match, if we have found nothing try the root zone as a last resort. - if h, ok := mux.z["."]; ok { - return h - } - - return handler -} - -// Handle adds a handler to the ServeMux for pattern. -func (mux *ServeMux) Handle(pattern string, handler Handler) { - if pattern == "" { - panic("dns: invalid pattern " + pattern) - } - mux.m.Lock() - if mux.z == nil { - mux.z = make(map[string]Handler) - } - mux.z[Fqdn(pattern)] = handler - mux.m.Unlock() -} - -// HandleFunc adds a handler function to the ServeMux for pattern. 
-func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { - mux.Handle(pattern, HandlerFunc(handler)) -} - -// HandleRemove deregisters the handler specific for pattern from the ServeMux. -func (mux *ServeMux) HandleRemove(pattern string) { - if pattern == "" { - panic("dns: invalid pattern " + pattern) - } - mux.m.Lock() - delete(mux.z, Fqdn(pattern)) - mux.m.Unlock() -} - -// ServeDNS dispatches the request to the handler whose pattern most -// closely matches the request message. -// -// ServeDNS is DNSSEC aware, meaning that queries for the DS record -// are redirected to the parent zone (if that is also registered), -// otherwise the child gets the query. -// -// If no handler is found, or there is no question, a standard SERVFAIL -// message is returned -func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) { - var h Handler - if len(req.Question) >= 1 { // allow more than one question - h = mux.match(req.Question[0].Name, req.Question[0].Qtype) - } - - if h != nil { - h.ServeDNS(w, req) - } else { - HandleFailed(w, req) - } -} - -// Handle registers the handler with the given pattern -// in the DefaultServeMux. The documentation for -// ServeMux explains how patterns are matched. -func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } - -// HandleRemove deregisters the handle with the given pattern -// in the DefaultServeMux. -func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) } - -// HandleFunc registers the handler function with the given pattern -// in the DefaultServeMux. -func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { - DefaultServeMux.HandleFunc(pattern, handler) -} diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go deleted file mode 100644 index 97cc87a71..000000000 --- a/vendor/github.com/miekg/dns/server.go +++ /dev/null @@ -1,757 +0,0 @@ -// DNS server implementation. 
- -package dns - -import ( - "context" - "crypto/tls" - "encoding/binary" - "errors" - "io" - "net" - "strings" - "sync" - "time" -) - -// Default maximum number of TCP queries before we close the socket. -const maxTCPQueries = 128 - -// aLongTimeAgo is a non-zero time, far in the past, used for -// immediate cancelation of network operations. -var aLongTimeAgo = time.Unix(1, 0) - -// Handler is implemented by any value that implements ServeDNS. -type Handler interface { - ServeDNS(w ResponseWriter, r *Msg) -} - -// The HandlerFunc type is an adapter to allow the use of -// ordinary functions as DNS handlers. If f is a function -// with the appropriate signature, HandlerFunc(f) is a -// Handler object that calls f. -type HandlerFunc func(ResponseWriter, *Msg) - -// ServeDNS calls f(w, r). -func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) { - f(w, r) -} - -// A ResponseWriter interface is used by an DNS handler to -// construct an DNS response. -type ResponseWriter interface { - // LocalAddr returns the net.Addr of the server - LocalAddr() net.Addr - // RemoteAddr returns the net.Addr of the client that sent the current request. - RemoteAddr() net.Addr - // WriteMsg writes a reply back to the client. - WriteMsg(*Msg) error - // Write writes a raw buffer back to the client. - Write([]byte) (int, error) - // Close closes the connection. - Close() error - // TsigStatus returns the status of the Tsig. - TsigStatus() error - // TsigTimersOnly sets the tsig timers only boolean. - TsigTimersOnly(bool) - // Hijack lets the caller take over the connection. - // After a call to Hijack(), the DNS package will not do anything with the connection. - Hijack() -} - -// A ConnectionStater interface is used by a DNS Handler to access TLS connection state -// when available. 
-type ConnectionStater interface { - ConnectionState() *tls.ConnectionState -} - -type response struct { - closed bool // connection has been closed - hijacked bool // connection has been hijacked by handler - tsigTimersOnly bool - tsigStatus error - tsigRequestMAC string - tsigSecret map[string]string // the tsig secrets - udp *net.UDPConn // i/o connection if UDP was used - tcp net.Conn // i/o connection if TCP was used - udpSession *SessionUDP // oob data to get egress interface right - writer Writer // writer to output the raw DNS bits -} - -// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets. -func HandleFailed(w ResponseWriter, r *Msg) { - m := new(Msg) - m.SetRcode(r, RcodeServerFailure) - // does not matter if this write fails - w.WriteMsg(m) -} - -// ListenAndServe Starts a server on address and network specified Invoke handler -// for incoming queries. -func ListenAndServe(addr string, network string, handler Handler) error { - server := &Server{Addr: addr, Net: network, Handler: handler} - return server.ListenAndServe() -} - -// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in -// http://golang.org/pkg/net/http/#ListenAndServeTLS -func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return err - } - - config := tls.Config{ - Certificates: []tls.Certificate{cert}, - } - - server := &Server{ - Addr: addr, - Net: "tcp-tls", - TLSConfig: &config, - Handler: handler, - } - - return server.ListenAndServe() -} - -// ActivateAndServe activates a server with a listener from systemd, -// l and p should not both be non-nil. -// If both l and p are not nil only p will be used. -// Invoke handler for incoming queries. 
-func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error { - server := &Server{Listener: l, PacketConn: p, Handler: handler} - return server.ActivateAndServe() -} - -// Writer writes raw DNS messages; each call to Write should send an entire message. -type Writer interface { - io.Writer -} - -// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message. -type Reader interface { - // ReadTCP reads a raw message from a TCP connection. Implementations may alter - // connection properties, for example the read-deadline. - ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) - // ReadUDP reads a raw message from a UDP connection. Implementations may alter - // connection properties, for example the read-deadline. - ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) -} - -// defaultReader is an adapter for the Server struct that implements the Reader interface -// using the readTCP and readUDP func of the embedded Server. -type defaultReader struct { - *Server -} - -func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { - return dr.readTCP(conn, timeout) -} - -func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { - return dr.readUDP(conn, timeout) -} - -// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader. -// Implementations should never return a nil Reader. -type DecorateReader func(Reader) Reader - -// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer. -// Implementations should never return a nil Writer. -type DecorateWriter func(Writer) Writer - -// A Server defines parameters for running an DNS server. -type Server struct { - // Address to listen on, ":dns" if empty. 
- Addr string - // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one - Net string - // TCP Listener to use, this is to aid in systemd's socket activation. - Listener net.Listener - // TLS connection configuration - TLSConfig *tls.Config - // UDP "Listener" to use, this is to aid in systemd's socket activation. - PacketConn net.PacketConn - // Handler to invoke, dns.DefaultServeMux if nil. - Handler Handler - // Default buffer size to use to read incoming UDP messages. If not set - // it defaults to MinMsgSize (512 B). - UDPSize int - // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second. - ReadTimeout time.Duration - // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second. - WriteTimeout time.Duration - // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966). - IdleTimeout func() time.Duration - // Secret(s) for Tsig map[]. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2). - TsigSecret map[string]string - // If NotifyStartedFunc is set it is called once the server has started listening. - NotifyStartedFunc func() - // DecorateReader is optional, allows customization of the process that reads raw DNS messages. - DecorateReader DecorateReader - // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. - DecorateWriter DecorateWriter - // Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1). - MaxTCPQueries int - // Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address. - // It is only supported on go1.11+ and when using ListenAndServe. - ReusePort bool - // AcceptMsgFunc will check the incoming message and will reject it early in the process. - // By default DefaultMsgAcceptFunc will be used. 
- MsgAcceptFunc MsgAcceptFunc - - // Shutdown handling - lock sync.RWMutex - started bool - shutdown chan struct{} - conns map[net.Conn]struct{} - - // A pool for UDP message buffers. - udpPool sync.Pool -} - -func (srv *Server) isStarted() bool { - srv.lock.RLock() - started := srv.started - srv.lock.RUnlock() - return started -} - -func makeUDPBuffer(size int) func() interface{} { - return func() interface{} { - return make([]byte, size) - } -} - -func (srv *Server) init() { - srv.shutdown = make(chan struct{}) - srv.conns = make(map[net.Conn]struct{}) - - if srv.UDPSize == 0 { - srv.UDPSize = MinMsgSize - } - if srv.MsgAcceptFunc == nil { - srv.MsgAcceptFunc = DefaultMsgAcceptFunc - } - if srv.Handler == nil { - srv.Handler = DefaultServeMux - } - - srv.udpPool.New = makeUDPBuffer(srv.UDPSize) -} - -func unlockOnce(l sync.Locker) func() { - var once sync.Once - return func() { once.Do(l.Unlock) } -} - -// ListenAndServe starts a nameserver on the configured address in *Server. -func (srv *Server) ListenAndServe() error { - unlock := unlockOnce(&srv.lock) - srv.lock.Lock() - defer unlock() - - if srv.started { - return &Error{err: "server already started"} - } - - addr := srv.Addr - if addr == "" { - addr = ":domain" - } - - srv.init() - - switch srv.Net { - case "tcp", "tcp4", "tcp6": - l, err := listenTCP(srv.Net, addr, srv.ReusePort) - if err != nil { - return err - } - srv.Listener = l - srv.started = true - unlock() - return srv.serveTCP(l) - case "tcp-tls", "tcp4-tls", "tcp6-tls": - if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) { - return errors.New("dns: neither Certificates nor GetCertificate set in Config") - } - network := strings.TrimSuffix(srv.Net, "-tls") - l, err := listenTCP(network, addr, srv.ReusePort) - if err != nil { - return err - } - l = tls.NewListener(l, srv.TLSConfig) - srv.Listener = l - srv.started = true - unlock() - return srv.serveTCP(l) - case "udp", "udp4", "udp6": - l, err 
:= listenUDP(srv.Net, addr, srv.ReusePort) - if err != nil { - return err - } - u := l.(*net.UDPConn) - if e := setUDPSocketOptions(u); e != nil { - return e - } - srv.PacketConn = l - srv.started = true - unlock() - return srv.serveUDP(u) - } - return &Error{err: "bad network"} -} - -// ActivateAndServe starts a nameserver with the PacketConn or Listener -// configured in *Server. Its main use is to start a server from systemd. -func (srv *Server) ActivateAndServe() error { - unlock := unlockOnce(&srv.lock) - srv.lock.Lock() - defer unlock() - - if srv.started { - return &Error{err: "server already started"} - } - - srv.init() - - pConn := srv.PacketConn - l := srv.Listener - if pConn != nil { - // Check PacketConn interface's type is valid and value - // is not nil - if t, ok := pConn.(*net.UDPConn); ok && t != nil { - if e := setUDPSocketOptions(t); e != nil { - return e - } - srv.started = true - unlock() - return srv.serveUDP(t) - } - } - if l != nil { - srv.started = true - unlock() - return srv.serveTCP(l) - } - return &Error{err: "bad listeners"} -} - -// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and -// ActivateAndServe will return. -func (srv *Server) Shutdown() error { - return srv.ShutdownContext(context.Background()) -} - -// ShutdownContext shuts down a server. After a call to ShutdownContext, -// ListenAndServe and ActivateAndServe will return. -// -// A context.Context may be passed to limit how long to wait for connections -// to terminate. 
-func (srv *Server) ShutdownContext(ctx context.Context) error { - srv.lock.Lock() - if !srv.started { - srv.lock.Unlock() - return &Error{err: "server not started"} - } - - srv.started = false - - if srv.PacketConn != nil { - srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads - } - - if srv.Listener != nil { - srv.Listener.Close() - } - - for rw := range srv.conns { - rw.SetReadDeadline(aLongTimeAgo) // Unblock reads - } - - srv.lock.Unlock() - - if testShutdownNotify != nil { - testShutdownNotify.Broadcast() - } - - var ctxErr error - select { - case <-srv.shutdown: - case <-ctx.Done(): - ctxErr = ctx.Err() - } - - if srv.PacketConn != nil { - srv.PacketConn.Close() - } - - return ctxErr -} - -var testShutdownNotify *sync.Cond - -// getReadTimeout is a helper func to use system timeout if server did not intend to change it. -func (srv *Server) getReadTimeout() time.Duration { - if srv.ReadTimeout != 0 { - return srv.ReadTimeout - } - return dnsTimeout -} - -// serveTCP starts a TCP listener for the server. -func (srv *Server) serveTCP(l net.Listener) error { - defer l.Close() - - if srv.NotifyStartedFunc != nil { - srv.NotifyStartedFunc() - } - - var wg sync.WaitGroup - defer func() { - wg.Wait() - close(srv.shutdown) - }() - - for srv.isStarted() { - rw, err := l.Accept() - if err != nil { - if !srv.isStarted() { - return nil - } - if neterr, ok := err.(net.Error); ok && neterr.Temporary() { - continue - } - return err - } - srv.lock.Lock() - // Track the connection to allow unblocking reads on shutdown. - srv.conns[rw] = struct{}{} - srv.lock.Unlock() - wg.Add(1) - go srv.serveTCPConn(&wg, rw) - } - - return nil -} - -// serveUDP starts a UDP listener for the server. 
-func (srv *Server) serveUDP(l *net.UDPConn) error { - defer l.Close() - - if srv.NotifyStartedFunc != nil { - srv.NotifyStartedFunc() - } - - reader := Reader(defaultReader{srv}) - if srv.DecorateReader != nil { - reader = srv.DecorateReader(reader) - } - - var wg sync.WaitGroup - defer func() { - wg.Wait() - close(srv.shutdown) - }() - - rtimeout := srv.getReadTimeout() - // deadline is not used here - for srv.isStarted() { - m, s, err := reader.ReadUDP(l, rtimeout) - if err != nil { - if !srv.isStarted() { - return nil - } - if netErr, ok := err.(net.Error); ok && netErr.Temporary() { - continue - } - return err - } - if len(m) < headerSize { - if cap(m) == srv.UDPSize { - srv.udpPool.Put(m[:srv.UDPSize]) - } - continue - } - wg.Add(1) - go srv.serveUDPPacket(&wg, m, l, s) - } - - return nil -} - -// Serve a new TCP connection. -func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) { - w := &response{tsigSecret: srv.TsigSecret, tcp: rw} - if srv.DecorateWriter != nil { - w.writer = srv.DecorateWriter(w) - } else { - w.writer = w - } - - reader := Reader(defaultReader{srv}) - if srv.DecorateReader != nil { - reader = srv.DecorateReader(reader) - } - - idleTimeout := tcpIdleTimeout - if srv.IdleTimeout != nil { - idleTimeout = srv.IdleTimeout() - } - - timeout := srv.getReadTimeout() - - limit := srv.MaxTCPQueries - if limit == 0 { - limit = maxTCPQueries - } - - for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ { - m, err := reader.ReadTCP(w.tcp, timeout) - if err != nil { - // TODO(tmthrgd): handle error - break - } - srv.serveDNS(m, w) - if w.closed { - break // Close() was called - } - if w.hijacked { - break // client will call Close() themselves - } - // The first read uses the read timeout, the rest use the - // idle timeout. - timeout = idleTimeout - } - - if !w.hijacked { - w.Close() - } - - srv.lock.Lock() - delete(srv.conns, w.tcp) - srv.lock.Unlock() - - wg.Done() -} - -// Serve a new UDP request. 
-func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u *net.UDPConn, s *SessionUDP) { - w := &response{tsigSecret: srv.TsigSecret, udp: u, udpSession: s} - if srv.DecorateWriter != nil { - w.writer = srv.DecorateWriter(w) - } else { - w.writer = w - } - - srv.serveDNS(m, w) - wg.Done() -} - -func (srv *Server) serveDNS(m []byte, w *response) { - dh, off, err := unpackMsgHdr(m, 0) - if err != nil { - // Let client hang, they are sending crap; any reply can be used to amplify. - return - } - - req := new(Msg) - req.setHdr(dh) - - switch srv.MsgAcceptFunc(dh) { - case MsgAccept: - if req.unpack(dh, m, off) == nil { - break - } - - fallthrough - case MsgReject: - req.SetRcodeFormatError(req) - // Are we allowed to delete any OPT records here? - req.Ns, req.Answer, req.Extra = nil, nil, nil - - w.WriteMsg(req) - fallthrough - case MsgIgnore: - if w.udp != nil && cap(m) == srv.UDPSize { - srv.udpPool.Put(m[:srv.UDPSize]) - } - - return - } - - w.tsigStatus = nil - if w.tsigSecret != nil { - if t := req.IsTsig(); t != nil { - if secret, ok := w.tsigSecret[t.Hdr.Name]; ok { - w.tsigStatus = TsigVerify(m, secret, "", false) - } else { - w.tsigStatus = ErrSecret - } - w.tsigTimersOnly = false - w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC - } - } - - if w.udp != nil && cap(m) == srv.UDPSize { - srv.udpPool.Put(m[:srv.UDPSize]) - } - - srv.Handler.ServeDNS(w, req) // Writes back to the client -} - -func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { - // If we race with ShutdownContext, the read deadline may - // have been set in the distant past to unblock the read - // below. We must not override it, otherwise we may block - // ShutdownContext. 
- srv.lock.RLock() - if srv.started { - conn.SetReadDeadline(time.Now().Add(timeout)) - } - srv.lock.RUnlock() - - var length uint16 - if err := binary.Read(conn, binary.BigEndian, &length); err != nil { - return nil, err - } - - m := make([]byte, length) - if _, err := io.ReadFull(conn, m); err != nil { - return nil, err - } - - return m, nil -} - -func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { - srv.lock.RLock() - if srv.started { - // See the comment in readTCP above. - conn.SetReadDeadline(time.Now().Add(timeout)) - } - srv.lock.RUnlock() - - m := srv.udpPool.Get().([]byte) - n, s, err := ReadFromSessionUDP(conn, m) - if err != nil { - srv.udpPool.Put(m) - return nil, nil, err - } - m = m[:n] - return m, s, nil -} - -// WriteMsg implements the ResponseWriter.WriteMsg method. -func (w *response) WriteMsg(m *Msg) (err error) { - if w.closed { - return &Error{err: "WriteMsg called after Close"} - } - - var data []byte - if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) - if t := m.IsTsig(); t != nil { - data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) - if err != nil { - return err - } - _, err = w.writer.Write(data) - return err - } - } - data, err = m.Pack() - if err != nil { - return err - } - _, err = w.writer.Write(data) - return err -} - -// Write implements the ResponseWriter.Write method. 
-func (w *response) Write(m []byte) (int, error) { - if w.closed { - return 0, &Error{err: "Write called after Close"} - } - - switch { - case w.udp != nil: - return WriteToSessionUDP(w.udp, m, w.udpSession) - case w.tcp != nil: - if len(m) > MaxMsgSize { - return 0, &Error{err: "message too large"} - } - - l := make([]byte, 2) - binary.BigEndian.PutUint16(l, uint16(len(m))) - - n, err := (&net.Buffers{l, m}).WriteTo(w.tcp) - return int(n), err - default: - panic("dns: internal error: udp and tcp both nil") - } -} - -// LocalAddr implements the ResponseWriter.LocalAddr method. -func (w *response) LocalAddr() net.Addr { - switch { - case w.udp != nil: - return w.udp.LocalAddr() - case w.tcp != nil: - return w.tcp.LocalAddr() - default: - panic("dns: internal error: udp and tcp both nil") - } -} - -// RemoteAddr implements the ResponseWriter.RemoteAddr method. -func (w *response) RemoteAddr() net.Addr { - switch { - case w.udpSession != nil: - return w.udpSession.RemoteAddr() - case w.tcp != nil: - return w.tcp.RemoteAddr() - default: - panic("dns: internal error: udpSession and tcp both nil") - } -} - -// TsigStatus implements the ResponseWriter.TsigStatus method. -func (w *response) TsigStatus() error { return w.tsigStatus } - -// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. -func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } - -// Hijack implements the ResponseWriter.Hijack method. -func (w *response) Hijack() { w.hijacked = true } - -// Close implements the ResponseWriter.Close method -func (w *response) Close() error { - if w.closed { - return &Error{err: "connection already closed"} - } - w.closed = true - - switch { - case w.udp != nil: - // Can't close the udp conn, as that is actually the listener. - return nil - case w.tcp != nil: - return w.tcp.Close() - default: - panic("dns: internal error: udp and tcp both nil") - } -} - -// ConnectionState() implements the ConnectionStater.ConnectionState() interface. 
-func (w *response) ConnectionState() *tls.ConnectionState { - type tlsConnectionStater interface { - ConnectionState() tls.ConnectionState - } - if v, ok := w.tcp.(tlsConnectionStater); ok { - t := v.ConnectionState() - return &t - } - return nil -} diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go deleted file mode 100644 index 55cf1c386..000000000 --- a/vendor/github.com/miekg/dns/sig0.go +++ /dev/null @@ -1,209 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "encoding/binary" - "math/big" - "strings" - "time" -) - -// Sign signs a dns.Msg. It fills the signature with the appropriate data. -// The SIG record should have the SignerName, KeyTag, Algorithm, Inception -// and Expiration set. -func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { - if k == nil { - return nil, ErrPrivKey - } - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return nil, ErrKey - } - - rr.Hdr = RR_Header{Name: ".", Rrtype: TypeSIG, Class: ClassANY, Ttl: 0} - rr.OrigTtl, rr.TypeCovered, rr.Labels = 0, 0, 0 - - buf := make([]byte, m.Len()+Len(rr)) - mbuf, err := m.PackBuffer(buf) - if err != nil { - return nil, err - } - if &buf[0] != &mbuf[0] { - return nil, ErrBuf - } - off, err := PackRR(rr, buf, len(mbuf), nil, false) - if err != nil { - return nil, err - } - buf = buf[:off:cap(buf)] - - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return nil, ErrAlg - } - - hasher := hash.New() - // Write SIG rdata - hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) - // Write message - hasher.Write(buf[:len(mbuf)]) - - signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) - if err != nil { - return nil, err - } - - rr.Signature = toBase64(signature) - - buf = append(buf, signature...) 
- if len(buf) > int(^uint16(0)) { - return nil, ErrBuf - } - // Adjust sig data length - rdoff := len(mbuf) + 1 + 2 + 2 + 4 - rdlen := binary.BigEndian.Uint16(buf[rdoff:]) - rdlen += uint16(len(signature)) - binary.BigEndian.PutUint16(buf[rdoff:], rdlen) - // Adjust additional count - adc := binary.BigEndian.Uint16(buf[10:]) - adc++ - binary.BigEndian.PutUint16(buf[10:], adc) - return buf, nil -} - -// Verify validates the message buf using the key k. -// It's assumed that buf is a valid message from which rr was unpacked. -func (rr *SIG) Verify(k *KEY, buf []byte) error { - if k == nil { - return ErrKey - } - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return ErrKey - } - - var hash crypto.Hash - switch rr.Algorithm { - case DSA, RSASHA1: - hash = crypto.SHA1 - case RSASHA256, ECDSAP256SHA256: - hash = crypto.SHA256 - case ECDSAP384SHA384: - hash = crypto.SHA384 - case RSASHA512: - hash = crypto.SHA512 - default: - return ErrAlg - } - hasher := hash.New() - - buflen := len(buf) - qdc := binary.BigEndian.Uint16(buf[4:]) - anc := binary.BigEndian.Uint16(buf[6:]) - auc := binary.BigEndian.Uint16(buf[8:]) - adc := binary.BigEndian.Uint16(buf[10:]) - offset := headerSize - var err error - for i := uint16(0); i < qdc && offset < buflen; i++ { - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip past Type and Class - offset += 2 + 2 - } - for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip past Type, Class and TTL - offset += 2 + 2 + 4 - if offset+1 >= buflen { - continue - } - rdlen := binary.BigEndian.Uint16(buf[offset:]) - offset += 2 - offset += int(rdlen) - } - if offset >= buflen { - return &Error{err: "overflowing unpacking signed message"} - } - - // offset should be just prior to SIG - bodyend := offset - // owner name SHOULD be root - _, offset, err = UnpackDomainName(buf, offset) - if err != 
nil { - return err - } - // Skip Type, Class, TTL, RDLen - offset += 2 + 2 + 4 + 2 - sigstart := offset - // Skip Type Covered, Algorithm, Labels, Original TTL - offset += 2 + 1 + 1 + 4 - if offset+4+4 >= buflen { - return &Error{err: "overflow unpacking signed message"} - } - expire := binary.BigEndian.Uint32(buf[offset:]) - offset += 4 - incept := binary.BigEndian.Uint32(buf[offset:]) - offset += 4 - now := uint32(time.Now().Unix()) - if now < incept || now > expire { - return ErrTime - } - // Skip key tag - offset += 2 - var signername string - signername, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // If key has come from the DNS name compression might - // have mangled the case of the name - if !strings.EqualFold(signername, k.Header().Name) { - return &Error{err: "signer name doesn't match key name"} - } - sigend := offset - hasher.Write(buf[sigstart:sigend]) - hasher.Write(buf[:10]) - hasher.Write([]byte{ - byte((adc - 1) << 8), - byte(adc - 1), - }) - hasher.Write(buf[12:bodyend]) - - hashed := hasher.Sum(nil) - sig := buf[sigend:] - switch k.Algorithm { - case DSA: - pk := k.publicKeyDSA() - sig = sig[1:] - r := new(big.Int).SetBytes(sig[:len(sig)/2]) - s := new(big.Int).SetBytes(sig[len(sig)/2:]) - if pk != nil { - if dsa.Verify(pk, hashed, r, s) { - return nil - } - return ErrSig - } - case RSASHA1, RSASHA256, RSASHA512: - pk := k.publicKeyRSA() - if pk != nil { - return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) - } - case ECDSAP256SHA256, ECDSAP384SHA384: - pk := k.publicKeyECDSA() - r := new(big.Int).SetBytes(sig[:len(sig)/2]) - s := new(big.Int).SetBytes(sig[len(sig)/2:]) - if pk != nil { - if ecdsa.Verify(pk, hashed, r, s) { - return nil - } - return ErrSig - } - } - return ErrKeyAlg -} diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go deleted file mode 100644 index febcc300f..000000000 --- a/vendor/github.com/miekg/dns/singleinflight.go +++ /dev/null @@ 
-1,61 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted for dns package usage by Miek Gieben. - -package dns - -import "sync" -import "time" - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - val *Msg - rtt time.Duration - err error - dups int -} - -// singleflight represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type singleflight struct { - sync.Mutex // protects m - m map[string]*call // lazily initialized - - dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { - g.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.Unlock() - c.wg.Wait() - return c.val, c.rtt, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.Unlock() - - c.val, c.rtt, c.err = fn() - c.wg.Done() - - if !g.dontDeleteForTesting { - g.Lock() - delete(g.m, key) - g.Unlock() - } - - return c.val, c.rtt, c.err, c.dups > 0 -} diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go deleted file mode 100644 index 89f09f0d1..000000000 --- a/vendor/github.com/miekg/dns/smimea.go +++ /dev/null @@ -1,44 +0,0 @@ -package dns - -import ( - "crypto/sha256" - "crypto/x509" - "encoding/hex" -) - -// Sign creates a SMIMEA record from an SSL certificate. 
-func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { - r.Hdr.Rrtype = TypeSMIMEA - r.Usage = uint8(usage) - r.Selector = uint8(selector) - r.MatchingType = uint8(matchingType) - - r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) - return err -} - -// Verify verifies a SMIMEA record against an SSL certificate. If it is OK -// a nil error is returned. -func (r *SMIMEA) Verify(cert *x509.Certificate) error { - c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err // Not also ErrSig? - } - if r.Certificate == c { - return nil - } - return ErrSig // ErrSig, really? -} - -// SMIMEAName returns the ownername of a SMIMEA resource record as per the -// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3 -func SMIMEAName(email, domain string) (string, error) { - hasher := sha256.New() - hasher.Write([]byte(email)) - - // RFC Section 3: "The local-part is hashed using the SHA2-256 - // algorithm with the hash truncated to 28 octets and - // represented in its hexadecimal representation to become the - // left-most label in the prepared domain name" - return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil -} diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go deleted file mode 100644 index 4e07983b9..000000000 --- a/vendor/github.com/miekg/dns/tlsa.go +++ /dev/null @@ -1,44 +0,0 @@ -package dns - -import ( - "crypto/x509" - "net" - "strconv" -) - -// Sign creates a TLSA record from an SSL certificate. -func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { - r.Hdr.Rrtype = TypeTLSA - r.Usage = uint8(usage) - r.Selector = uint8(selector) - r.MatchingType = uint8(matchingType) - - r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) - return err -} - -// Verify verifies a TLSA record against an SSL certificate. 
If it is OK -// a nil error is returned. -func (r *TLSA) Verify(cert *x509.Certificate) error { - c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err // Not also ErrSig? - } - if r.Certificate == c { - return nil - } - return ErrSig // ErrSig, really? -} - -// TLSAName returns the ownername of a TLSA resource record as per the -// rules specified in RFC 6698, Section 3. -func TLSAName(name, service, network string) (string, error) { - if !IsFqdn(name) { - return "", ErrFqdn - } - p, err := net.LookupPort(network, service) - if err != nil { - return "", err - } - return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil -} diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go deleted file mode 100644 index afa462fa0..000000000 --- a/vendor/github.com/miekg/dns/tsig.go +++ /dev/null @@ -1,389 +0,0 @@ -package dns - -import ( - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/binary" - "encoding/hex" - "hash" - "strconv" - "strings" - "time" -) - -// HMAC hashing codes. These are transmitted as domain names. -const ( - HmacMD5 = "hmac-md5.sig-alg.reg.int." - HmacSHA1 = "hmac-sha1." - HmacSHA256 = "hmac-sha256." - HmacSHA512 = "hmac-sha512." -) - -// TSIG is the RR the holds the transaction signature of a message. -// See RFC 2845 and RFC 4635. -type TSIG struct { - Hdr RR_Header - Algorithm string `dns:"domain-name"` - TimeSigned uint64 `dns:"uint48"` - Fudge uint16 - MACSize uint16 - MAC string `dns:"size-hex:MACSize"` - OrigId uint16 - Error uint16 - OtherLen uint16 - OtherData string `dns:"size-hex:OtherLen"` -} - -// TSIG has no official presentation format, but this will suffice. 
- -func (rr *TSIG) String() string { - s := "\n;; TSIG PSEUDOSECTION:\n" - s += rr.Hdr.String() + - " " + rr.Algorithm + - " " + tsigTimeToString(rr.TimeSigned) + - " " + strconv.Itoa(int(rr.Fudge)) + - " " + strconv.Itoa(int(rr.MACSize)) + - " " + strings.ToUpper(rr.MAC) + - " " + strconv.Itoa(int(rr.OrigId)) + - " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR - " " + strconv.Itoa(int(rr.OtherLen)) + - " " + rr.OtherData - return s -} - -func (rr *TSIG) parse(c *zlexer, origin, file string) *ParseError { - panic("dns: internal error: parse should never be called on TSIG") -} - -// The following values must be put in wireformat, so that the MAC can be calculated. -// RFC 2845, section 3.4.2. TSIG Variables. -type tsigWireFmt struct { - // From RR_Header - Name string `dns:"domain-name"` - Class uint16 - Ttl uint32 - // Rdata of the TSIG - Algorithm string `dns:"domain-name"` - TimeSigned uint64 `dns:"uint48"` - Fudge uint16 - // MACSize, MAC and OrigId excluded - Error uint16 - OtherLen uint16 - OtherData string `dns:"size-hex:OtherLen"` -} - -// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC -type macWireFmt struct { - MACSize uint16 - MAC string `dns:"size-hex:MACSize"` -} - -// 3.3. Time values used in TSIG calculations -type timerWireFmt struct { - TimeSigned uint64 `dns:"uint48"` - Fudge uint16 -} - -// TsigGenerate fills out the TSIG record attached to the message. -// The message should contain -// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), -// time fudge (defaults to 300 seconds) and the current time -// The TSIG MAC is saved in that Tsig RR. -// When TsigGenerate is called for the first time requestMAC is set to the empty string and -// timersOnly is false. -// If something goes wrong an error is returned, otherwise it is nil. 
-func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { - if m.IsTsig() == nil { - panic("dns: TSIG not last RR in additional") - } - // If we barf here, the caller is to blame - rawsecret, err := fromBase64([]byte(secret)) - if err != nil { - return nil, "", err - } - - rr := m.Extra[len(m.Extra)-1].(*TSIG) - m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg - mbuf, err := m.Pack() - if err != nil { - return nil, "", err - } - buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) - - t := new(TSIG) - var h hash.Hash - switch strings.ToLower(rr.Algorithm) { - case HmacMD5: - h = hmac.New(md5.New, rawsecret) - case HmacSHA1: - h = hmac.New(sha1.New, rawsecret) - case HmacSHA256: - h = hmac.New(sha256.New, rawsecret) - case HmacSHA512: - h = hmac.New(sha512.New, rawsecret) - default: - return nil, "", ErrKeyAlg - } - h.Write(buf) - t.MAC = hex.EncodeToString(h.Sum(nil)) - t.MACSize = uint16(len(t.MAC) / 2) // Size is half! - - t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} - t.Fudge = rr.Fudge - t.TimeSigned = rr.TimeSigned - t.Algorithm = rr.Algorithm - t.OrigId = m.Id - - tbuf := make([]byte, Len(t)) - off, err := PackRR(t, tbuf, 0, nil, false) - if err != nil { - return nil, "", err - } - mbuf = append(mbuf, tbuf[:off]...) - // Update the ArCount directly in the buffer. - binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) - - return mbuf, t.MAC, nil -} - -// TsigVerify verifies the TSIG on a message. -// If the signature does not validate err contains the -// error, otherwise it is nil. 
-func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { - rawsecret, err := fromBase64([]byte(secret)) - if err != nil { - return err - } - // Strip the TSIG from the incoming msg - stripped, tsig, err := stripTsig(msg) - if err != nil { - return err - } - - msgMAC, err := hex.DecodeString(tsig.MAC) - if err != nil { - return err - } - - buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) - - // Fudge factor works both ways. A message can arrive before it was signed because - // of clock skew. - now := uint64(time.Now().Unix()) - ti := now - tsig.TimeSigned - if now < tsig.TimeSigned { - ti = tsig.TimeSigned - now - } - if uint64(tsig.Fudge) < ti { - return ErrTime - } - - var h hash.Hash - switch strings.ToLower(tsig.Algorithm) { - case HmacMD5: - h = hmac.New(md5.New, rawsecret) - case HmacSHA1: - h = hmac.New(sha1.New, rawsecret) - case HmacSHA256: - h = hmac.New(sha256.New, rawsecret) - case HmacSHA512: - h = hmac.New(sha512.New, rawsecret) - default: - return ErrKeyAlg - } - h.Write(buf) - if !hmac.Equal(h.Sum(nil), msgMAC) { - return ErrSig - } - return nil -} - -// Create a wiredata buffer for the MAC calculation. -func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { - var buf []byte - if rr.TimeSigned == 0 { - rr.TimeSigned = uint64(time.Now().Unix()) - } - if rr.Fudge == 0 { - rr.Fudge = 300 // Standard (RFC) default. 
- } - - // Replace message ID in header with original ID from TSIG - binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId) - - if requestMAC != "" { - m := new(macWireFmt) - m.MACSize = uint16(len(requestMAC) / 2) - m.MAC = requestMAC - buf = make([]byte, len(requestMAC)) // long enough - n, _ := packMacWire(m, buf) - buf = buf[:n] - } - - tsigvar := make([]byte, DefaultMsgSize) - if timersOnly { - tsig := new(timerWireFmt) - tsig.TimeSigned = rr.TimeSigned - tsig.Fudge = rr.Fudge - n, _ := packTimerWire(tsig, tsigvar) - tsigvar = tsigvar[:n] - } else { - tsig := new(tsigWireFmt) - tsig.Name = strings.ToLower(rr.Hdr.Name) - tsig.Class = ClassANY - tsig.Ttl = rr.Hdr.Ttl - tsig.Algorithm = strings.ToLower(rr.Algorithm) - tsig.TimeSigned = rr.TimeSigned - tsig.Fudge = rr.Fudge - tsig.Error = rr.Error - tsig.OtherLen = rr.OtherLen - tsig.OtherData = rr.OtherData - n, _ := packTsigWire(tsig, tsigvar) - tsigvar = tsigvar[:n] - } - - if requestMAC != "" { - x := append(buf, msgbuf...) - buf = append(x, tsigvar...) - } else { - buf = append(msgbuf, tsigvar...) - } - return buf -} - -// Strip the TSIG from the raw message. -func stripTsig(msg []byte) ([]byte, *TSIG, error) { - // Copied from msg.go's Unpack() Header, but modified. 
- var ( - dh Header - err error - ) - off, tsigoff := 0, 0 - - if dh, off, err = unpackMsgHdr(msg, off); err != nil { - return nil, nil, err - } - if dh.Arcount == 0 { - return nil, nil, ErrNoSig - } - - // Rcode, see msg.go Unpack() - if int(dh.Bits&0xF) == RcodeNotAuth { - return nil, nil, ErrAuth - } - - for i := 0; i < int(dh.Qdcount); i++ { - _, off, err = unpackQuestion(msg, off) - if err != nil { - return nil, nil, err - } - } - - _, off, err = unpackRRslice(int(dh.Ancount), msg, off) - if err != nil { - return nil, nil, err - } - _, off, err = unpackRRslice(int(dh.Nscount), msg, off) - if err != nil { - return nil, nil, err - } - - rr := new(TSIG) - var extra RR - for i := 0; i < int(dh.Arcount); i++ { - tsigoff = off - extra, off, err = UnpackRR(msg, off) - if err != nil { - return nil, nil, err - } - if extra.Header().Rrtype == TypeTSIG { - rr = extra.(*TSIG) - // Adjust Arcount. - arcount := binary.BigEndian.Uint16(msg[10:]) - binary.BigEndian.PutUint16(msg[10:], arcount-1) - break - } - } - if rr == nil { - return nil, nil, ErrNoSig - } - return msg[:tsigoff], rr, nil -} - -// Translate the TSIG time signed into a date. There is no -// need for RFC1982 calculations as this date is 48 bits. 
-func tsigTimeToString(t uint64) string { - ti := time.Unix(int64(t), 0).UTC() - return ti.Format("20060102150405") -} - -func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) { - // copied from zmsg.go TSIG packing - // RR_Header - off, err := PackDomainName(tw.Name, msg, 0, nil, false) - if err != nil { - return off, err - } - off, err = packUint16(tw.Class, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(tw.Ttl, msg, off) - if err != nil { - return off, err - } - - off, err = PackDomainName(tw.Algorithm, msg, off, nil, false) - if err != nil { - return off, err - } - off, err = packUint48(tw.TimeSigned, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(tw.Fudge, msg, off) - if err != nil { - return off, err - } - - off, err = packUint16(tw.Error, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(tw.OtherLen, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(tw.OtherData, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func packMacWire(mw *macWireFmt, msg []byte) (int, error) { - off, err := packUint16(mw.MACSize, msg, 0) - if err != nil { - return off, err - } - off, err = packStringHex(mw.MAC, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) { - off, err := packUint48(tw.TimeSigned, msg, 0) - if err != nil { - return off, err - } - off, err = packUint16(tw.Fudge, msg, off) - if err != nil { - return off, err - } - return off, nil -} diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go deleted file mode 100644 index 835f2fe7c..000000000 --- a/vendor/github.com/miekg/dns/types.go +++ /dev/null @@ -1,1434 +0,0 @@ -package dns - -import ( - "fmt" - "net" - "strconv" - "strings" - "time" -) - -type ( - // Type is a DNS type. - Type uint16 - // Class is a DNS class. - Class uint16 - // Name is a DNS domain name. 
- Name string -) - -// Packet formats - -// Wire constants and supported types. -const ( - // valid RR_Header.Rrtype and Question.qtype - - TypeNone uint16 = 0 - TypeA uint16 = 1 - TypeNS uint16 = 2 - TypeMD uint16 = 3 - TypeMF uint16 = 4 - TypeCNAME uint16 = 5 - TypeSOA uint16 = 6 - TypeMB uint16 = 7 - TypeMG uint16 = 8 - TypeMR uint16 = 9 - TypeNULL uint16 = 10 - TypePTR uint16 = 12 - TypeHINFO uint16 = 13 - TypeMINFO uint16 = 14 - TypeMX uint16 = 15 - TypeTXT uint16 = 16 - TypeRP uint16 = 17 - TypeAFSDB uint16 = 18 - TypeX25 uint16 = 19 - TypeISDN uint16 = 20 - TypeRT uint16 = 21 - TypeNSAPPTR uint16 = 23 - TypeSIG uint16 = 24 - TypeKEY uint16 = 25 - TypePX uint16 = 26 - TypeGPOS uint16 = 27 - TypeAAAA uint16 = 28 - TypeLOC uint16 = 29 - TypeNXT uint16 = 30 - TypeEID uint16 = 31 - TypeNIMLOC uint16 = 32 - TypeSRV uint16 = 33 - TypeATMA uint16 = 34 - TypeNAPTR uint16 = 35 - TypeKX uint16 = 36 - TypeCERT uint16 = 37 - TypeDNAME uint16 = 39 - TypeOPT uint16 = 41 // EDNS - TypeDS uint16 = 43 - TypeSSHFP uint16 = 44 - TypeRRSIG uint16 = 46 - TypeNSEC uint16 = 47 - TypeDNSKEY uint16 = 48 - TypeDHCID uint16 = 49 - TypeNSEC3 uint16 = 50 - TypeNSEC3PARAM uint16 = 51 - TypeTLSA uint16 = 52 - TypeSMIMEA uint16 = 53 - TypeHIP uint16 = 55 - TypeNINFO uint16 = 56 - TypeRKEY uint16 = 57 - TypeTALINK uint16 = 58 - TypeCDS uint16 = 59 - TypeCDNSKEY uint16 = 60 - TypeOPENPGPKEY uint16 = 61 - TypeCSYNC uint16 = 62 - TypeSPF uint16 = 99 - TypeUINFO uint16 = 100 - TypeUID uint16 = 101 - TypeGID uint16 = 102 - TypeUNSPEC uint16 = 103 - TypeNID uint16 = 104 - TypeL32 uint16 = 105 - TypeL64 uint16 = 106 - TypeLP uint16 = 107 - TypeEUI48 uint16 = 108 - TypeEUI64 uint16 = 109 - TypeURI uint16 = 256 - TypeCAA uint16 = 257 - TypeAVC uint16 = 258 - - TypeTKEY uint16 = 249 - TypeTSIG uint16 = 250 - - // valid Question.Qtype only - TypeIXFR uint16 = 251 - TypeAXFR uint16 = 252 - TypeMAILB uint16 = 253 - TypeMAILA uint16 = 254 - TypeANY uint16 = 255 - - TypeTA uint16 = 32768 - TypeDLV uint16 = 
32769 - TypeReserved uint16 = 65535 - - // valid Question.Qclass - ClassINET = 1 - ClassCSNET = 2 - ClassCHAOS = 3 - ClassHESIOD = 4 - ClassNONE = 254 - ClassANY = 255 - - // Message Response Codes, see https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml - RcodeSuccess = 0 // NoError - No Error [DNS] - RcodeFormatError = 1 // FormErr - Format Error [DNS] - RcodeServerFailure = 2 // ServFail - Server Failure [DNS] - RcodeNameError = 3 // NXDomain - Non-Existent Domain [DNS] - RcodeNotImplemented = 4 // NotImp - Not Implemented [DNS] - RcodeRefused = 5 // Refused - Query Refused [DNS] - RcodeYXDomain = 6 // YXDomain - Name Exists when it should not [DNS Update] - RcodeYXRrset = 7 // YXRRSet - RR Set Exists when it should not [DNS Update] - RcodeNXRrset = 8 // NXRRSet - RR Set that should exist does not [DNS Update] - RcodeNotAuth = 9 // NotAuth - Server Not Authoritative for zone [DNS Update] - RcodeNotZone = 10 // NotZone - Name not contained in zone [DNS Update/TSIG] - RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG] - RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0] - RcodeBadKey = 17 // BADKEY - Key not recognized [TSIG] - RcodeBadTime = 18 // BADTIME - Signature out of time window [TSIG] - RcodeBadMode = 19 // BADMODE - Bad TKEY Mode [TKEY] - RcodeBadName = 20 // BADNAME - Duplicate key name [TKEY] - RcodeBadAlg = 21 // BADALG - Algorithm not supported [TKEY] - RcodeBadTrunc = 22 // BADTRUNC - Bad Truncation [TSIG] - RcodeBadCookie = 23 // BADCOOKIE - Bad/missing Server Cookie [DNS Cookies] - - // Message Opcodes. There is no 3. - OpcodeQuery = 0 - OpcodeIQuery = 1 - OpcodeStatus = 2 - OpcodeNotify = 4 - OpcodeUpdate = 5 -) - -// Header is the wire format for the DNS packet header. 
-type Header struct { - Id uint16 - Bits uint16 - Qdcount, Ancount, Nscount, Arcount uint16 -} - -const ( - headerSize = 12 - - // Header.Bits - _QR = 1 << 15 // query/response (response=1) - _AA = 1 << 10 // authoritative - _TC = 1 << 9 // truncated - _RD = 1 << 8 // recursion desired - _RA = 1 << 7 // recursion available - _Z = 1 << 6 // Z - _AD = 1 << 5 // authticated data - _CD = 1 << 4 // checking disabled -) - -// Various constants used in the LOC RR, See RFC 1887. -const ( - LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. - LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. - LOC_HOURS = 60 * 1000 - LOC_DEGREES = 60 * LOC_HOURS - LOC_ALTITUDEBASE = 100000 -) - -// Different Certificate Types, see RFC 4398, Section 2.1 -const ( - CertPKIX = 1 + iota - CertSPKI - CertPGP - CertIPIX - CertISPKI - CertIPGP - CertACPKIX - CertIACPKIX - CertURI = 253 - CertOID = 254 -) - -// CertTypeToString converts the Cert Type to its string representation. -// See RFC 4398 and RFC 6944. -var CertTypeToString = map[uint16]string{ - CertPKIX: "PKIX", - CertSPKI: "SPKI", - CertPGP: "PGP", - CertIPIX: "IPIX", - CertISPKI: "ISPKI", - CertIPGP: "IPGP", - CertACPKIX: "ACPKIX", - CertIACPKIX: "IACPKIX", - CertURI: "URI", - CertOID: "OID", -} - -//go:generate go run types_generate.go - -// Question holds a DNS question. There can be multiple questions in the -// question section of a message. Usually there is just one. -type Question struct { - Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) - Qtype uint16 - Qclass uint16 -} - -func (q *Question) len(off int, compression map[string]struct{}) int { - l := domainNameLen(q.Name, off, compression, true) - l += 2 + 2 - return l -} - -func (q *Question) String() (s string) { - // prefix with ; (as in dig) - s = ";" + sprintName(q.Name) + "\t" - s += Class(q.Qclass).String() + "\t" - s += " " + Type(q.Qtype).String() - return s -} - -// ANY is a wildcard record. See RFC 1035, Section 3.2.3. 
ANY -// is named "*" there. -type ANY struct { - Hdr RR_Header - // Does not have any rdata -} - -func (rr *ANY) String() string { return rr.Hdr.String() } - -func (rr *ANY) parse(c *zlexer, origin, file string) *ParseError { - panic("dns: internal error: parse should never be called on ANY") -} - -// NULL RR. See RFC 1035. -type NULL struct { - Hdr RR_Header - Data string `dns:"any"` -} - -func (rr *NULL) String() string { - // There is no presentation format; prefix string with a comment. - return ";" + rr.Hdr.String() + rr.Data -} - -func (rr *NULL) parse(c *zlexer, origin, file string) *ParseError { - panic("dns: internal error: parse should never be called on NULL") -} - -// CNAME RR. See RFC 1034. -type CNAME struct { - Hdr RR_Header - Target string `dns:"cdomain-name"` -} - -func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } - -// HINFO RR. See RFC 1034. -type HINFO struct { - Hdr RR_Header - Cpu string - Os string -} - -func (rr *HINFO) String() string { - return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) -} - -// MB RR. See RFC 1035. -type MB struct { - Hdr RR_Header - Mb string `dns:"cdomain-name"` -} - -func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } - -// MG RR. See RFC 1035. -type MG struct { - Hdr RR_Header - Mg string `dns:"cdomain-name"` -} - -func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } - -// MINFO RR. See RFC 1035. -type MINFO struct { - Hdr RR_Header - Rmail string `dns:"cdomain-name"` - Email string `dns:"cdomain-name"` -} - -func (rr *MINFO) String() string { - return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) -} - -// MR RR. See RFC 1035. -type MR struct { - Hdr RR_Header - Mr string `dns:"cdomain-name"` -} - -func (rr *MR) String() string { - return rr.Hdr.String() + sprintName(rr.Mr) -} - -// MF RR. See RFC 1035. 
-type MF struct { - Hdr RR_Header - Mf string `dns:"cdomain-name"` -} - -func (rr *MF) String() string { - return rr.Hdr.String() + sprintName(rr.Mf) -} - -// MD RR. See RFC 1035. -type MD struct { - Hdr RR_Header - Md string `dns:"cdomain-name"` -} - -func (rr *MD) String() string { - return rr.Hdr.String() + sprintName(rr.Md) -} - -// MX RR. See RFC 1035. -type MX struct { - Hdr RR_Header - Preference uint16 - Mx string `dns:"cdomain-name"` -} - -func (rr *MX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) -} - -// AFSDB RR. See RFC 1183. -type AFSDB struct { - Hdr RR_Header - Subtype uint16 - Hostname string `dns:"domain-name"` -} - -func (rr *AFSDB) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) -} - -// X25 RR. See RFC 1183, Section 3.1. -type X25 struct { - Hdr RR_Header - PSDNAddress string -} - -func (rr *X25) String() string { - return rr.Hdr.String() + rr.PSDNAddress -} - -// RT RR. See RFC 1183, Section 3.3. -type RT struct { - Hdr RR_Header - Preference uint16 - Host string `dns:"domain-name"` // RFC 3597 prohibits compressing records not defined in RFC 1035. -} - -func (rr *RT) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) -} - -// NS RR. See RFC 1035. -type NS struct { - Hdr RR_Header - Ns string `dns:"cdomain-name"` -} - -func (rr *NS) String() string { - return rr.Hdr.String() + sprintName(rr.Ns) -} - -// PTR RR. See RFC 1035. -type PTR struct { - Hdr RR_Header - Ptr string `dns:"cdomain-name"` -} - -func (rr *PTR) String() string { - return rr.Hdr.String() + sprintName(rr.Ptr) -} - -// RP RR. See RFC 1138, Section 2.2. -type RP struct { - Hdr RR_Header - Mbox string `dns:"domain-name"` - Txt string `dns:"domain-name"` -} - -func (rr *RP) String() string { - return rr.Hdr.String() + sprintName(rr.Mbox) + " " + sprintName(rr.Txt) -} - -// SOA RR. See RFC 1035. 
-type SOA struct { - Hdr RR_Header - Ns string `dns:"cdomain-name"` - Mbox string `dns:"cdomain-name"` - Serial uint32 - Refresh uint32 - Retry uint32 - Expire uint32 - Minttl uint32 -} - -func (rr *SOA) String() string { - return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + - " " + strconv.FormatInt(int64(rr.Serial), 10) + - " " + strconv.FormatInt(int64(rr.Refresh), 10) + - " " + strconv.FormatInt(int64(rr.Retry), 10) + - " " + strconv.FormatInt(int64(rr.Expire), 10) + - " " + strconv.FormatInt(int64(rr.Minttl), 10) -} - -// TXT RR. See RFC 1035. -type TXT struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -func sprintName(s string) string { - var dst strings.Builder - dst.Grow(len(s)) - for i := 0; i < len(s); { - if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' { - dst.WriteString(s[i : i+2]) - i += 2 - continue - } - - b, n := nextByte(s, i) - switch { - case n == 0: - i++ // dangling back slash - case b == '.': - dst.WriteByte('.') - default: - writeDomainNameByte(&dst, b) - } - i += n - } - return dst.String() -} - -func sprintTxtOctet(s string) string { - var dst strings.Builder - dst.Grow(2 + len(s)) - dst.WriteByte('"') - for i := 0; i < len(s); { - if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' 
{ - dst.WriteString(s[i : i+2]) - i += 2 - continue - } - - b, n := nextByte(s, i) - switch { - case n == 0: - i++ // dangling back slash - case b == '.': - dst.WriteByte('.') - case b < ' ' || b > '~': - dst.WriteString(escapeByte(b)) - default: - dst.WriteByte(b) - } - i += n - } - dst.WriteByte('"') - return dst.String() -} - -func sprintTxt(txt []string) string { - var out strings.Builder - for i, s := range txt { - out.Grow(3 + len(s)) - if i > 0 { - out.WriteString(` "`) - } else { - out.WriteByte('"') - } - for j := 0; j < len(s); { - b, n := nextByte(s, j) - if n == 0 { - break - } - writeTXTStringByte(&out, b) - j += n - } - out.WriteByte('"') - } - return out.String() -} - -func writeDomainNameByte(s *strings.Builder, b byte) { - switch b { - case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape - s.WriteByte('\\') - s.WriteByte(b) - default: - writeTXTStringByte(s, b) - } -} - -func writeTXTStringByte(s *strings.Builder, b byte) { - switch { - case b == '"' || b == '\\': - s.WriteByte('\\') - s.WriteByte(b) - case b < ' ' || b > '~': - s.WriteString(escapeByte(b)) - default: - s.WriteByte(b) - } -} - -const ( - escapedByteSmall = "" + - `\000\001\002\003\004\005\006\007\008\009` + - `\010\011\012\013\014\015\016\017\018\019` + - `\020\021\022\023\024\025\026\027\028\029` + - `\030\031` - escapedByteLarge = `\127\128\129` + - `\130\131\132\133\134\135\136\137\138\139` + - `\140\141\142\143\144\145\146\147\148\149` + - `\150\151\152\153\154\155\156\157\158\159` + - `\160\161\162\163\164\165\166\167\168\169` + - `\170\171\172\173\174\175\176\177\178\179` + - `\180\181\182\183\184\185\186\187\188\189` + - `\190\191\192\193\194\195\196\197\198\199` + - `\200\201\202\203\204\205\206\207\208\209` + - `\210\211\212\213\214\215\216\217\218\219` + - `\220\221\222\223\224\225\226\227\228\229` + - `\230\231\232\233\234\235\236\237\238\239` + - `\240\241\242\243\244\245\246\247\248\249` + - `\250\251\252\253\254\255` -) - -// escapeByte returns the 
\DDD escaping of b which must -// satisfy b < ' ' || b > '~'. -func escapeByte(b byte) string { - if b < ' ' { - return escapedByteSmall[b*4 : b*4+4] - } - - b -= '~' + 1 - // The cast here is needed as b*4 may overflow byte. - return escapedByteLarge[int(b)*4 : int(b)*4+4] -} - -func nextByte(s string, offset int) (byte, int) { - if offset >= len(s) { - return 0, 0 - } - if s[offset] != '\\' { - // not an escape sequence - return s[offset], 1 - } - switch len(s) - offset { - case 1: // dangling escape - return 0, 0 - case 2, 3: // too short to be \ddd - default: // maybe \ddd - if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) { - return dddStringToByte(s[offset+1:]), 4 - } - } - // not \ddd, just an RFC 1035 "quoted" character - return s[offset+1], 2 -} - -// SPF RR. See RFC 4408, Section 3.1.1. -type SPF struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -// AVC RR. See https://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template. -type AVC struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *AVC) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -// SRV RR. See RFC 2782. -type SRV struct { - Hdr RR_Header - Priority uint16 - Weight uint16 - Port uint16 - Target string `dns:"domain-name"` -} - -func (rr *SRV) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Priority)) + " " + - strconv.Itoa(int(rr.Weight)) + " " + - strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) -} - -// NAPTR RR. See RFC 2915. 
-type NAPTR struct { - Hdr RR_Header - Order uint16 - Preference uint16 - Flags string - Service string - Regexp string - Replacement string `dns:"domain-name"` -} - -func (rr *NAPTR) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Order)) + " " + - strconv.Itoa(int(rr.Preference)) + " " + - "\"" + rr.Flags + "\" " + - "\"" + rr.Service + "\" " + - "\"" + rr.Regexp + "\" " + - rr.Replacement -} - -// CERT RR. See RFC 4398. -type CERT struct { - Hdr RR_Header - Type uint16 - KeyTag uint16 - Algorithm uint8 - Certificate string `dns:"base64"` -} - -func (rr *CERT) String() string { - var ( - ok bool - certtype, algorithm string - ) - if certtype, ok = CertTypeToString[rr.Type]; !ok { - certtype = strconv.Itoa(int(rr.Type)) - } - if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { - algorithm = strconv.Itoa(int(rr.Algorithm)) - } - return rr.Hdr.String() + certtype + - " " + strconv.Itoa(int(rr.KeyTag)) + - " " + algorithm + - " " + rr.Certificate -} - -// DNAME RR. See RFC 2672. -type DNAME struct { - Hdr RR_Header - Target string `dns:"domain-name"` -} - -func (rr *DNAME) String() string { - return rr.Hdr.String() + sprintName(rr.Target) -} - -// A RR. See RFC 1035. -type A struct { - Hdr RR_Header - A net.IP `dns:"a"` -} - -func (rr *A) String() string { - if rr.A == nil { - return rr.Hdr.String() - } - return rr.Hdr.String() + rr.A.String() -} - -// AAAA RR. See RFC 3596. -type AAAA struct { - Hdr RR_Header - AAAA net.IP `dns:"aaaa"` -} - -func (rr *AAAA) String() string { - if rr.AAAA == nil { - return rr.Hdr.String() - } - return rr.Hdr.String() + rr.AAAA.String() -} - -// PX RR. See RFC 2163. -type PX struct { - Hdr RR_Header - Preference uint16 - Map822 string `dns:"domain-name"` - Mapx400 string `dns:"domain-name"` -} - -func (rr *PX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) -} - -// GPOS RR. See RFC 1712. 
-type GPOS struct { - Hdr RR_Header - Longitude string - Latitude string - Altitude string -} - -func (rr *GPOS) String() string { - return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude -} - -// LOC RR. See RFC RFC 1876. -type LOC struct { - Hdr RR_Header - Version uint8 - Size uint8 - HorizPre uint8 - VertPre uint8 - Latitude uint32 - Longitude uint32 - Altitude uint32 -} - -// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent -// format and returns a string in m (two decimals for the cm) -func cmToM(m, e uint8) string { - if e < 2 { - if e == 1 { - m *= 10 - } - - return fmt.Sprintf("0.%02d", m) - } - - s := fmt.Sprintf("%d", m) - for e > 2 { - s += "0" - e-- - } - return s -} - -func (rr *LOC) String() string { - s := rr.Hdr.String() - - lat := rr.Latitude - ns := "N" - if lat > LOC_EQUATOR { - lat = lat - LOC_EQUATOR - } else { - ns = "S" - lat = LOC_EQUATOR - lat - } - h := lat / LOC_DEGREES - lat = lat % LOC_DEGREES - m := lat / LOC_HOURS - lat = lat % LOC_HOURS - s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, float64(lat)/1000, ns) - - lon := rr.Longitude - ew := "E" - if lon > LOC_PRIMEMERIDIAN { - lon = lon - LOC_PRIMEMERIDIAN - } else { - ew = "W" - lon = LOC_PRIMEMERIDIAN - lon - } - h = lon / LOC_DEGREES - lon = lon % LOC_DEGREES - m = lon / LOC_HOURS - lon = lon % LOC_HOURS - s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, float64(lon)/1000, ew) - - var alt = float64(rr.Altitude) / 100 - alt -= LOC_ALTITUDEBASE - if rr.Altitude%100 != 0 { - s += fmt.Sprintf("%.2fm ", alt) - } else { - s += fmt.Sprintf("%.0fm ", alt) - } - - s += cmToM(rr.Size&0xf0>>4, rr.Size&0x0f) + "m " - s += cmToM(rr.HorizPre&0xf0>>4, rr.HorizPre&0x0f) + "m " - s += cmToM(rr.VertPre&0xf0>>4, rr.VertPre&0x0f) + "m" - - return s -} - -// SIG RR. See RFC 2535. The SIG RR is identical to RRSIG and nowadays only used for SIG(0), See RFC 2931. -type SIG struct { - RRSIG -} - -// RRSIG RR. See RFC 4034 and RFC 3755. 
-type RRSIG struct { - Hdr RR_Header - TypeCovered uint16 - Algorithm uint8 - Labels uint8 - OrigTtl uint32 - Expiration uint32 - Inception uint32 - KeyTag uint16 - SignerName string `dns:"domain-name"` - Signature string `dns:"base64"` -} - -func (rr *RRSIG) String() string { - s := rr.Hdr.String() - s += Type(rr.TypeCovered).String() - s += " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.Labels)) + - " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + - " " + TimeToString(rr.Expiration) + - " " + TimeToString(rr.Inception) + - " " + strconv.Itoa(int(rr.KeyTag)) + - " " + sprintName(rr.SignerName) + - " " + rr.Signature - return s -} - -// NSEC RR. See RFC 4034 and RFC 3755. -type NSEC struct { - Hdr RR_Header - NextDomain string `dns:"domain-name"` - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *NSEC) String() string { - s := rr.Hdr.String() + sprintName(rr.NextDomain) - for _, t := range rr.TypeBitMap { - s += " " + Type(t).String() - } - return s -} - -func (rr *NSEC) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.NextDomain, off+l, compression, false) - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - -// DLV RR. See RFC 4431. -type DLV struct{ DS } - -// CDS RR. See RFC 7344. -type CDS struct{ DS } - -// DS RR. See RFC 4034 and RFC 3658. -type DS struct { - Hdr RR_Header - KeyTag uint16 - Algorithm uint8 - DigestType uint8 - Digest string `dns:"hex"` -} - -func (rr *DS) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.DigestType)) + - " " + strings.ToUpper(rr.Digest) -} - -// KX RR. See RFC 2230. 
-type KX struct { - Hdr RR_Header - Preference uint16 - Exchanger string `dns:"domain-name"` -} - -func (rr *KX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + - " " + sprintName(rr.Exchanger) -} - -// TA RR. See http://www.watson.org/~weiler/INI1999-19.pdf. -type TA struct { - Hdr RR_Header - KeyTag uint16 - Algorithm uint8 - DigestType uint8 - Digest string `dns:"hex"` -} - -func (rr *TA) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.DigestType)) + - " " + strings.ToUpper(rr.Digest) -} - -// TALINK RR. See https://www.iana.org/assignments/dns-parameters/TALINK/talink-completed-template. -type TALINK struct { - Hdr RR_Header - PreviousName string `dns:"domain-name"` - NextName string `dns:"domain-name"` -} - -func (rr *TALINK) String() string { - return rr.Hdr.String() + - sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) -} - -// SSHFP RR. See RFC RFC 4255. -type SSHFP struct { - Hdr RR_Header - Algorithm uint8 - Type uint8 - FingerPrint string `dns:"hex"` -} - -func (rr *SSHFP) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.Type)) + - " " + strings.ToUpper(rr.FingerPrint) -} - -// KEY RR. See RFC RFC 2535. -type KEY struct { - DNSKEY -} - -// CDNSKEY RR. See RFC 7344. -type CDNSKEY struct { - DNSKEY -} - -// DNSKEY RR. See RFC 4034 and RFC 3755. -type DNSKEY struct { - Hdr RR_Header - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` -} - -func (rr *DNSKEY) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Protocol)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + rr.PublicKey -} - -// RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template. 
-type RKEY struct { - Hdr RR_Header - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` -} - -func (rr *RKEY) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Protocol)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + rr.PublicKey -} - -// NSAPPTR RR. See RFC 1348. -type NSAPPTR struct { - Hdr RR_Header - Ptr string `dns:"domain-name"` -} - -func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } - -// NSEC3 RR. See RFC 5155. -type NSEC3 struct { - Hdr RR_Header - Hash uint8 - Flags uint8 - Iterations uint16 - SaltLength uint8 - Salt string `dns:"size-hex:SaltLength"` - HashLength uint8 - NextDomain string `dns:"size-base32:HashLength"` - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *NSEC3) String() string { - s := rr.Hdr.String() - s += strconv.Itoa(int(rr.Hash)) + - " " + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Iterations)) + - " " + saltToString(rr.Salt) + - " " + rr.NextDomain - for _, t := range rr.TypeBitMap { - s += " " + Type(t).String() - } - return s -} - -func (rr *NSEC3) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - -// NSEC3PARAM RR. See RFC 5155. -type NSEC3PARAM struct { - Hdr RR_Header - Hash uint8 - Flags uint8 - Iterations uint16 - SaltLength uint8 - Salt string `dns:"size-hex:SaltLength"` -} - -func (rr *NSEC3PARAM) String() string { - s := rr.Hdr.String() - s += strconv.Itoa(int(rr.Hash)) + - " " + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Iterations)) + - " " + saltToString(rr.Salt) - return s -} - -// TKEY RR. See RFC 2930. 
-type TKEY struct { - Hdr RR_Header - Algorithm string `dns:"domain-name"` - Inception uint32 - Expiration uint32 - Mode uint16 - Error uint16 - KeySize uint16 - Key string `dns:"size-hex:KeySize"` - OtherLen uint16 - OtherData string `dns:"size-hex:OtherLen"` -} - -// TKEY has no official presentation format, but this will suffice. -func (rr *TKEY) String() string { - s := ";" + rr.Hdr.String() + - " " + rr.Algorithm + - " " + TimeToString(rr.Inception) + - " " + TimeToString(rr.Expiration) + - " " + strconv.Itoa(int(rr.Mode)) + - " " + strconv.Itoa(int(rr.Error)) + - " " + strconv.Itoa(int(rr.KeySize)) + - " " + rr.Key + - " " + strconv.Itoa(int(rr.OtherLen)) + - " " + rr.OtherData - return s -} - -// RFC3597 represents an unknown/generic RR. See RFC 3597. -type RFC3597 struct { - Hdr RR_Header - Rdata string `dns:"hex"` -} - -func (rr *RFC3597) String() string { - // Let's call it a hack - s := rfc3597Header(rr.Hdr) - - s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata - return s -} - -func rfc3597Header(h RR_Header) string { - var s string - - s += sprintName(h.Name) + "\t" - s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" - s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" - s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" - return s -} - -// URI RR. See RFC 7553. -type URI struct { - Hdr RR_Header - Priority uint16 - Weight uint16 - Target string `dns:"octet"` -} - -func (rr *URI) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + - " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) -} - -// DHCID RR. See RFC 4701. -type DHCID struct { - Hdr RR_Header - Digest string `dns:"base64"` -} - -func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } - -// TLSA RR. See RFC 6698. 
-type TLSA struct { - Hdr RR_Header - Usage uint8 - Selector uint8 - MatchingType uint8 - Certificate string `dns:"hex"` -} - -func (rr *TLSA) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Usage)) + - " " + strconv.Itoa(int(rr.Selector)) + - " " + strconv.Itoa(int(rr.MatchingType)) + - " " + rr.Certificate -} - -// SMIMEA RR. See RFC 8162. -type SMIMEA struct { - Hdr RR_Header - Usage uint8 - Selector uint8 - MatchingType uint8 - Certificate string `dns:"hex"` -} - -func (rr *SMIMEA) String() string { - s := rr.Hdr.String() + - strconv.Itoa(int(rr.Usage)) + - " " + strconv.Itoa(int(rr.Selector)) + - " " + strconv.Itoa(int(rr.MatchingType)) - - // Every Nth char needs a space on this output. If we output - // this as one giant line, we can't read it can in because in some cases - // the cert length overflows scan.maxTok (2048). - sx := splitN(rr.Certificate, 1024) // conservative value here - s += " " + strings.Join(sx, " ") - return s -} - -// HIP RR. See RFC 8005. -type HIP struct { - Hdr RR_Header - HitLength uint8 - PublicKeyAlgorithm uint8 - PublicKeyLength uint16 - Hit string `dns:"size-hex:HitLength"` - PublicKey string `dns:"size-base64:PublicKeyLength"` - RendezvousServers []string `dns:"domain-name"` -} - -func (rr *HIP) String() string { - s := rr.Hdr.String() + - strconv.Itoa(int(rr.PublicKeyAlgorithm)) + - " " + rr.Hit + - " " + rr.PublicKey - for _, d := range rr.RendezvousServers { - s += " " + sprintName(d) - } - return s -} - -// NINFO RR. See https://www.iana.org/assignments/dns-parameters/NINFO/ninfo-completed-template. -type NINFO struct { - Hdr RR_Header - ZSData []string `dns:"txt"` -} - -func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } - -// NID RR. See RFC RFC 6742. 
-type NID struct { - Hdr RR_Header - Preference uint16 - NodeID uint64 -} - -func (rr *NID) String() string { - s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - node := fmt.Sprintf("%0.16x", rr.NodeID) - s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] - return s -} - -// L32 RR, See RFC 6742. -type L32 struct { - Hdr RR_Header - Preference uint16 - Locator32 net.IP `dns:"a"` -} - -func (rr *L32) String() string { - if rr.Locator32 == nil { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - } - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + - " " + rr.Locator32.String() -} - -// L64 RR, See RFC 6742. -type L64 struct { - Hdr RR_Header - Preference uint16 - Locator64 uint64 -} - -func (rr *L64) String() string { - s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - node := fmt.Sprintf("%0.16X", rr.Locator64) - s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] - return s -} - -// LP RR. See RFC 6742. -type LP struct { - Hdr RR_Header - Preference uint16 - Fqdn string `dns:"domain-name"` -} - -func (rr *LP) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) -} - -// EUI48 RR. See RFC 7043. -type EUI48 struct { - Hdr RR_Header - Address uint64 `dns:"uint48"` -} - -func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } - -// EUI64 RR. See RFC 7043. -type EUI64 struct { - Hdr RR_Header - Address uint64 -} - -func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } - -// CAA RR. See RFC 6844. -type CAA struct { - Hdr RR_Header - Flag uint8 - Tag string - Value string `dns:"octet"` -} - -func (rr *CAA) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) -} - -// UID RR. Deprecated, IANA-Reserved. 
-type UID struct { - Hdr RR_Header - Uid uint32 -} - -func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } - -// GID RR. Deprecated, IANA-Reserved. -type GID struct { - Hdr RR_Header - Gid uint32 -} - -func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } - -// UINFO RR. Deprecated, IANA-Reserved. -type UINFO struct { - Hdr RR_Header - Uinfo string -} - -func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } - -// EID RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. -type EID struct { - Hdr RR_Header - Endpoint string `dns:"hex"` -} - -func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } - -// NIMLOC RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. -type NIMLOC struct { - Hdr RR_Header - Locator string `dns:"hex"` -} - -func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } - -// OPENPGPKEY RR. See RFC 7929. -type OPENPGPKEY struct { - Hdr RR_Header - PublicKey string `dns:"base64"` -} - -func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } - -// CSYNC RR. See RFC 7477. -type CSYNC struct { - Hdr RR_Header - Serial uint32 - Flags uint16 - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *CSYNC) String() string { - s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.Itoa(int(rr.Flags)) - - for _, t := range rr.TypeBitMap { - s += " " + Type(t).String() - } - return s -} - -func (rr *CSYNC) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 4 + 2 - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - -// TimeToString translates the RRSIG's incep. and expir. times to the -// string representation used when printing the record. 
-// It takes serial arithmetic (RFC 1982) into account. -func TimeToString(t uint32) string { - mod := (int64(t)-time.Now().Unix())/year68 - 1 - if mod < 0 { - mod = 0 - } - ti := time.Unix(int64(t)-mod*year68, 0).UTC() - return ti.Format("20060102150405") -} - -// StringToTime translates the RRSIG's incep. and expir. times from -// string values like "20110403154150" to an 32 bit integer. -// It takes serial arithmetic (RFC 1982) into account. -func StringToTime(s string) (uint32, error) { - t, err := time.Parse("20060102150405", s) - if err != nil { - return 0, err - } - mod := t.Unix()/year68 - 1 - if mod < 0 { - mod = 0 - } - return uint32(t.Unix() - mod*year68), nil -} - -// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty. -func saltToString(s string) string { - if len(s) == 0 { - return "-" - } - return strings.ToUpper(s) -} - -func euiToString(eui uint64, bits int) (hex string) { - switch bits { - case 64: - hex = fmt.Sprintf("%16.16x", eui) - hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + - "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16] - case 48: - hex = fmt.Sprintf("%12.12x", eui) - hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + - "-" + hex[8:10] + "-" + hex[10:12] - } - return -} - -// copyIP returns a copy of ip. -func copyIP(ip net.IP) net.IP { - p := make(net.IP, len(ip)) - copy(p, ip) - return p -} - -// SplitN splits a string into N sized string chunks. -// This might become an exported function once. 
-func splitN(s string, n int) []string { - if len(s) < n { - return []string{s} - } - sx := []string{} - p, i := 0, n - for { - if i <= len(s) { - sx = append(sx, s[p:i]) - } else { - sx = append(sx, s[p:]) - break - - } - p, i = p+n, i+n - } - - return sx -} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go deleted file mode 100644 index cbb4a00c1..000000000 --- a/vendor/github.com/miekg/dns/types_generate.go +++ /dev/null @@ -1,287 +0,0 @@ -//+build ignore - -// types_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate conversion tables (TypeToRR and TypeToString) and banal -// methods (len, Header, copy) based on the struct tags. The generated source is -// written to ztypes.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" - "text/template" -) - -var skipLen = map[string]struct{}{ - "NSEC": {}, - "NSEC3": {}, - "OPT": {}, - "CSYNC": {}, -} - -var packageHdr = ` -// Code generated by "go run types_generate.go"; DO NOT EDIT. - -package dns - -import ( - "encoding/base64" - "net" -) - -` - -var TypeToRR = template.Must(template.New("TypeToRR").Parse(` -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ -{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, -{{end}}{{end}} } - -`)) - -var typeToString = template.Must(template.New("typeToString").Parse(` -// TypeToString is a map of strings for each RR type. -var TypeToString = map[uint16]string{ -{{range .}}{{if ne . 
"NSAPPTR"}} Type{{.}}: "{{.}}", -{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", -} - -`)) - -var headerFunc = template.Must(template.New("headerFunc").Parse(` -{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } -{{end}} - -`)) - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect constants like TypeX - var numberedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - b, ok := o.Type().(*types.Basic) - if !ok || b.Kind() != types.Uint16 { - continue - } - if !strings.HasPrefix(o.Name(), "Type") { - continue - } - name := strings.TrimPrefix(o.Name(), "Type") - if name == "PrivateRR" { - continue - } - numberedTypes = append(numberedTypes, name) - } - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does 
not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate TypeToRR - fatalIfErr(TypeToRR.Execute(b, namedTypes)) - - // Generate typeToString - fatalIfErr(typeToString.Execute(b, numberedTypes)) - - // Generate headerFunc - fatalIfErr(headerFunc.Execute(b, namedTypes)) - - // Generate len() - fmt.Fprint(b, "// len() functions\n") - for _, name := range namedTypes { - if _, ok := skipLen[name]; ok { - continue - } - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) len(off int, compression map[string]struct{}) int {\n", name) - fmt.Fprintf(b, "l := rr.Hdr.len(off, compression)\n") - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"cdomain-name"`: - o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, true) }\n") - case `dns:"domain-name"`: - o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, false) }\n") - case `dns:"txt"`: - o("for _, x := range rr.%s { l += len(x) + 1 }\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: - // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - o("l += domainNameLen(rr.%s, off+l, compression, true)\n") - case st.Tag(i) == `dns:"domain-name"`: - o("l += domainNameLen(rr.%s, off+l, compression, false)\n") - case st.Tag(i) == `dns:"octet"`: - o("l += len(rr.%s)\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored - o("l += len(rr.%s)/2\n") - case 
strings.HasPrefix(st.Tag(i), `dns:"size-hex`): - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("l += len(rr.%s)/2 + 1\n") - case st.Tag(i) == `dns:"any"`: - o("l += len(rr.%s)\n") - case st.Tag(i) == `dns:"a"`: - o("if len(rr.%s) != 0 { l += net.IPv4len }\n") - case st.Tag(i) == `dns:"aaaa"`: - o("if len(rr.%s) != 0 { l += net.IPv6len }\n") - case st.Tag(i) == `dns:"txt"`: - o("for _, t := range rr.%s { l += len(t) + 1 }\n") - case st.Tag(i) == `dns:"uint48"`: - o("l += 6 // %s\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("l++ // %s\n") - case types.Uint16: - o("l += 2 // %s\n") - case types.Uint32: - o("l += 4 // %s\n") - case types.Uint64: - o("l += 8 // %s\n") - case types.String: - o("l += len(rr.%s) + 1\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintf(b, "return l }\n") - } - - // Generate copy() - fmt.Fprint(b, "// copy() functions\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) - fields := []string{"rr.Hdr"} - for i := 1; i < st.NumFields(); i++ { - f := st.Field(i).Name() - if sl, ok := st.Field(i).Type().(*types.Slice); ok { - t := sl.Underlying().String() - t = strings.TrimPrefix(t, "[]") - if strings.Contains(t, ".") { - splits := strings.Split(t, ".") - t = splits[len(splits)-1] - } - // For the EDNS0 interface (used in the OPT RR), we need to call the copy method on each element. 
- if t == "EDNS0" { - fmt.Fprintf(b, "%s := make([]%s, len(rr.%s));\nfor i,e := range rr.%s {\n %s[i] = e.copy()\n}\n", - f, t, f, f, f) - fields = append(fields, f) - continue - } - fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", - f, t, f, f, f) - fields = append(fields, f) - continue - } - if st.Field(i).Type().String() == "net.IP" { - fields = append(fields, "copyIP(rr."+f+")") - continue - } - fields = append(fields, "rr."+f) - } - fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) - fmt.Fprintf(b, "}\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("ztypes.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go deleted file mode 100644 index a4826ee2f..000000000 --- a/vendor/github.com/miekg/dns/udp.go +++ /dev/null @@ -1,102 +0,0 @@ -// +build !windows - -package dns - -import ( - "net" - - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -// This is the required size of the OOB buffer to pass to ReadMsgUDP. -var udpOOBSize = func() int { - // We can't know whether we'll get an IPv4 control message or an - // IPv6 control message ahead of time. To get around this, we size - // the buffer equal to the largest of the two. - - oob4 := ipv4.NewControlMessage(ipv4.FlagDst | ipv4.FlagInterface) - oob6 := ipv6.NewControlMessage(ipv6.FlagDst | ipv6.FlagInterface) - - if len(oob4) > len(oob6) { - return len(oob4) - } - - return len(oob6) -}() - -// SessionUDP holds the remote address and the associated -// out-of-band data. -type SessionUDP struct { - raddr *net.UDPAddr - context []byte -} - -// RemoteAddr returns the remote network address. 
-func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } - -// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a -// net.UDPAddr. -func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { - oob := make([]byte, udpOOBSize) - n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) - if err != nil { - return n, nil, err - } - return n, &SessionUDP{raddr, oob[:oobn]}, err -} - -// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. -func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { - oob := correctSource(session.context) - n, _, err := conn.WriteMsgUDP(b, oob, session.raddr) - return n, err -} - -func setUDPSocketOptions(conn *net.UDPConn) error { - // Try setting the flags for both families and ignore the errors unless they - // both error. - err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true) - err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true) - if err6 != nil && err4 != nil { - return err4 - } - return nil -} - -// parseDstFromOOB takes oob data and returns the destination IP. -func parseDstFromOOB(oob []byte) net.IP { - // Start with IPv6 and then fallback to IPv4 - // TODO(fastest963): Figure out a way to prefer one or the other. Looking at - // the lvl of the header for a 0 or 41 isn't cross-platform. 
- cm6 := new(ipv6.ControlMessage) - if cm6.Parse(oob) == nil && cm6.Dst != nil { - return cm6.Dst - } - cm4 := new(ipv4.ControlMessage) - if cm4.Parse(oob) == nil && cm4.Dst != nil { - return cm4.Dst - } - return nil -} - -// correctSource takes oob data and returns new oob data with the Src equal to the Dst -func correctSource(oob []byte) []byte { - dst := parseDstFromOOB(oob) - if dst == nil { - return nil - } - // If the dst is definitely an IPv6, then use ipv6's ControlMessage to - // respond otherwise use ipv4's because ipv6's marshal ignores ipv4 - // addresses. - if dst.To4() == nil { - cm := new(ipv6.ControlMessage) - cm.Src = dst - oob = cm.Marshal() - } else { - cm := new(ipv4.ControlMessage) - cm.Src = dst - oob = cm.Marshal() - } - return oob -} diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go deleted file mode 100644 index e7dd8ca31..000000000 --- a/vendor/github.com/miekg/dns/udp_windows.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build windows - -package dns - -import "net" - -// SessionUDP holds the remote address -type SessionUDP struct { - raddr *net.UDPAddr -} - -// RemoteAddr returns the remote network address. -func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } - -// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a -// net.UDPAddr. -// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP. -func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { - n, raddr, err := conn.ReadFrom(b) - if err != nil { - return n, nil, err - } - return n, &SessionUDP{raddr.(*net.UDPAddr)}, err -} - -// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. -// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP. 
-func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { - return conn.WriteTo(b, session.raddr) -} - -// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods -// use the standard method in udp.go for these. -func setUDPSocketOptions(*net.UDPConn) error { return nil } -func parseDstFromOOB([]byte, net.IP) net.IP { return nil } diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go deleted file mode 100644 index 69dd38652..000000000 --- a/vendor/github.com/miekg/dns/update.go +++ /dev/null @@ -1,110 +0,0 @@ -package dns - -// NameUsed sets the RRs in the prereq section to -// "Name is in use" RRs. RFC 2136 section 2.4.4. -func (u *Msg) NameUsed(rr []RR) { - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) - } -} - -// NameNotUsed sets the RRs in the prereq section to -// "Name is in not use" RRs. RFC 2136 section 2.4.5. -func (u *Msg) NameNotUsed(rr []RR) { - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}) - } -} - -// Used sets the RRs in the prereq section to -// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2. -func (u *Msg) Used(rr []RR) { - if len(u.Question) == 0 { - panic("dns: empty question section") - } - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - r.Header().Class = u.Question[0].Qclass - u.Answer = append(u.Answer, r) - } -} - -// RRsetUsed sets the RRs in the prereq section to -// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1. 
-func (u *Msg) RRsetUsed(rr []RR) { - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - h := r.Header() - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}}) - } -} - -// RRsetNotUsed sets the RRs in the prereq section to -// "RRset does not exist" RRs. RFC 2136 section 2.4.3. -func (u *Msg) RRsetNotUsed(rr []RR) { - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - h := r.Header() - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassNONE}}) - } -} - -// Insert creates a dynamic update packet that adds an complete RRset, see RFC 2136 section 2.5.1. -func (u *Msg) Insert(rr []RR) { - if len(u.Question) == 0 { - panic("dns: empty question section") - } - if u.Ns == nil { - u.Ns = make([]RR, 0, len(rr)) - } - for _, r := range rr { - r.Header().Class = u.Question[0].Qclass - u.Ns = append(u.Ns, r) - } -} - -// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2. 
-func (u *Msg) RemoveRRset(rr []RR) { - if u.Ns == nil { - u.Ns = make([]RR, 0, len(rr)) - } - for _, r := range rr { - h := r.Header() - u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}}) - } -} - -// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3 -func (u *Msg) RemoveName(rr []RR) { - if u.Ns == nil { - u.Ns = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) - } -} - -// Remove creates a dynamic update packet deletes RR from a RRSset, see RFC 2136 section 2.5.4 -func (u *Msg) Remove(rr []RR) { - if u.Ns == nil { - u.Ns = make([]RR, 0, len(rr)) - } - for _, r := range rr { - h := r.Header() - h.Class = ClassNONE - h.Ttl = 0 - u.Ns = append(u.Ns, r) - } -} diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go deleted file mode 100644 index a25162fc4..000000000 --- a/vendor/github.com/miekg/dns/version.go +++ /dev/null @@ -1,15 +0,0 @@ -package dns - -import "fmt" - -// Version is current version of this library. -var Version = V{1, 1, 9} - -// V holds the version of this library. -type V struct { - Major, Minor, Patch int -} - -func (v V) String() string { - return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) -} diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go deleted file mode 100644 index 82afc52ea..000000000 --- a/vendor/github.com/miekg/dns/xfr.go +++ /dev/null @@ -1,260 +0,0 @@ -package dns - -import ( - "fmt" - "time" -) - -// Envelope is used when doing a zone transfer with a remote server. -type Envelope struct { - RR []RR // The set of RRs in the answer section of the xfr reply message. - Error error // If something went wrong, this contains the error. -} - -// A Transfer defines parameters that are used during a zone transfer. 
-type Transfer struct { - *Conn - DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) - tsigTimersOnly bool -} - -// Think we need to away to stop the transfer - -// In performs an incoming transfer with the server in a. -// If you would like to set the source IP, or some other attribute -// of a Dialer for a Transfer, you can do so by specifying the attributes -// in the Transfer.Conn: -// -// d := net.Dialer{LocalAddr: transfer_source} -// con, err := d.Dial("tcp", master) -// dnscon := &dns.Conn{Conn:con} -// transfer = &dns.Transfer{Conn: dnscon} -// channel, err := transfer.In(message, master) -// -func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { - switch q.Question[0].Qtype { - case TypeAXFR, TypeIXFR: - default: - return nil, &Error{"unsupported question type"} - } - - timeout := dnsTimeout - if t.DialTimeout != 0 { - timeout = t.DialTimeout - } - - if t.Conn == nil { - t.Conn, err = DialTimeout("tcp", a, timeout) - if err != nil { - return nil, err - } - } - - if err := t.WriteMsg(q); err != nil { - return nil, err - } - - env = make(chan *Envelope) - switch q.Question[0].Qtype { - case TypeAXFR: - go t.inAxfr(q, env) - case TypeIXFR: - go t.inIxfr(q, env) - } - - return env, nil -} - -func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) { - first := true - defer t.Close() - defer close(c) - timeout := dnsTimeout - if t.ReadTimeout != 0 { - timeout = t.ReadTimeout - } - for { - t.Conn.SetReadDeadline(time.Now().Add(timeout)) - in, err := t.ReadMsg() - if err != nil { - c <- &Envelope{nil, err} - return - } - if q.Id != in.Id { - c <- &Envelope{in.Answer, ErrId} - return - } - if 
first { - if in.Rcode != RcodeSuccess { - c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} - return - } - if !isSOAFirst(in) { - c <- &Envelope{in.Answer, ErrSoa} - return - } - first = !first - // only one answer that is SOA, receive more - if len(in.Answer) == 1 { - t.tsigTimersOnly = true - c <- &Envelope{in.Answer, nil} - continue - } - } - - if !first { - t.tsigTimersOnly = true // Subsequent envelopes use this. - if isSOALast(in) { - c <- &Envelope{in.Answer, nil} - return - } - c <- &Envelope{in.Answer, nil} - } - } -} - -func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { - var serial uint32 // The first serial seen is the current server serial - axfr := true - n := 0 - qser := q.Ns[0].(*SOA).Serial - defer t.Close() - defer close(c) - timeout := dnsTimeout - if t.ReadTimeout != 0 { - timeout = t.ReadTimeout - } - for { - t.SetReadDeadline(time.Now().Add(timeout)) - in, err := t.ReadMsg() - if err != nil { - c <- &Envelope{nil, err} - return - } - if q.Id != in.Id { - c <- &Envelope{in.Answer, ErrId} - return - } - if in.Rcode != RcodeSuccess { - c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} - return - } - if n == 0 { - // Check if the returned answer is ok - if !isSOAFirst(in) { - c <- &Envelope{in.Answer, ErrSoa} - return - } - // This serial is important - serial = in.Answer[0].(*SOA).Serial - // Check if there are no changes in zone - if qser >= serial { - c <- &Envelope{in.Answer, nil} - return - } - } - // Now we need to check each message for SOA records, to see what we need to do - t.tsigTimersOnly = true - for _, rr := range in.Answer { - if v, ok := rr.(*SOA); ok { - if v.Serial == serial { - n++ - // quit if it's a full axfr or the the servers' SOA is repeated the third time - if axfr && n == 2 || n == 3 { - c <- &Envelope{in.Answer, nil} - return - } - } else if axfr { - // it's an ixfr - axfr = false - } - } - } - c <- &Envelope{in.Answer, nil} - } -} - -// Out performs an outgoing transfer with 
the client connecting in w. -// Basic use pattern: -// -// ch := make(chan *dns.Envelope) -// tr := new(dns.Transfer) -// go tr.Out(w, r, ch) -// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} -// close(ch) -// w.Hijack() -// // w.Close() // Client closes connection -// -// The server is responsible for sending the correct sequence of RRs through the -// channel ch. -func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { - for x := range ch { - r := new(Msg) - // Compress? - r.SetReply(q) - r.Authoritative = true - // assume it fits TODO(miek): fix - r.Answer = append(r.Answer, x.RR...) - if err := w.WriteMsg(r); err != nil { - return err - } - } - w.TsigTimersOnly(true) - return nil -} - -// ReadMsg reads a message from the transfer connection t. -func (t *Transfer) ReadMsg() (*Msg, error) { - m := new(Msg) - p := make([]byte, MaxMsgSize) - n, err := t.Read(p) - if err != nil && n == 0 { - return nil, err - } - p = p[:n] - if err := m.Unpack(p); err != nil { - return nil, err - } - if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { - if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { - return m, ErrSecret - } - // Need to work on the original message p, as that was used to calculate the tsig. - err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) - t.tsigRequestMAC = ts.MAC - } - return m, err -} - -// WriteMsg writes a message through the transfer connection t. 
-func (t *Transfer) WriteMsg(m *Msg) (err error) { - var out []byte - if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { - if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { - return ErrSecret - } - out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) - } else { - out, err = m.Pack() - } - if err != nil { - return err - } - _, err = t.Write(out) - return err -} - -func isSOAFirst(in *Msg) bool { - return len(in.Answer) > 0 && - in.Answer[0].Header().Rrtype == TypeSOA -} - -func isSOALast(in *Msg) bool { - return len(in.Answer) > 0 && - in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA -} - -const errXFR = "bad xfr rcode: %d" diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go deleted file mode 100644 index 74389162f..000000000 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ /dev/null @@ -1,1140 +0,0 @@ -// Code generated by "go run duplicate_generate.go"; DO NOT EDIT. - -package dns - -// isDuplicate() functions - -func (r1 *A) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*A) - if !ok { - return false - } - _ = r2 - if !r1.A.Equal(r2.A) { - return false - } - return true -} - -func (r1 *AAAA) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*AAAA) - if !ok { - return false - } - _ = r2 - if !r1.AAAA.Equal(r2.AAAA) { - return false - } - return true -} - -func (r1 *AFSDB) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*AFSDB) - if !ok { - return false - } - _ = r2 - if r1.Subtype != r2.Subtype { - return false - } - if !isDuplicateName(r1.Hostname, r2.Hostname) { - return false - } - return true -} - -func (r1 *ANY) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*ANY) - if !ok { - return false - } - _ = r2 - return true -} - -func (r1 *AVC) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*AVC) - if !ok { - return false - } - _ = r2 - if len(r1.Txt) != len(r2.Txt) { - return false - } - for i := 0; i < len(r1.Txt); i++ { - if r1.Txt[i] != r2.Txt[i] { - return false - } - } - 
return true -} - -func (r1 *CAA) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*CAA) - if !ok { - return false - } - _ = r2 - if r1.Flag != r2.Flag { - return false - } - if r1.Tag != r2.Tag { - return false - } - if r1.Value != r2.Value { - return false - } - return true -} - -func (r1 *CERT) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*CERT) - if !ok { - return false - } - _ = r2 - if r1.Type != r2.Type { - return false - } - if r1.KeyTag != r2.KeyTag { - return false - } - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.Certificate != r2.Certificate { - return false - } - return true -} - -func (r1 *CNAME) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*CNAME) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Target, r2.Target) { - return false - } - return true -} - -func (r1 *CSYNC) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*CSYNC) - if !ok { - return false - } - _ = r2 - if r1.Serial != r2.Serial { - return false - } - if r1.Flags != r2.Flags { - return false - } - if len(r1.TypeBitMap) != len(r2.TypeBitMap) { - return false - } - for i := 0; i < len(r1.TypeBitMap); i++ { - if r1.TypeBitMap[i] != r2.TypeBitMap[i] { - return false - } - } - return true -} - -func (r1 *DHCID) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*DHCID) - if !ok { - return false - } - _ = r2 - if r1.Digest != r2.Digest { - return false - } - return true -} - -func (r1 *DNAME) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*DNAME) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Target, r2.Target) { - return false - } - return true -} - -func (r1 *DNSKEY) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*DNSKEY) - if !ok { - return false - } - _ = r2 - if r1.Flags != r2.Flags { - return false - } - if r1.Protocol != r2.Protocol { - return false - } - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.PublicKey != r2.PublicKey { - return false - } - return true -} - -func (r1 *DS) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*DS) - if !ok { - return 
false - } - _ = r2 - if r1.KeyTag != r2.KeyTag { - return false - } - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.DigestType != r2.DigestType { - return false - } - if r1.Digest != r2.Digest { - return false - } - return true -} - -func (r1 *EID) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*EID) - if !ok { - return false - } - _ = r2 - if r1.Endpoint != r2.Endpoint { - return false - } - return true -} - -func (r1 *EUI48) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*EUI48) - if !ok { - return false - } - _ = r2 - if r1.Address != r2.Address { - return false - } - return true -} - -func (r1 *EUI64) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*EUI64) - if !ok { - return false - } - _ = r2 - if r1.Address != r2.Address { - return false - } - return true -} - -func (r1 *GID) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*GID) - if !ok { - return false - } - _ = r2 - if r1.Gid != r2.Gid { - return false - } - return true -} - -func (r1 *GPOS) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*GPOS) - if !ok { - return false - } - _ = r2 - if r1.Longitude != r2.Longitude { - return false - } - if r1.Latitude != r2.Latitude { - return false - } - if r1.Altitude != r2.Altitude { - return false - } - return true -} - -func (r1 *HINFO) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*HINFO) - if !ok { - return false - } - _ = r2 - if r1.Cpu != r2.Cpu { - return false - } - if r1.Os != r2.Os { - return false - } - return true -} - -func (r1 *HIP) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*HIP) - if !ok { - return false - } - _ = r2 - if r1.HitLength != r2.HitLength { - return false - } - if r1.PublicKeyAlgorithm != r2.PublicKeyAlgorithm { - return false - } - if r1.PublicKeyLength != r2.PublicKeyLength { - return false - } - if r1.Hit != r2.Hit { - return false - } - if r1.PublicKey != r2.PublicKey { - return false - } - if len(r1.RendezvousServers) != len(r2.RendezvousServers) { - return false - } - for i := 0; i < len(r1.RendezvousServers); i++ { - if 
!isDuplicateName(r1.RendezvousServers[i], r2.RendezvousServers[i]) { - return false - } - } - return true -} - -func (r1 *KX) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*KX) - if !ok { - return false - } - _ = r2 - if r1.Preference != r2.Preference { - return false - } - if !isDuplicateName(r1.Exchanger, r2.Exchanger) { - return false - } - return true -} - -func (r1 *L32) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*L32) - if !ok { - return false - } - _ = r2 - if r1.Preference != r2.Preference { - return false - } - if !r1.Locator32.Equal(r2.Locator32) { - return false - } - return true -} - -func (r1 *L64) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*L64) - if !ok { - return false - } - _ = r2 - if r1.Preference != r2.Preference { - return false - } - if r1.Locator64 != r2.Locator64 { - return false - } - return true -} - -func (r1 *LOC) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*LOC) - if !ok { - return false - } - _ = r2 - if r1.Version != r2.Version { - return false - } - if r1.Size != r2.Size { - return false - } - if r1.HorizPre != r2.HorizPre { - return false - } - if r1.VertPre != r2.VertPre { - return false - } - if r1.Latitude != r2.Latitude { - return false - } - if r1.Longitude != r2.Longitude { - return false - } - if r1.Altitude != r2.Altitude { - return false - } - return true -} - -func (r1 *LP) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*LP) - if !ok { - return false - } - _ = r2 - if r1.Preference != r2.Preference { - return false - } - if !isDuplicateName(r1.Fqdn, r2.Fqdn) { - return false - } - return true -} - -func (r1 *MB) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*MB) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Mb, r2.Mb) { - return false - } - return true -} - -func (r1 *MD) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*MD) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Md, r2.Md) { - return false - } - return true -} - -func (r1 *MF) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*MF) - if !ok { - 
return false - } - _ = r2 - if !isDuplicateName(r1.Mf, r2.Mf) { - return false - } - return true -} - -func (r1 *MG) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*MG) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Mg, r2.Mg) { - return false - } - return true -} - -func (r1 *MINFO) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*MINFO) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Rmail, r2.Rmail) { - return false - } - if !isDuplicateName(r1.Email, r2.Email) { - return false - } - return true -} - -func (r1 *MR) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*MR) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Mr, r2.Mr) { - return false - } - return true -} - -func (r1 *MX) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*MX) - if !ok { - return false - } - _ = r2 - if r1.Preference != r2.Preference { - return false - } - if !isDuplicateName(r1.Mx, r2.Mx) { - return false - } - return true -} - -func (r1 *NAPTR) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*NAPTR) - if !ok { - return false - } - _ = r2 - if r1.Order != r2.Order { - return false - } - if r1.Preference != r2.Preference { - return false - } - if r1.Flags != r2.Flags { - return false - } - if r1.Service != r2.Service { - return false - } - if r1.Regexp != r2.Regexp { - return false - } - if !isDuplicateName(r1.Replacement, r2.Replacement) { - return false - } - return true -} - -func (r1 *NID) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*NID) - if !ok { - return false - } - _ = r2 - if r1.Preference != r2.Preference { - return false - } - if r1.NodeID != r2.NodeID { - return false - } - return true -} - -func (r1 *NIMLOC) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*NIMLOC) - if !ok { - return false - } - _ = r2 - if r1.Locator != r2.Locator { - return false - } - return true -} - -func (r1 *NINFO) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*NINFO) - if !ok { - return false - } - _ = r2 - if len(r1.ZSData) != len(r2.ZSData) { - return false - } - for i := 0; i 
< len(r1.ZSData); i++ { - if r1.ZSData[i] != r2.ZSData[i] { - return false - } - } - return true -} - -func (r1 *NS) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*NS) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Ns, r2.Ns) { - return false - } - return true -} - -func (r1 *NSAPPTR) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*NSAPPTR) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Ptr, r2.Ptr) { - return false - } - return true -} - -func (r1 *NSEC) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*NSEC) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.NextDomain, r2.NextDomain) { - return false - } - if len(r1.TypeBitMap) != len(r2.TypeBitMap) { - return false - } - for i := 0; i < len(r1.TypeBitMap); i++ { - if r1.TypeBitMap[i] != r2.TypeBitMap[i] { - return false - } - } - return true -} - -func (r1 *NSEC3) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*NSEC3) - if !ok { - return false - } - _ = r2 - if r1.Hash != r2.Hash { - return false - } - if r1.Flags != r2.Flags { - return false - } - if r1.Iterations != r2.Iterations { - return false - } - if r1.SaltLength != r2.SaltLength { - return false - } - if r1.Salt != r2.Salt { - return false - } - if r1.HashLength != r2.HashLength { - return false - } - if r1.NextDomain != r2.NextDomain { - return false - } - if len(r1.TypeBitMap) != len(r2.TypeBitMap) { - return false - } - for i := 0; i < len(r1.TypeBitMap); i++ { - if r1.TypeBitMap[i] != r2.TypeBitMap[i] { - return false - } - } - return true -} - -func (r1 *NSEC3PARAM) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*NSEC3PARAM) - if !ok { - return false - } - _ = r2 - if r1.Hash != r2.Hash { - return false - } - if r1.Flags != r2.Flags { - return false - } - if r1.Iterations != r2.Iterations { - return false - } - if r1.SaltLength != r2.SaltLength { - return false - } - if r1.Salt != r2.Salt { - return false - } - return true -} - -func (r1 *NULL) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*NULL) - if !ok { - return 
false - } - _ = r2 - if r1.Data != r2.Data { - return false - } - return true -} - -func (r1 *OPENPGPKEY) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*OPENPGPKEY) - if !ok { - return false - } - _ = r2 - if r1.PublicKey != r2.PublicKey { - return false - } - return true -} - -func (r1 *PTR) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*PTR) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Ptr, r2.Ptr) { - return false - } - return true -} - -func (r1 *PX) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*PX) - if !ok { - return false - } - _ = r2 - if r1.Preference != r2.Preference { - return false - } - if !isDuplicateName(r1.Map822, r2.Map822) { - return false - } - if !isDuplicateName(r1.Mapx400, r2.Mapx400) { - return false - } - return true -} - -func (r1 *RFC3597) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*RFC3597) - if !ok { - return false - } - _ = r2 - if r1.Rdata != r2.Rdata { - return false - } - return true -} - -func (r1 *RKEY) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*RKEY) - if !ok { - return false - } - _ = r2 - if r1.Flags != r2.Flags { - return false - } - if r1.Protocol != r2.Protocol { - return false - } - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.PublicKey != r2.PublicKey { - return false - } - return true -} - -func (r1 *RP) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*RP) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Mbox, r2.Mbox) { - return false - } - if !isDuplicateName(r1.Txt, r2.Txt) { - return false - } - return true -} - -func (r1 *RRSIG) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*RRSIG) - if !ok { - return false - } - _ = r2 - if r1.TypeCovered != r2.TypeCovered { - return false - } - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.Labels != r2.Labels { - return false - } - if r1.OrigTtl != r2.OrigTtl { - return false - } - if r1.Expiration != r2.Expiration { - return false - } - if r1.Inception != r2.Inception { - return false - } - if r1.KeyTag != r2.KeyTag { - 
return false - } - if !isDuplicateName(r1.SignerName, r2.SignerName) { - return false - } - if r1.Signature != r2.Signature { - return false - } - return true -} - -func (r1 *RT) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*RT) - if !ok { - return false - } - _ = r2 - if r1.Preference != r2.Preference { - return false - } - if !isDuplicateName(r1.Host, r2.Host) { - return false - } - return true -} - -func (r1 *SMIMEA) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*SMIMEA) - if !ok { - return false - } - _ = r2 - if r1.Usage != r2.Usage { - return false - } - if r1.Selector != r2.Selector { - return false - } - if r1.MatchingType != r2.MatchingType { - return false - } - if r1.Certificate != r2.Certificate { - return false - } - return true -} - -func (r1 *SOA) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*SOA) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Ns, r2.Ns) { - return false - } - if !isDuplicateName(r1.Mbox, r2.Mbox) { - return false - } - if r1.Serial != r2.Serial { - return false - } - if r1.Refresh != r2.Refresh { - return false - } - if r1.Retry != r2.Retry { - return false - } - if r1.Expire != r2.Expire { - return false - } - if r1.Minttl != r2.Minttl { - return false - } - return true -} - -func (r1 *SPF) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*SPF) - if !ok { - return false - } - _ = r2 - if len(r1.Txt) != len(r2.Txt) { - return false - } - for i := 0; i < len(r1.Txt); i++ { - if r1.Txt[i] != r2.Txt[i] { - return false - } - } - return true -} - -func (r1 *SRV) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*SRV) - if !ok { - return false - } - _ = r2 - if r1.Priority != r2.Priority { - return false - } - if r1.Weight != r2.Weight { - return false - } - if r1.Port != r2.Port { - return false - } - if !isDuplicateName(r1.Target, r2.Target) { - return false - } - return true -} - -func (r1 *SSHFP) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*SSHFP) - if !ok { - return false - } - _ = r2 - if r1.Algorithm != r2.Algorithm { - return 
false - } - if r1.Type != r2.Type { - return false - } - if r1.FingerPrint != r2.FingerPrint { - return false - } - return true -} - -func (r1 *TA) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*TA) - if !ok { - return false - } - _ = r2 - if r1.KeyTag != r2.KeyTag { - return false - } - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.DigestType != r2.DigestType { - return false - } - if r1.Digest != r2.Digest { - return false - } - return true -} - -func (r1 *TALINK) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*TALINK) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.PreviousName, r2.PreviousName) { - return false - } - if !isDuplicateName(r1.NextName, r2.NextName) { - return false - } - return true -} - -func (r1 *TKEY) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*TKEY) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Algorithm, r2.Algorithm) { - return false - } - if r1.Inception != r2.Inception { - return false - } - if r1.Expiration != r2.Expiration { - return false - } - if r1.Mode != r2.Mode { - return false - } - if r1.Error != r2.Error { - return false - } - if r1.KeySize != r2.KeySize { - return false - } - if r1.Key != r2.Key { - return false - } - if r1.OtherLen != r2.OtherLen { - return false - } - if r1.OtherData != r2.OtherData { - return false - } - return true -} - -func (r1 *TLSA) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*TLSA) - if !ok { - return false - } - _ = r2 - if r1.Usage != r2.Usage { - return false - } - if r1.Selector != r2.Selector { - return false - } - if r1.MatchingType != r2.MatchingType { - return false - } - if r1.Certificate != r2.Certificate { - return false - } - return true -} - -func (r1 *TSIG) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*TSIG) - if !ok { - return false - } - _ = r2 - if !isDuplicateName(r1.Algorithm, r2.Algorithm) { - return false - } - if r1.TimeSigned != r2.TimeSigned { - return false - } - if r1.Fudge != r2.Fudge { - return false - } - if r1.MACSize != 
r2.MACSize { - return false - } - if r1.MAC != r2.MAC { - return false - } - if r1.OrigId != r2.OrigId { - return false - } - if r1.Error != r2.Error { - return false - } - if r1.OtherLen != r2.OtherLen { - return false - } - if r1.OtherData != r2.OtherData { - return false - } - return true -} - -func (r1 *TXT) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*TXT) - if !ok { - return false - } - _ = r2 - if len(r1.Txt) != len(r2.Txt) { - return false - } - for i := 0; i < len(r1.Txt); i++ { - if r1.Txt[i] != r2.Txt[i] { - return false - } - } - return true -} - -func (r1 *UID) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*UID) - if !ok { - return false - } - _ = r2 - if r1.Uid != r2.Uid { - return false - } - return true -} - -func (r1 *UINFO) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*UINFO) - if !ok { - return false - } - _ = r2 - if r1.Uinfo != r2.Uinfo { - return false - } - return true -} - -func (r1 *URI) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*URI) - if !ok { - return false - } - _ = r2 - if r1.Priority != r2.Priority { - return false - } - if r1.Weight != r2.Weight { - return false - } - if r1.Target != r2.Target { - return false - } - return true -} - -func (r1 *X25) isDuplicate(_r2 RR) bool { - r2, ok := _r2.(*X25) - if !ok { - return false - } - _ = r2 - if r1.PSDNAddress != r2.PSDNAddress { - return false - } - return true -} diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go deleted file mode 100644 index c4cf4757e..000000000 --- a/vendor/github.com/miekg/dns/zmsg.go +++ /dev/null @@ -1,2722 +0,0 @@ -// Code generated by "go run msg_generate.go"; DO NOT EDIT. 
- -package dns - -// pack*() functions - -func (rr *A) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDataA(rr.A, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *AAAA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDataAAAA(rr.AAAA, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *AFSDB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Subtype, msg, off) - if err != nil { - return off, err - } - off, err = packDomainName(rr.Hostname, msg, off, compression, false) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - return off, nil -} - -func (rr *AVC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packStringTxt(rr.Txt, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *CAA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint8(rr.Flag, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Tag, msg, off) - if err != nil { - return off, err - } - off, err = packStringOctet(rr.Value, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *CDNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - 
return off, nil -} - -func (rr *CDS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *CERT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Type, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.Certificate, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *CNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Target, msg, off, compression, compress) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *CSYNC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint32(rr.Serial, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packDataNsec(rr.TypeBitMap, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *DHCID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packStringBase64(rr.Digest, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *DLV) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.KeyTag, msg, 
off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *DNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Target, msg, off, compression, false) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *DNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *DS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *EID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packStringHex(rr.Endpoint, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *EUI48) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint48(rr.Address, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *EUI64) 
pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint64(rr.Address, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *GID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint32(rr.Gid, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *GPOS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packString(rr.Longitude, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Latitude, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Altitude, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *HINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packString(rr.Cpu, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Os, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *HIP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint8(rr.HitLength, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.PublicKeyLength, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Hit, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, false) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - 
return off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *KX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packDomainName(rr.Exchanger, msg, off, compression, false) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *L32) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packDataA(rr.Locator32, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *L64) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packUint64(rr.Locator64, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *LOC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint8(rr.Version, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Size, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.HorizPre, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.VertPre, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Latitude, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Longitude, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Altitude, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func 
(rr *LP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packDomainName(rr.Fqdn, msg, off, compression, false) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Mb, msg, off, compression, compress) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MD) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Md, msg, off, compression, compress) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MF) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Mf, msg, off, compression, compress) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Mg, msg, off, compression, compress) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Rmail, msg, off, compression, compress) - if err != nil { - return off, err - } - off, err = packDomainName(rr.Email, msg, off, compression, compress) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Mr, msg, off, compression, compress) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = 
packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packDomainName(rr.Mx, msg, off, compression, compress) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NAPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Order, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Service, msg, off) - if err != nil { - return off, err - } - off, err = packString(rr.Regexp, msg, off) - if err != nil { - return off, err - } - off, err = packDomainName(rr.Replacement, msg, off, compression, false) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packUint64(rr.NodeID, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NIMLOC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packStringHex(rr.Locator, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packStringTxt(rr.ZSData, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Ns, msg, off, compression, compress) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NSAPPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = 
packDomainName(rr.Ptr, msg, off, compression, false) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NSEC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.NextDomain, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packDataNsec(rr.TypeBitMap, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NSEC3) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint8(rr.Hash, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Iterations, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.SaltLength, msg, off) - if err != nil { - return off, err - } - // Only pack salt if value is not "-", i.e. empty - if rr.Salt != "-" { - off, err = packStringHex(rr.Salt, msg, off) - if err != nil { - return off, err - } - } - off, err = packUint8(rr.HashLength, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase32(rr.NextDomain, msg, off) - if err != nil { - return off, err - } - off, err = packDataNsec(rr.TypeBitMap, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NSEC3PARAM) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint8(rr.Hash, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Iterations, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.SaltLength, msg, off) - if err != nil { - return off, err - } - // Only pack salt if value is not "-", i.e. 
empty - if rr.Salt != "-" { - off, err = packStringHex(rr.Salt, msg, off) - if err != nil { - return off, err - } - } - return off, nil -} - -func (rr *NULL) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packStringAny(rr.Data, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *OPENPGPKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *OPT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDataOpt(rr.Option, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *PTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Ptr, msg, off, compression, compress) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *PX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packDomainName(rr.Map822, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packDomainName(rr.Mapx400, msg, off, compression, false) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packStringHex(rr.Rdata, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *RKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return off, err - } - 
off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *RP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Mbox, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packDomainName(rr.Txt, msg, off, compression, false) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *RRSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.TypeCovered, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Labels, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.OrigTtl, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Expiration, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Inception, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packDomainName(rr.SignerName, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.Signature, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *RT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return off, err - } - off, err = packDomainName(rr.Host, msg, off, compression, false) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *SIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.TypeCovered, msg, off) - if err != nil { 
- return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Labels, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.OrigTtl, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Expiration, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Inception, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packDomainName(rr.SignerName, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packStringBase64(rr.Signature, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *SMIMEA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint8(rr.Usage, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Selector, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.MatchingType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Certificate, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *SOA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Ns, msg, off, compression, compress) - if err != nil { - return off, err - } - off, err = packDomainName(rr.Mbox, msg, off, compression, compress) - if err != nil { - return off, err - } - off, err = packUint32(rr.Serial, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Refresh, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Retry, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Expire, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Minttl, msg, off) - if err != nil { - return 
off, err - } - return off, nil -} - -func (rr *SPF) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packStringTxt(rr.Txt, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *SRV) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Priority, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Weight, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Port, msg, off) - if err != nil { - return off, err - } - off, err = packDomainName(rr.Target, msg, off, compression, false) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *SSHFP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Type, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.FingerPrint, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *TA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *TALINK) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.PreviousName, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packDomainName(rr.NextName, msg, off, compression, false) - if err != nil { - return off, err - } - return off, nil -} - -func (rr 
*TKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Algorithm, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packUint32(rr.Inception, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(rr.Expiration, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Mode, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Error, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.KeySize, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Key, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.OtherLen, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.OtherData, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *TLSA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint8(rr.Usage, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.Selector, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(rr.MatchingType, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.Certificate, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *TSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packDomainName(rr.Algorithm, msg, off, compression, false) - if err != nil { - return off, err - } - off, err = packUint48(rr.TimeSigned, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Fudge, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.MACSize, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.MAC, msg, off) - if err != nil { - return off, err - } - off, err = 
packUint16(rr.OrigId, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Error, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.OtherLen, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(rr.OtherData, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *TXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packStringTxt(rr.Txt, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *UID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint32(rr.Uid, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *UINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packString(rr.Uinfo, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *URI) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packUint16(rr.Priority, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(rr.Weight, msg, off) - if err != nil { - return off, err - } - off, err = packStringOctet(rr.Target, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *X25) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { - off, err = packString(rr.PSDNAddress, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -// unpack*() functions - -func (rr *A) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.A, off, err = unpackDataA(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *AAAA) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.AAAA, off, err = unpackDataAAAA(msg, off) 
- if err != nil { - return off, err - } - return off, nil -} - -func (rr *AFSDB) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Subtype, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Hostname, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *ANY) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - return off, nil -} - -func (rr *AVC) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Txt, off, err = unpackStringTxt(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *CAA) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Flag, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Tag, off, err = unpackString(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Value, off, err = unpackStringOctet(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *CDNSKEY) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Protocol, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *CDS) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.KeyTag, off, err = 
unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.DigestType, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *CERT) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Type, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *CNAME) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Target, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *CSYNC) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Serial, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.TypeBitMap, off, err = unpackDataNsec(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *DHCID) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Digest, off, err = 
unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *DLV) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.DigestType, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *DNAME) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Target, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *DNSKEY) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Protocol, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *DS) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - 
} - rr.DigestType, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *EID) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *EUI48) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Address, off, err = unpackUint48(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *EUI64) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Address, off, err = unpackUint64(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *GID) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Gid, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *GPOS) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Longitude, off, err = unpackString(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Latitude, off, err = unpackString(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Altitude, off, err = unpackString(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *HINFO) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Cpu, off, err = unpackString(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Os, off, err = unpackString(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *HIP) 
unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.HitLength, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.PublicKeyLength, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) - if err != nil { - return off, err - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) - if err != nil { - return off, err - } - rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Protocol, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *KX) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Exchanger, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *L32) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - 
- rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Locator32, off, err = unpackDataA(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *L64) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Locator64, off, err = unpackUint64(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *LOC) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Version, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Size, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.HorizPre, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.VertPre, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Latitude, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Longitude, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Altitude, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *LP) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Fqdn, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MB) unpack(msg []byte, off int) 
(off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Mb, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MD) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Md, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MF) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Mf, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MG) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Mg, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MINFO) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Rmail, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Email, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MR) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Mr, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *MX) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Mx, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NAPTR) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Order, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Preference, off, err = unpackUint16(msg, off) - if err 
!= nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Flags, off, err = unpackString(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Service, off, err = unpackString(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Regexp, off, err = unpackString(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Replacement, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NID) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.NodeID, off, err = unpackUint64(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NIMLOC) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NINFO) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.ZSData, off, err = unpackStringTxt(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NS) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Ns, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NSAPPTR) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Ptr, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NSEC) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.NextDomain, off, err = UnpackDomainName(msg, off) - if err != 
nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.TypeBitMap, off, err = unpackDataNsec(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NSEC3) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Hash, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Flags, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Iterations, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.SaltLength, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) - if err != nil { - return off, err - } - rr.HashLength, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength)) - if err != nil { - return off, err - } - rr.TypeBitMap, off, err = unpackDataNsec(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NSEC3PARAM) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Hash, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Flags, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Iterations, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.SaltLength, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Salt, off, err = unpackStringHex(msg, off, 
off+int(rr.SaltLength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Data, off, err = unpackStringAny(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *OPENPGPKEY) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *OPT) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Option, off, err = unpackDataOpt(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *PTR) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Ptr, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *PX) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Map822, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Mapx400, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *RKEY) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Protocol, off, 
err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *RP) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Mbox, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Txt, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *RRSIG) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.TypeCovered, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Labels, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.OrigTtl, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Expiration, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Inception, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.SignerName, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Signature, off, err = unpackStringBase64(msg, off, 
rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *RT) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Host, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *SIG) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.TypeCovered, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Labels, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.OrigTtl, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Expiration, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Inception, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.SignerName, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *SMIMEA) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Usage, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil 
- } - rr.Selector, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.MatchingType, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *SOA) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Ns, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Mbox, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Serial, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Refresh, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Retry, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Expire, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Minttl, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *SPF) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Txt, off, err = unpackStringTxt(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *SRV) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Priority, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Weight, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, 
nil - } - rr.Port, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Target, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *SSHFP) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Type, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *TA) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.DigestType, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *TALINK) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.PreviousName, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.NextName, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *TKEY) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Algorithm, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return 
off, nil - } - rr.Inception, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Expiration, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Mode, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Error, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.KeySize, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Key, off, err = unpackStringHex(msg, off, off+int(rr.KeySize)) - if err != nil { - return off, err - } - rr.OtherLen, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *TLSA) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Usage, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Selector, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.MatchingType, off, err = unpackUint8(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *TSIG) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Algorithm, off, err = UnpackDomainName(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.TimeSigned, off, err = unpackUint48(msg, off) 
- if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Fudge, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.MACSize, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) - if err != nil { - return off, err - } - rr.OrigId, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Error, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.OtherLen, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *TXT) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Txt, off, err = unpackStringTxt(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *UID) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Uid, off, err = unpackUint32(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *UINFO) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Uinfo, off, err = unpackString(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *URI) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.Priority, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - rr.Weight, off, err = unpackUint16(msg, off) - if err != nil { - return off, err - } - if off == len(msg) { - return off, nil - } - 
rr.Target, off, err = unpackStringOctet(msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func (rr *X25) unpack(msg []byte, off int) (off1 int, err error) { - rdStart := off - _ = rdStart - - rr.PSDNAddress, off, err = unpackString(msg, off) - if err != nil { - return off, err - } - return off, nil -} diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go deleted file mode 100644 index 495a83e30..000000000 --- a/vendor/github.com/miekg/dns/ztypes.go +++ /dev/null @@ -1,881 +0,0 @@ -// Code generated by "go run types_generate.go"; DO NOT EDIT. - -package dns - -import ( - "encoding/base64" - "net" -) - -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ - TypeA: func() RR { return new(A) }, - TypeAAAA: func() RR { return new(AAAA) }, - TypeAFSDB: func() RR { return new(AFSDB) }, - TypeANY: func() RR { return new(ANY) }, - TypeAVC: func() RR { return new(AVC) }, - TypeCAA: func() RR { return new(CAA) }, - TypeCDNSKEY: func() RR { return new(CDNSKEY) }, - TypeCDS: func() RR { return new(CDS) }, - TypeCERT: func() RR { return new(CERT) }, - TypeCNAME: func() RR { return new(CNAME) }, - TypeCSYNC: func() RR { return new(CSYNC) }, - TypeDHCID: func() RR { return new(DHCID) }, - TypeDLV: func() RR { return new(DLV) }, - TypeDNAME: func() RR { return new(DNAME) }, - TypeDNSKEY: func() RR { return new(DNSKEY) }, - TypeDS: func() RR { return new(DS) }, - TypeEID: func() RR { return new(EID) }, - TypeEUI48: func() RR { return new(EUI48) }, - TypeEUI64: func() RR { return new(EUI64) }, - TypeGID: func() RR { return new(GID) }, - TypeGPOS: func() RR { return new(GPOS) }, - TypeHINFO: func() RR { return new(HINFO) }, - TypeHIP: func() RR { return new(HIP) }, - TypeKEY: func() RR { return new(KEY) }, - TypeKX: func() RR { return new(KX) }, - TypeL32: func() RR { return new(L32) }, - TypeL64: func() RR { return new(L64) }, - TypeLOC: func() RR { return new(LOC) }, - TypeLP: func() 
RR { return new(LP) }, - TypeMB: func() RR { return new(MB) }, - TypeMD: func() RR { return new(MD) }, - TypeMF: func() RR { return new(MF) }, - TypeMG: func() RR { return new(MG) }, - TypeMINFO: func() RR { return new(MINFO) }, - TypeMR: func() RR { return new(MR) }, - TypeMX: func() RR { return new(MX) }, - TypeNAPTR: func() RR { return new(NAPTR) }, - TypeNID: func() RR { return new(NID) }, - TypeNIMLOC: func() RR { return new(NIMLOC) }, - TypeNINFO: func() RR { return new(NINFO) }, - TypeNS: func() RR { return new(NS) }, - TypeNSAPPTR: func() RR { return new(NSAPPTR) }, - TypeNSEC: func() RR { return new(NSEC) }, - TypeNSEC3: func() RR { return new(NSEC3) }, - TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, - TypeNULL: func() RR { return new(NULL) }, - TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, - TypeOPT: func() RR { return new(OPT) }, - TypePTR: func() RR { return new(PTR) }, - TypePX: func() RR { return new(PX) }, - TypeRKEY: func() RR { return new(RKEY) }, - TypeRP: func() RR { return new(RP) }, - TypeRRSIG: func() RR { return new(RRSIG) }, - TypeRT: func() RR { return new(RT) }, - TypeSIG: func() RR { return new(SIG) }, - TypeSMIMEA: func() RR { return new(SMIMEA) }, - TypeSOA: func() RR { return new(SOA) }, - TypeSPF: func() RR { return new(SPF) }, - TypeSRV: func() RR { return new(SRV) }, - TypeSSHFP: func() RR { return new(SSHFP) }, - TypeTA: func() RR { return new(TA) }, - TypeTALINK: func() RR { return new(TALINK) }, - TypeTKEY: func() RR { return new(TKEY) }, - TypeTLSA: func() RR { return new(TLSA) }, - TypeTSIG: func() RR { return new(TSIG) }, - TypeTXT: func() RR { return new(TXT) }, - TypeUID: func() RR { return new(UID) }, - TypeUINFO: func() RR { return new(UINFO) }, - TypeURI: func() RR { return new(URI) }, - TypeX25: func() RR { return new(X25) }, -} - -// TypeToString is a map of strings for each RR type. 
-var TypeToString = map[uint16]string{ - TypeA: "A", - TypeAAAA: "AAAA", - TypeAFSDB: "AFSDB", - TypeANY: "ANY", - TypeATMA: "ATMA", - TypeAVC: "AVC", - TypeAXFR: "AXFR", - TypeCAA: "CAA", - TypeCDNSKEY: "CDNSKEY", - TypeCDS: "CDS", - TypeCERT: "CERT", - TypeCNAME: "CNAME", - TypeCSYNC: "CSYNC", - TypeDHCID: "DHCID", - TypeDLV: "DLV", - TypeDNAME: "DNAME", - TypeDNSKEY: "DNSKEY", - TypeDS: "DS", - TypeEID: "EID", - TypeEUI48: "EUI48", - TypeEUI64: "EUI64", - TypeGID: "GID", - TypeGPOS: "GPOS", - TypeHINFO: "HINFO", - TypeHIP: "HIP", - TypeISDN: "ISDN", - TypeIXFR: "IXFR", - TypeKEY: "KEY", - TypeKX: "KX", - TypeL32: "L32", - TypeL64: "L64", - TypeLOC: "LOC", - TypeLP: "LP", - TypeMAILA: "MAILA", - TypeMAILB: "MAILB", - TypeMB: "MB", - TypeMD: "MD", - TypeMF: "MF", - TypeMG: "MG", - TypeMINFO: "MINFO", - TypeMR: "MR", - TypeMX: "MX", - TypeNAPTR: "NAPTR", - TypeNID: "NID", - TypeNIMLOC: "NIMLOC", - TypeNINFO: "NINFO", - TypeNS: "NS", - TypeNSEC: "NSEC", - TypeNSEC3: "NSEC3", - TypeNSEC3PARAM: "NSEC3PARAM", - TypeNULL: "NULL", - TypeNXT: "NXT", - TypeNone: "None", - TypeOPENPGPKEY: "OPENPGPKEY", - TypeOPT: "OPT", - TypePTR: "PTR", - TypePX: "PX", - TypeRKEY: "RKEY", - TypeRP: "RP", - TypeRRSIG: "RRSIG", - TypeRT: "RT", - TypeReserved: "Reserved", - TypeSIG: "SIG", - TypeSMIMEA: "SMIMEA", - TypeSOA: "SOA", - TypeSPF: "SPF", - TypeSRV: "SRV", - TypeSSHFP: "SSHFP", - TypeTA: "TA", - TypeTALINK: "TALINK", - TypeTKEY: "TKEY", - TypeTLSA: "TLSA", - TypeTSIG: "TSIG", - TypeTXT: "TXT", - TypeUID: "UID", - TypeUINFO: "UINFO", - TypeUNSPEC: "UNSPEC", - TypeURI: "URI", - TypeX25: "X25", - TypeNSAPPTR: "NSAP-PTR", -} - -func (rr *A) Header() *RR_Header { return &rr.Hdr } -func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } -func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } -func (rr *ANY) Header() *RR_Header { return &rr.Hdr } -func (rr *AVC) Header() *RR_Header { return &rr.Hdr } -func (rr *CAA) Header() *RR_Header { return &rr.Hdr } -func (rr *CDNSKEY) Header() 
*RR_Header { return &rr.Hdr } -func (rr *CDS) Header() *RR_Header { return &rr.Hdr } -func (rr *CERT) Header() *RR_Header { return &rr.Hdr } -func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } -func (rr *CSYNC) Header() *RR_Header { return &rr.Hdr } -func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } -func (rr *DLV) Header() *RR_Header { return &rr.Hdr } -func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } -func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *DS) Header() *RR_Header { return &rr.Hdr } -func (rr *EID) Header() *RR_Header { return &rr.Hdr } -func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } -func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } -func (rr *GID) Header() *RR_Header { return &rr.Hdr } -func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } -func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *HIP) Header() *RR_Header { return &rr.Hdr } -func (rr *KEY) Header() *RR_Header { return &rr.Hdr } -func (rr *KX) Header() *RR_Header { return &rr.Hdr } -func (rr *L32) Header() *RR_Header { return &rr.Hdr } -func (rr *L64) Header() *RR_Header { return &rr.Hdr } -func (rr *LOC) Header() *RR_Header { return &rr.Hdr } -func (rr *LP) Header() *RR_Header { return &rr.Hdr } -func (rr *MB) Header() *RR_Header { return &rr.Hdr } -func (rr *MD) Header() *RR_Header { return &rr.Hdr } -func (rr *MF) Header() *RR_Header { return &rr.Hdr } -func (rr *MG) Header() *RR_Header { return &rr.Hdr } -func (rr *MINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *MR) Header() *RR_Header { return &rr.Hdr } -func (rr *MX) Header() *RR_Header { return &rr.Hdr } -func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } -func (rr *NID) Header() *RR_Header { return &rr.Hdr } -func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } -func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *NS) Header() *RR_Header { return &rr.Hdr } -func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC) 
Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } -func (rr *NULL) Header() *RR_Header { return &rr.Hdr } -func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *OPT) Header() *RR_Header { return &rr.Hdr } -func (rr *PTR) Header() *RR_Header { return &rr.Hdr } -func (rr *PX) Header() *RR_Header { return &rr.Hdr } -func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } -func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *RP) Header() *RR_Header { return &rr.Hdr } -func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } -func (rr *RT) Header() *RR_Header { return &rr.Hdr } -func (rr *SIG) Header() *RR_Header { return &rr.Hdr } -func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr } -func (rr *SOA) Header() *RR_Header { return &rr.Hdr } -func (rr *SPF) Header() *RR_Header { return &rr.Hdr } -func (rr *SRV) Header() *RR_Header { return &rr.Hdr } -func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } -func (rr *TA) Header() *RR_Header { return &rr.Hdr } -func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } -func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } -func (rr *TSIG) Header() *RR_Header { return &rr.Hdr } -func (rr *TXT) Header() *RR_Header { return &rr.Hdr } -func (rr *UID) Header() *RR_Header { return &rr.Hdr } -func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *URI) Header() *RR_Header { return &rr.Hdr } -func (rr *X25) Header() *RR_Header { return &rr.Hdr } - -// len() functions -func (rr *A) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - if len(rr.A) != 0 { - l += net.IPv4len - } - return l -} -func (rr *AAAA) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - if len(rr.AAAA) != 0 { - l += net.IPv6len - } - return l -} -func (rr *AFSDB) len(off int, 
compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Subtype - l += domainNameLen(rr.Hostname, off+l, compression, false) - return l -} -func (rr *ANY) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - return l -} -func (rr *AVC) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - for _, x := range rr.Txt { - l += len(x) + 1 - } - return l -} -func (rr *CAA) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // Flag - l += len(rr.Tag) + 1 - l += len(rr.Value) - return l -} -func (rr *CERT) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Type - l += 2 // KeyTag - l++ // Algorithm - l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) - return l -} -func (rr *CNAME) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Target, off+l, compression, true) - return l -} -func (rr *DHCID) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += base64.StdEncoding.DecodedLen(len(rr.Digest)) - return l -} -func (rr *DNAME) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Target, off+l, compression, false) - return l -} -func (rr *DNSKEY) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Flags - l++ // Protocol - l++ // Algorithm - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - return l -} -func (rr *DS) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // KeyTag - l++ // Algorithm - l++ // DigestType - l += len(rr.Digest)/2 + 1 - return l -} -func (rr *EID) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.Endpoint)/2 + 1 - return l -} -func (rr *EUI48) 
len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 6 // Address - return l -} -func (rr *EUI64) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 8 // Address - return l -} -func (rr *GID) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 4 // Gid - return l -} -func (rr *GPOS) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.Longitude) + 1 - l += len(rr.Latitude) + 1 - l += len(rr.Altitude) + 1 - return l -} -func (rr *HINFO) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.Cpu) + 1 - l += len(rr.Os) + 1 - return l -} -func (rr *HIP) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // HitLength - l++ // PublicKeyAlgorithm - l += 2 // PublicKeyLength - l += len(rr.Hit) / 2 - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - for _, x := range rr.RendezvousServers { - l += domainNameLen(x, off+l, compression, false) - } - return l -} -func (rr *KX) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += domainNameLen(rr.Exchanger, off+l, compression, false) - return l -} -func (rr *L32) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - if len(rr.Locator32) != 0 { - l += net.IPv4len - } - return l -} -func (rr *L64) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += 8 // Locator64 - return l -} -func (rr *LOC) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // Version - l++ // Size - l++ // HorizPre - l++ // VertPre - l += 4 // Latitude - l += 4 // Longitude - l += 4 // Altitude - return l -} -func (rr *LP) len(off int, compression 
map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += domainNameLen(rr.Fqdn, off+l, compression, false) - return l -} -func (rr *MB) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Mb, off+l, compression, true) - return l -} -func (rr *MD) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Md, off+l, compression, true) - return l -} -func (rr *MF) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Mf, off+l, compression, true) - return l -} -func (rr *MG) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Mg, off+l, compression, true) - return l -} -func (rr *MINFO) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Rmail, off+l, compression, true) - l += domainNameLen(rr.Email, off+l, compression, true) - return l -} -func (rr *MR) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Mr, off+l, compression, true) - return l -} -func (rr *MX) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += domainNameLen(rr.Mx, off+l, compression, true) - return l -} -func (rr *NAPTR) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Order - l += 2 // Preference - l += len(rr.Flags) + 1 - l += len(rr.Service) + 1 - l += len(rr.Regexp) + 1 - l += domainNameLen(rr.Replacement, off+l, compression, false) - return l -} -func (rr *NID) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += 8 // NodeID - return l -} -func (rr *NIMLOC) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, 
compression) - l += len(rr.Locator)/2 + 1 - return l -} -func (rr *NINFO) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - for _, x := range rr.ZSData { - l += len(x) + 1 - } - return l -} -func (rr *NS) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Ns, off+l, compression, true) - return l -} -func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Ptr, off+l, compression, false) - return l -} -func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // Hash - l++ // Flags - l += 2 // Iterations - l++ // SaltLength - l += len(rr.Salt) / 2 - return l -} -func (rr *NULL) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.Data) - return l -} -func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - return l -} -func (rr *PTR) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Ptr, off+l, compression, true) - return l -} -func (rr *PX) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += domainNameLen(rr.Map822, off+l, compression, false) - l += domainNameLen(rr.Mapx400, off+l, compression, false) - return l -} -func (rr *RFC3597) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.Rdata)/2 + 1 - return l -} -func (rr *RKEY) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Flags - l++ // Protocol - l++ // Algorithm - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - return l -} -func (rr *RP) len(off int, compression map[string]struct{}) int { - 
l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Mbox, off+l, compression, false) - l += domainNameLen(rr.Txt, off+l, compression, false) - return l -} -func (rr *RRSIG) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // TypeCovered - l++ // Algorithm - l++ // Labels - l += 4 // OrigTtl - l += 4 // Expiration - l += 4 // Inception - l += 2 // KeyTag - l += domainNameLen(rr.SignerName, off+l, compression, false) - l += base64.StdEncoding.DecodedLen(len(rr.Signature)) - return l -} -func (rr *RT) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += domainNameLen(rr.Host, off+l, compression, false) - return l -} -func (rr *SMIMEA) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // Usage - l++ // Selector - l++ // MatchingType - l += len(rr.Certificate)/2 + 1 - return l -} -func (rr *SOA) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Ns, off+l, compression, true) - l += domainNameLen(rr.Mbox, off+l, compression, true) - l += 4 // Serial - l += 4 // Refresh - l += 4 // Retry - l += 4 // Expire - l += 4 // Minttl - return l -} -func (rr *SPF) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - for _, x := range rr.Txt { - l += len(x) + 1 - } - return l -} -func (rr *SRV) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Priority - l += 2 // Weight - l += 2 // Port - l += domainNameLen(rr.Target, off+l, compression, false) - return l -} -func (rr *SSHFP) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // Algorithm - l++ // Type - l += len(rr.FingerPrint)/2 + 1 - return l -} -func (rr *TA) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // KeyTag - l++ // 
Algorithm - l++ // DigestType - l += len(rr.Digest)/2 + 1 - return l -} -func (rr *TALINK) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.PreviousName, off+l, compression, false) - l += domainNameLen(rr.NextName, off+l, compression, false) - return l -} -func (rr *TKEY) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Algorithm, off+l, compression, false) - l += 4 // Inception - l += 4 // Expiration - l += 2 // Mode - l += 2 // Error - l += 2 // KeySize - l += len(rr.Key) / 2 - l += 2 // OtherLen - l += len(rr.OtherData) / 2 - return l -} -func (rr *TLSA) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // Usage - l++ // Selector - l++ // MatchingType - l += len(rr.Certificate)/2 + 1 - return l -} -func (rr *TSIG) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Algorithm, off+l, compression, false) - l += 6 // TimeSigned - l += 2 // Fudge - l += 2 // MACSize - l += len(rr.MAC) / 2 - l += 2 // OrigId - l += 2 // Error - l += 2 // OtherLen - l += len(rr.OtherData) / 2 - return l -} -func (rr *TXT) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - for _, x := range rr.Txt { - l += len(x) + 1 - } - return l -} -func (rr *UID) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 4 // Uid - return l -} -func (rr *UINFO) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.Uinfo) + 1 - return l -} -func (rr *URI) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Priority - l += 2 // Weight - l += len(rr.Target) - return l -} -func (rr *X25) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.PSDNAddress) + 1 - 
return l -} - -// copy() functions -func (rr *A) copy() RR { - return &A{rr.Hdr, copyIP(rr.A)} -} -func (rr *AAAA) copy() RR { - return &AAAA{rr.Hdr, copyIP(rr.AAAA)} -} -func (rr *AFSDB) copy() RR { - return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname} -} -func (rr *ANY) copy() RR { - return &ANY{rr.Hdr} -} -func (rr *AVC) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &AVC{rr.Hdr, Txt} -} -func (rr *CAA) copy() RR { - return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value} -} -func (rr *CERT) copy() RR { - return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} -} -func (rr *CNAME) copy() RR { - return &CNAME{rr.Hdr, rr.Target} -} -func (rr *CSYNC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap} -} -func (rr *DHCID) copy() RR { - return &DHCID{rr.Hdr, rr.Digest} -} -func (rr *DNAME) copy() RR { - return &DNAME{rr.Hdr, rr.Target} -} -func (rr *DNSKEY) copy() RR { - return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} -} -func (rr *DS) copy() RR { - return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} -} -func (rr *EID) copy() RR { - return &EID{rr.Hdr, rr.Endpoint} -} -func (rr *EUI48) copy() RR { - return &EUI48{rr.Hdr, rr.Address} -} -func (rr *EUI64) copy() RR { - return &EUI64{rr.Hdr, rr.Address} -} -func (rr *GID) copy() RR { - return &GID{rr.Hdr, rr.Gid} -} -func (rr *GPOS) copy() RR { - return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude} -} -func (rr *HINFO) copy() RR { - return &HINFO{rr.Hdr, rr.Cpu, rr.Os} -} -func (rr *HIP) copy() RR { - RendezvousServers := make([]string, len(rr.RendezvousServers)) - copy(RendezvousServers, rr.RendezvousServers) - return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} -} -func (rr *KX) copy() RR { - return &KX{rr.Hdr, rr.Preference, rr.Exchanger} -} -func (rr *L32) copy() RR { 
- return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)} -} -func (rr *L64) copy() RR { - return &L64{rr.Hdr, rr.Preference, rr.Locator64} -} -func (rr *LOC) copy() RR { - return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} -} -func (rr *LP) copy() RR { - return &LP{rr.Hdr, rr.Preference, rr.Fqdn} -} -func (rr *MB) copy() RR { - return &MB{rr.Hdr, rr.Mb} -} -func (rr *MD) copy() RR { - return &MD{rr.Hdr, rr.Md} -} -func (rr *MF) copy() RR { - return &MF{rr.Hdr, rr.Mf} -} -func (rr *MG) copy() RR { - return &MG{rr.Hdr, rr.Mg} -} -func (rr *MINFO) copy() RR { - return &MINFO{rr.Hdr, rr.Rmail, rr.Email} -} -func (rr *MR) copy() RR { - return &MR{rr.Hdr, rr.Mr} -} -func (rr *MX) copy() RR { - return &MX{rr.Hdr, rr.Preference, rr.Mx} -} -func (rr *NAPTR) copy() RR { - return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} -} -func (rr *NID) copy() RR { - return &NID{rr.Hdr, rr.Preference, rr.NodeID} -} -func (rr *NIMLOC) copy() RR { - return &NIMLOC{rr.Hdr, rr.Locator} -} -func (rr *NINFO) copy() RR { - ZSData := make([]string, len(rr.ZSData)) - copy(ZSData, rr.ZSData) - return &NINFO{rr.Hdr, ZSData} -} -func (rr *NS) copy() RR { - return &NS{rr.Hdr, rr.Ns} -} -func (rr *NSAPPTR) copy() RR { - return &NSAPPTR{rr.Hdr, rr.Ptr} -} -func (rr *NSEC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap} -} -func (rr *NSEC3) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} -} -func (rr *NSEC3PARAM) copy() RR { - return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} -} -func (rr *NULL) copy() RR { - return &NULL{rr.Hdr, rr.Data} -} -func (rr *OPENPGPKEY) copy() RR { - return 
&OPENPGPKEY{rr.Hdr, rr.PublicKey} -} -func (rr *OPT) copy() RR { - Option := make([]EDNS0, len(rr.Option)) - for i, e := range rr.Option { - Option[i] = e.copy() - } - return &OPT{rr.Hdr, Option} -} -func (rr *PTR) copy() RR { - return &PTR{rr.Hdr, rr.Ptr} -} -func (rr *PX) copy() RR { - return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400} -} -func (rr *RFC3597) copy() RR { - return &RFC3597{rr.Hdr, rr.Rdata} -} -func (rr *RKEY) copy() RR { - return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} -} -func (rr *RP) copy() RR { - return &RP{rr.Hdr, rr.Mbox, rr.Txt} -} -func (rr *RRSIG) copy() RR { - return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} -} -func (rr *RT) copy() RR { - return &RT{rr.Hdr, rr.Preference, rr.Host} -} -func (rr *SMIMEA) copy() RR { - return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} -} -func (rr *SOA) copy() RR { - return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} -} -func (rr *SPF) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &SPF{rr.Hdr, Txt} -} -func (rr *SRV) copy() RR { - return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target} -} -func (rr *SSHFP) copy() RR { - return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint} -} -func (rr *TA) copy() RR { - return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} -} -func (rr *TALINK) copy() RR { - return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName} -} -func (rr *TKEY) copy() RR { - return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} -} -func (rr *TLSA) copy() RR { - return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} -} -func (rr *TSIG) copy() RR { - return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, 
rr.OtherLen, rr.OtherData} -} -func (rr *TXT) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &TXT{rr.Hdr, Txt} -} -func (rr *UID) copy() RR { - return &UID{rr.Hdr, rr.Uid} -} -func (rr *UINFO) copy() RR { - return &UINFO{rr.Hdr, rr.Uinfo} -} -func (rr *URI) copy() RR { - return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target} -} -func (rr *X25) copy() RR { - return &X25{rr.Hdr, rr.PSDNAddress} -} diff --git a/vendor/github.com/naoina/go-stringutil/.travis.yml b/vendor/github.com/naoina/go-stringutil/.travis.yml deleted file mode 100644 index 0489ad5ea..000000000 --- a/vendor/github.com/naoina/go-stringutil/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.4 - - 1.5 - - tip -install: - - go get -v github.com/naoina/go-stringutil -script: - - go test -v -bench . -benchmem ./... diff --git a/vendor/github.com/naoina/go-stringutil/LICENSE b/vendor/github.com/naoina/go-stringutil/LICENSE deleted file mode 100644 index 0fff1c58b..000000000 --- a/vendor/github.com/naoina/go-stringutil/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2015 Naoya Inada - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/naoina/go-stringutil/README.md b/vendor/github.com/naoina/go-stringutil/README.md deleted file mode 100644 index ecf7a5fae..000000000 --- a/vendor/github.com/naoina/go-stringutil/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# stringutil [![Build Status](https://travis-ci.org/naoina/go-stringutil.svg?branch=master)](https://travis-ci.org/naoina/go-stringutil) - -## Installation - - go get -u github.com/naoina/go-stringutil - -## Documentation - -See https://godoc.org/github.com/naoina/go-stringutil - -## License - -MIT diff --git a/vendor/github.com/naoina/go-stringutil/da.go b/vendor/github.com/naoina/go-stringutil/da.go deleted file mode 100644 index 8fe651659..000000000 --- a/vendor/github.com/naoina/go-stringutil/da.go +++ /dev/null @@ -1,253 +0,0 @@ -package stringutil - -import ( - "fmt" - "sort" - "unicode/utf8" -) - -const ( - terminationCharacter = '#' -) - -func mustDoubleArray(da *doubleArray, err error) *doubleArray { - if err != nil { - panic(err) - } - return da -} - -func (da *doubleArray) Build(keys []string) error { - records := makeRecords(keys) - if err := da.build(records, 1, 0, make(map[int]struct{})); err != nil { - return err - } - return nil -} - -type doubleArray struct { - bc []baseCheck - node []int -} - -func newDoubleArray(keys []string) (*doubleArray, error) { - da := &doubleArray{ - bc: []baseCheck{0}, - node: []int{-1}, // A start index is adjusting to 1 because 0 will be used as a mark of non-existent node. - } - if err := da.Build(keys); err != nil { - return nil, err - } - return da, nil -} - -// baseCheck contains BASE, CHECK and Extra flags. -// From the top, 22bits of BASE, 2bits of Extra flags and 8bits of CHECK. 
-// -// BASE (22bit) | Extra flags (2bit) | CHECK (8bit) -// |----------------------|--|--------| -// 32 10 8 0 -type baseCheck uint32 - -func (bc baseCheck) Base() int { - return int(bc >> 10) -} - -func (bc *baseCheck) SetBase(base int) { - *bc |= baseCheck(base) << 10 -} - -func (bc baseCheck) Check() byte { - return byte(bc) -} - -func (bc *baseCheck) SetCheck(check byte) { - *bc |= baseCheck(check) -} - -func (bc baseCheck) IsEmpty() bool { - return bc&0xfffffcff == 0 -} - -func (da *doubleArray) Lookup(path string) (length int) { - idx := 1 - tmpIdx := idx - for i := 0; i < len(path); i++ { - c := path[i] - tmpIdx = da.nextIndex(da.bc[tmpIdx].Base(), c) - if tmpIdx >= len(da.bc) || da.bc[tmpIdx].Check() != c { - break - } - idx = tmpIdx - } - if next := da.nextIndex(da.bc[idx].Base(), terminationCharacter); next < len(da.bc) && da.bc[next].Check() == terminationCharacter { - return da.node[da.bc[next].Base()] - } - return -1 -} - -func (da *doubleArray) LookupByBytes(path []byte) (length int) { - idx := 1 - tmpIdx := idx - for i := 0; i < len(path); i++ { - c := path[i] - tmpIdx = da.nextIndex(da.bc[tmpIdx].Base(), c) - if tmpIdx >= len(da.bc) || da.bc[tmpIdx].Check() != c { - break - } - idx = tmpIdx - } - if next := da.nextIndex(da.bc[idx].Base(), terminationCharacter); next < len(da.bc) && da.bc[next].Check() == terminationCharacter { - return da.node[da.bc[next].Base()] - } - return -1 -} - -func (da *doubleArray) build(srcs []record, idx, depth int, usedBase map[int]struct{}) error { - sort.Stable(recordSlice(srcs)) - base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase) - if err != nil { - return err - } - if leaf != nil { - da.bc[idx].SetBase(len(da.node)) - da.node = append(da.node, leaf.value) - } - for _, sib := range siblings { - da.setCheck(da.nextIndex(base, sib.c), sib.c) - } - for _, sib := range siblings { - if err := da.build(srcs[sib.start:sib.end], da.nextIndex(base, sib.c), depth+1, usedBase); err != nil { - return err - } - 
} - return nil -} - -func (da *doubleArray) setBase(i, base int) { - da.bc[i].SetBase(base) -} - -func (da *doubleArray) setCheck(i int, check byte) { - da.bc[i].SetCheck(check) -} - -func (da *doubleArray) findEmptyIndex(start int) int { - i := start - for ; i < len(da.bc); i++ { - if da.bc[i].IsEmpty() { - break - } - } - return i -} - -// findBase returns good BASE. -func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) { - for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) { - base = da.nextIndex(idx, firstChar) - if _, used := usedBase[base]; used { - continue - } - i := 0 - for ; i < len(siblings); i++ { - next := da.nextIndex(base, siblings[i].c) - if len(da.bc) <= next { - da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...) - } - if !da.bc[next].IsEmpty() { - break - } - } - if i == len(siblings) { - break - } - } - usedBase[base] = struct{}{} - return base -} - -func (da *doubleArray) arrange(records []record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) { - siblings, leaf, err = makeSiblings(records, depth) - if err != nil { - return -1, nil, nil, err - } - if len(siblings) < 1 { - return -1, nil, leaf, nil - } - base = da.findBase(siblings, idx, usedBase) - da.setBase(idx, base) - return base, siblings, leaf, err -} - -type sibling struct { - start int - end int - c byte -} - -func (da *doubleArray) nextIndex(base int, c byte) int { - return base ^ int(c) -} - -func makeSiblings(records []record, depth int) (sib []sibling, leaf *record, err error) { - var ( - pc byte - n int - ) - for i, r := range records { - if len(r.key) <= depth { - leaf = &r - continue - } - c := r.key[depth] - switch { - case pc < c: - sib = append(sib, sibling{start: i, c: c}) - case pc == c: - continue - default: - return nil, nil, fmt.Errorf("stringutil: BUG: records hasn't been sorted") - } - if n > 0 { - sib[n-1].end = i - } - pc = c - 
n++ - } - if n == 0 { - return nil, leaf, nil - } - sib[n-1].end = len(records) - return sib, leaf, nil -} - -type record struct { - key string - value int -} - -func makeRecords(srcs []string) (records []record) { - termChar := string(terminationCharacter) - for _, s := range srcs { - records = append(records, record{ - key: string(s + termChar), - value: utf8.RuneCountInString(s), - }) - } - return records -} - -type recordSlice []record - -func (rs recordSlice) Len() int { - return len(rs) -} - -func (rs recordSlice) Less(i, j int) bool { - return rs[i].key < rs[j].key -} - -func (rs recordSlice) Swap(i, j int) { - rs[i], rs[j] = rs[j], rs[i] -} diff --git a/vendor/github.com/naoina/go-stringutil/strings.go b/vendor/github.com/naoina/go-stringutil/strings.go deleted file mode 100644 index 881ca2c8f..000000000 --- a/vendor/github.com/naoina/go-stringutil/strings.go +++ /dev/null @@ -1,320 +0,0 @@ -package stringutil - -import ( - "sync" - "unicode" - "unicode/utf8" -) - -var ( - mu sync.Mutex - - // Based on https://github.com/golang/lint/blob/32a87160691b3c96046c0c678fe57c5bef761456/lint.go#L702 - commonInitialismMap = map[string]struct{}{ - "API": struct{}{}, - "ASCII": struct{}{}, - "CPU": struct{}{}, - "CSRF": struct{}{}, - "CSS": struct{}{}, - "DNS": struct{}{}, - "EOF": struct{}{}, - "GUID": struct{}{}, - "HTML": struct{}{}, - "HTTP": struct{}{}, - "HTTPS": struct{}{}, - "ID": struct{}{}, - "IP": struct{}{}, - "JSON": struct{}{}, - "LHS": struct{}{}, - "QPS": struct{}{}, - "RAM": struct{}{}, - "RHS": struct{}{}, - "RPC": struct{}{}, - "SLA": struct{}{}, - "SMTP": struct{}{}, - "SQL": struct{}{}, - "SSH": struct{}{}, - "TCP": struct{}{}, - "TLS": struct{}{}, - "TTL": struct{}{}, - "UDP": struct{}{}, - "UI": struct{}{}, - "UID": struct{}{}, - "UUID": struct{}{}, - "URI": struct{}{}, - "URL": struct{}{}, - "UTF8": struct{}{}, - "VM": struct{}{}, - "XML": struct{}{}, - "XSRF": struct{}{}, - "XSS": struct{}{}, - } - commonInitialisms = keys(commonInitialismMap) 
- commonInitialism = mustDoubleArray(newDoubleArray(commonInitialisms)) - longestLen = longestLength(commonInitialisms) - shortestLen = shortestLength(commonInitialisms, longestLen) -) - -// ToUpperCamelCase returns a copy of the string s with all Unicode letters mapped to their camel case. -// It will convert to upper case previous letter of '_' and first letter, and remove letter of '_'. -func ToUpperCamelCase(s string) string { - if s == "" { - return "" - } - upper := true - start := 0 - result := make([]byte, 0, len(s)) - var runeBuf [utf8.UTFMax]byte - var initialism []byte - for _, c := range s { - if c == '_' { - upper = true - candidate := string(result[start:]) - initialism = initialism[:0] - for _, r := range candidate { - if r < utf8.RuneSelf { - initialism = append(initialism, toUpperASCII(byte(r))) - } else { - n := utf8.EncodeRune(runeBuf[:], unicode.ToUpper(r)) - initialism = append(initialism, runeBuf[:n]...) - } - } - if length := commonInitialism.LookupByBytes(initialism); length > 0 { - result = append(result[:start], initialism...) - } - start = len(result) - continue - } - if upper { - if c < utf8.RuneSelf { - result = append(result, toUpperASCII(byte(c))) - } else { - n := utf8.EncodeRune(runeBuf[:], unicode.ToUpper(c)) - result = append(result, runeBuf[:n]...) - } - upper = false - continue - } - if c < utf8.RuneSelf { - result = append(result, byte(c)) - } else { - n := utf8.EncodeRune(runeBuf[:], c) - result = append(result, runeBuf[:n]...) - } - } - candidate := string(result[start:]) - initialism = initialism[:0] - for _, r := range candidate { - if r < utf8.RuneSelf { - initialism = append(initialism, toUpperASCII(byte(r))) - } else { - n := utf8.EncodeRune(runeBuf[:], unicode.ToUpper(r)) - initialism = append(initialism, runeBuf[:n]...) - } - } - if length := commonInitialism.LookupByBytes(initialism); length > 0 { - result = append(result[:start], initialism...) 
- } - return string(result) -} - -// ToUpperCamelCaseASCII is similar to ToUpperCamelCase, but optimized for -// only the ASCII characters. -// ToUpperCamelCaseASCII is faster than ToUpperCamelCase, but doesn't work if -// contains non-ASCII characters. -func ToUpperCamelCaseASCII(s string) string { - if s == "" { - return "" - } - upper := true - start := 0 - result := make([]byte, 0, len(s)) - var initialism []byte - for i := 0; i < len(s); i++ { - c := s[i] - if c == '_' { - upper = true - candidate := result[start:] - initialism = initialism[:0] - for _, b := range candidate { - initialism = append(initialism, toUpperASCII(b)) - } - if length := commonInitialism.LookupByBytes(initialism); length > 0 { - result = append(result[:start], initialism...) - } - start = len(result) - continue - } - if upper { - result = append(result, toUpperASCII(c)) - upper = false - continue - } - result = append(result, c) - } - candidate := result[start:] - initialism = initialism[:0] - for _, b := range candidate { - initialism = append(initialism, toUpperASCII(b)) - } - if length := commonInitialism.LookupByBytes(initialism); length > 0 { - result = append(result[:start], initialism...) - } - return string(result) -} - -// ToSnakeCase returns a copy of the string s with all Unicode letters mapped to their snake case. -// It will insert letter of '_' at position of previous letter of uppercase and all -// letters convert to lower case. -// ToSnakeCase does not insert '_' letter into a common initialism word like ID, URL and so on. 
-func ToSnakeCase(s string) string { - if s == "" { - return "" - } - result := make([]byte, 0, len(s)) - var runeBuf [utf8.UTFMax]byte - var j, skipCount int - for i, c := range s { - if i < skipCount { - continue - } - if unicode.IsUpper(c) { - if i != 0 { - result = append(result, '_') - } - next := nextIndex(j, len(s)) - if length := commonInitialism.Lookup(s[j:next]); length > 0 { - for _, r := range s[j : j+length] { - if r < utf8.RuneSelf { - result = append(result, toLowerASCII(byte(r))) - } else { - n := utf8.EncodeRune(runeBuf[:], unicode.ToLower(r)) - result = append(result, runeBuf[:n]...) - } - } - j += length - 1 - skipCount = i + length - continue - } - } - if c < utf8.RuneSelf { - result = append(result, toLowerASCII(byte(c))) - } else { - n := utf8.EncodeRune(runeBuf[:], unicode.ToLower(c)) - result = append(result, runeBuf[:n]...) - } - j++ - } - return string(result) -} - -// ToSnakeCaseASCII is similar to ToSnakeCase, but optimized for only the ASCII -// characters. -// ToSnakeCaseASCII is faster than ToSnakeCase, but doesn't work correctly if -// contains non-ASCII characters. -func ToSnakeCaseASCII(s string) string { - if s == "" { - return "" - } - result := make([]byte, 0, len(s)) - for i := 0; i < len(s); i++ { - c := s[i] - if isUpperASCII(c) { - if i != 0 { - result = append(result, '_') - } - if k := i + shortestLen - 1; k < len(s) && isUpperASCII(s[k]) { - if length := commonInitialism.Lookup(s[i:nextIndex(i, len(s))]); length > 0 { - for j, buf := 0, s[i:i+length]; j < len(buf); j++ { - result = append(result, toLowerASCII(buf[j])) - } - i += length - 1 - continue - } - } - } - result = append(result, toLowerASCII(c)) - } - return string(result) -} - -// AddCommonInitialism adds ss to list of common initialisms. 
-func AddCommonInitialism(ss ...string) { - mu.Lock() - defer mu.Unlock() - for _, s := range ss { - commonInitialismMap[s] = struct{}{} - } - commonInitialisms = keys(commonInitialismMap) - commonInitialism = mustDoubleArray(newDoubleArray(commonInitialisms)) - longestLen = longestLength(commonInitialisms) - shortestLen = shortestLength(commonInitialisms, longestLen) -} - -// DelCommonInitialism deletes ss from list of common initialisms. -func DelCommonInitialism(ss ...string) { - mu.Lock() - defer mu.Unlock() - for _, s := range ss { - delete(commonInitialismMap, s) - } - commonInitialisms = keys(commonInitialismMap) - commonInitialism = mustDoubleArray(newDoubleArray(commonInitialisms)) - longestLen = longestLength(commonInitialisms) - shortestLen = shortestLength(commonInitialisms, longestLen) -} - -func isUpperASCII(c byte) bool { - return 'A' <= c && c <= 'Z' -} - -func isLowerASCII(c byte) bool { - return 'a' <= c && c <= 'z' -} - -func toUpperASCII(c byte) byte { - if isLowerASCII(c) { - return c - ('a' - 'A') - } - return c -} - -func toLowerASCII(c byte) byte { - if isUpperASCII(c) { - return c + 'a' - 'A' - } - return c -} - -func nextIndex(i, maxlen int) int { - if n := i + longestLen; n < maxlen { - return n - } - return maxlen -} - -func keys(m map[string]struct{}) []string { - result := make([]string, 0, len(m)) - for k := range m { - result = append(result, k) - } - return result -} - -func shortestLength(strs []string, shortest int) int { - for _, s := range strs { - if candidate := utf8.RuneCountInString(s); candidate < shortest { - shortest = candidate - } - } - return shortest -} - -func longestLength(strs []string) (longest int) { - for _, s := range strs { - if candidate := utf8.RuneCountInString(s); candidate > longest { - longest = candidate - } - } - return longest -} diff --git a/vendor/github.com/naoina/toml/.travis.yml b/vendor/github.com/naoina/toml/.travis.yml deleted file mode 100644 index 7b7551caa..000000000 --- 
a/vendor/github.com/naoina/toml/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go - -go: - - 1.3 - - 1.x - -install: - - go get -t -v ./... - -script: - - go test ./... diff --git a/vendor/github.com/naoina/toml/LICENSE b/vendor/github.com/naoina/toml/LICENSE deleted file mode 100644 index e65039ad8..000000000 --- a/vendor/github.com/naoina/toml/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2014 Naoya Inada - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/naoina/toml/README.md b/vendor/github.com/naoina/toml/README.md deleted file mode 100644 index 1c0143348..000000000 --- a/vendor/github.com/naoina/toml/README.md +++ /dev/null @@ -1,392 +0,0 @@ -# TOML parser and encoder library for Golang [![Build Status](https://travis-ci.org/naoina/toml.png?branch=master)](https://travis-ci.org/naoina/toml) - -[TOML](https://github.com/toml-lang/toml) parser and encoder library for [Golang](http://golang.org/). 
- -This library is compatible with TOML version [v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md). - -## Installation - - go get -u github.com/naoina/toml - -## Usage - -The following TOML save as `example.toml`. - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Lance Uppercut" -dob = 1979-05-27T07:32:00-08:00 # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -Then above TOML will mapping to `tomlConfig` struct using `toml.Unmarshal`. - -```go -package main - -import ( - "io/ioutil" - "os" - "time" - - "github.com/naoina/toml" -) - -type tomlConfig struct { - Title string - Owner struct { - Name string - Dob time.Time - } - Database struct { - Server string - Ports []int - ConnectionMax uint - Enabled bool - } - Servers map[string]ServerInfo - Clients struct { - Data [][]interface{} - Hosts []string - } -} - -type ServerInfo struct { - IP net.IP - DC string -} - -func main() { - f, err := os.Open("example.toml") - if err != nil { - panic(err) - } - defer f.Close() - var config Config - if err := toml.NewDecoder(f).Decode(&config); err != nil { - panic(err) - } - - // then to use the unmarshaled config... - fmt.Println("IP of server 'alpha':", config.Servers["alpha"].IP) -} -``` - -## Mappings - -A key and value of TOML will map to the corresponding field. -The fields of struct for mapping must be exported. 
- -The rules of the mapping of key are following: - -#### Exact matching - -```toml -timeout_seconds = 256 -``` - -```go -type Config struct { - Timeout_seconds int -} -``` - -#### Camelcase matching - -```toml -server_name = "srv1" -``` - -```go -type Config struct { - ServerName string -} -``` - -#### Uppercase matching - -```toml -ip = "10.0.0.1" -``` - -```go -type Config struct { - IP string -} -``` - -See the following examples for the value mappings. - -### String - -```toml -val = "string" -``` - -```go -type Config struct { - Val string -} -``` - -### Integer - -```toml -val = 100 -``` - -```go -type Config struct { - Val int -} -``` - -All types that can be used are following: - -* int8 (from `-128` to `127`) -* int16 (from `-32768` to `32767`) -* int32 (from `-2147483648` to `2147483647`) -* int64 (from `-9223372036854775808` to `9223372036854775807`) -* int (same as `int32` on 32bit environment, or `int64` on 64bit environment) -* uint8 (from `0` to `255`) -* uint16 (from `0` to `65535`) -* uint32 (from `0` to `4294967295`) -* uint64 (from `0` to `18446744073709551615`) -* uint (same as `uint` on 32bit environment, or `uint64` on 64bit environment) - -### Float - -```toml -val = 3.1415 -``` - -```go -type Config struct { - Val float32 -} -``` - -All types that can be used are following: - -* float32 -* float64 - -### Boolean - -```toml -val = true -``` - -```go -type Config struct { - Val bool -} -``` - -### Datetime - -```toml -val = 2014-09-28T21:27:39Z -``` - -```go -type Config struct { - Val time.Time -} -``` - -### Array - -```toml -val = ["a", "b", "c"] -``` - -```go -type Config struct { - Val []string -} -``` - -Also following examples all can be mapped: - -```toml -val1 = [1, 2, 3] -val2 = [["a", "b"], ["c", "d"]] -val3 = [[1, 2, 3], ["a", "b", "c"]] -val4 = [[1, 2, 3], [["a", "b"], [true, false]]] -``` - -```go -type Config struct { - Val1 []int - Val2 [][]string - Val3 [][]interface{} - Val4 [][]interface{} -} -``` - -### Table - -```toml 
-[server] -type = "app" - - [server.development] - ip = "10.0.0.1" - - [server.production] - ip = "10.0.0.2" -``` - -```go -type Config struct { - Server map[string]Server -} - -type Server struct { - IP string -} -``` - -You can also use the following struct instead of map of struct. - -```go -type Config struct { - Server struct { - Development Server - Production Server - } -} - -type Server struct { - IP string -} -``` - -### Array of Tables - -```toml -[[fruit]] - name = "apple" - - [fruit.physical] - color = "red" - shape = "round" - - [[fruit.variety]] - name = "red delicious" - - [[fruit.variety]] - name = "granny smith" - -[[fruit]] - name = "banana" - - [[fruit.variety]] - name = "plantain" -``` - -```go -type Config struct { - Fruit []struct { - Name string - Physical struct { - Color string - Shape string - } - Variety []struct { - Name string - } - } -} -``` - -### Using the `encoding.TextUnmarshaler` interface - -Package toml supports `encoding.TextUnmarshaler` (and `encoding.TextMarshaler`). You can -use it to apply custom marshaling rules for certain types. The `UnmarshalText` method is -called with the value text found in the TOML input. TOML strings are passed unquoted. - -```toml -duration = "10s" -``` - -```go -import time - -type Duration time.Duration - -// UnmarshalText implements encoding.TextUnmarshaler -func (d *Duration) UnmarshalText(data []byte) error { - duration, err := time.ParseDuration(string(data)) - if err == nil { - *d = Duration(duration) - } - return err -} - -// MarshalText implements encoding.TextMarshaler -func (d Duration) MarshalText() ([]byte, error) { - return []byte(time.Duration(d).String()), nil -} - -type ConfigWithDuration struct { - Duration Duration -} -``` -### Using the `toml.UnmarshalerRec` interface - -You can also override marshaling rules specifically for TOML using the `UnmarshalerRec` -and `MarshalerRec` interfaces. These are useful if you want to control how structs or -arrays are handled. 
You can apply additional validation or set unexported struct fields. - -Note: `encoding.TextUnmarshaler` and `encoding.TextMarshaler` should be preferred for -simple (scalar) values because they're also compatible with other formats like JSON or -YAML. - -[See the UnmarshalerRec example](https://godoc.org/github.com/naoina/toml/#example_UnmarshalerRec). - -### Using the `toml.Unmarshaler` interface - -If you want to deal with raw TOML syntax, use the `Unmarshaler` and `Marshaler` -interfaces. Their input and output is raw TOML syntax. As such, these interfaces are -useful if you want to handle TOML at the syntax level. - -[See the Unmarshaler example](https://godoc.org/github.com/naoina/toml/#example_Unmarshaler). - -## API documentation - -See [Godoc](http://godoc.org/github.com/naoina/toml). - -## License - -MIT diff --git a/vendor/github.com/naoina/toml/ast/ast.go b/vendor/github.com/naoina/toml/ast/ast.go deleted file mode 100644 index 4868e2e1a..000000000 --- a/vendor/github.com/naoina/toml/ast/ast.go +++ /dev/null @@ -1,192 +0,0 @@ -package ast - -import ( - "strconv" - "strings" - "time" -) - -type Position struct { - Begin int - End int -} - -type Value interface { - Pos() int - End() int - Source() string -} - -type String struct { - Position Position - Value string - Data []rune -} - -func (s *String) Pos() int { - return s.Position.Begin -} - -func (s *String) End() int { - return s.Position.End -} - -func (s *String) Source() string { - return string(s.Data) -} - -type Integer struct { - Position Position - Value string - Data []rune -} - -func (i *Integer) Pos() int { - return i.Position.Begin -} - -func (i *Integer) End() int { - return i.Position.End -} - -func (i *Integer) Source() string { - return string(i.Data) -} - -func (i *Integer) Int() (int64, error) { - return strconv.ParseInt(i.Value, 10, 64) -} - -type Float struct { - Position Position - Value string - Data []rune -} - -func (f *Float) Pos() int { - return f.Position.Begin -} - -func (f 
*Float) End() int { - return f.Position.End -} - -func (f *Float) Source() string { - return string(f.Data) -} - -func (f *Float) Float() (float64, error) { - return strconv.ParseFloat(f.Value, 64) -} - -type Boolean struct { - Position Position - Value string - Data []rune -} - -func (b *Boolean) Pos() int { - return b.Position.Begin -} - -func (b *Boolean) End() int { - return b.Position.End -} - -func (b *Boolean) Source() string { - return string(b.Data) -} - -func (b *Boolean) Boolean() (bool, error) { - return strconv.ParseBool(b.Value) -} - -type Datetime struct { - Position Position - Value string - Data []rune -} - -func (d *Datetime) Pos() int { - return d.Position.Begin -} - -func (d *Datetime) End() int { - return d.Position.End -} - -func (d *Datetime) Source() string { - return string(d.Data) -} - -func (d *Datetime) Time() (time.Time, error) { - switch { - case !strings.Contains(d.Value, ":"): - return time.Parse("2006-01-02", d.Value) - case !strings.Contains(d.Value, "-"): - return time.Parse("15:04:05.999999999", d.Value) - default: - return time.Parse(time.RFC3339Nano, d.Value) - } -} - -type Array struct { - Position Position - Value []Value - Data []rune -} - -func (a *Array) Pos() int { - return a.Position.Begin -} - -func (a *Array) End() int { - return a.Position.End -} - -func (a *Array) Source() string { - return string(a.Data) -} - -type TableType uint8 - -const ( - TableTypeNormal TableType = iota - TableTypeArray -) - -var tableTypes = [...]string{ - "normal", - "array", -} - -func (t TableType) String() string { - return tableTypes[t] -} - -type Table struct { - Position Position - Line int - Name string - Fields map[string]interface{} - Type TableType - Data []rune -} - -func (t *Table) Pos() int { - return t.Position.Begin -} - -func (t *Table) End() int { - return t.Position.End -} - -func (t *Table) Source() string { - return string(t.Data) -} - -type KeyValue struct { - Key string - Value Value - Line int -} diff --git 
a/vendor/github.com/naoina/toml/config.go b/vendor/github.com/naoina/toml/config.go deleted file mode 100644 index 06bb9493b..000000000 --- a/vendor/github.com/naoina/toml/config.go +++ /dev/null @@ -1,86 +0,0 @@ -package toml - -import ( - "fmt" - "io" - "reflect" - "strings" - - stringutil "github.com/naoina/go-stringutil" - "github.com/naoina/toml/ast" -) - -// Config contains options for encoding and decoding. -type Config struct { - // NormFieldName is used to match TOML keys to struct fields. The function runs for - // both input keys and struct field names and should return a string that makes the - // two match. You must set this field to use the decoder. - // - // Example: The function in the default config removes _ and lowercases all keys. This - // allows a key called 'api_key' to match the struct field 'APIKey' because both are - // normalized to 'apikey'. - // - // Note that NormFieldName is not used for fields which define a TOML - // key through the struct tag. - NormFieldName func(typ reflect.Type, keyOrField string) string - - // FieldToKey determines the TOML key of a struct field when encoding. - // You must set this field to use the encoder. - // - // Note that FieldToKey is not used for fields which define a TOML - // key through the struct tag. - FieldToKey func(typ reflect.Type, field string) string - - // MissingField, if non-nil, is called when the decoder encounters a key for which no - // matching struct field exists. The default behavior is to return an error. - MissingField func(typ reflect.Type, key string) error -} - -// DefaultConfig contains the default options for encoding and decoding. -// Snake case (i.e. 'foo_bar') is used for key names. 
-var DefaultConfig = Config{ - NormFieldName: defaultNormFieldName, - FieldToKey: snakeCase, -} - -func defaultNormFieldName(typ reflect.Type, s string) string { - return strings.Replace(strings.ToLower(s), "_", "", -1) -} - -func snakeCase(typ reflect.Type, s string) string { - return stringutil.ToSnakeCase(s) -} - -func defaultMissingField(typ reflect.Type, key string) error { - return fmt.Errorf("field corresponding to `%s' is not defined in %v", key, typ) -} - -// NewEncoder returns a new Encoder that writes to w. -// It is shorthand for DefaultConfig.NewEncoder(w). -func NewEncoder(w io.Writer) *Encoder { - return DefaultConfig.NewEncoder(w) -} - -// Marshal returns the TOML encoding of v. -// It is shorthand for DefaultConfig.Marshal(v). -func Marshal(v interface{}) ([]byte, error) { - return DefaultConfig.Marshal(v) -} - -// Unmarshal parses the TOML data and stores the result in the value pointed to by v. -// It is shorthand for DefaultConfig.Unmarshal(data, v). -func Unmarshal(data []byte, v interface{}) error { - return DefaultConfig.Unmarshal(data, v) -} - -// UnmarshalTable applies the contents of an ast.Table to the value pointed at by v. -// It is shorthand for DefaultConfig.UnmarshalTable(t, v). -func UnmarshalTable(t *ast.Table, v interface{}) error { - return DefaultConfig.UnmarshalTable(t, v) -} - -// NewDecoder returns a new Decoder that reads from r. -// It is shorthand for DefaultConfig.NewDecoder(r). -func NewDecoder(r io.Reader) *Decoder { - return DefaultConfig.NewDecoder(r) -} diff --git a/vendor/github.com/naoina/toml/decode.go b/vendor/github.com/naoina/toml/decode.go deleted file mode 100644 index b3c169eb1..000000000 --- a/vendor/github.com/naoina/toml/decode.go +++ /dev/null @@ -1,478 +0,0 @@ -// Package toml encodes and decodes the TOML configuration format using reflection. -// -// This library is compatible with TOML version v0.4.0. 
-package toml - -import ( - "encoding" - "fmt" - "io" - "io/ioutil" - "reflect" - "strconv" - "strings" - "time" - - "github.com/naoina/toml/ast" -) - -const ( - tableSeparator = '.' -) - -var ( - escapeReplacer = strings.NewReplacer( - "\b", "\\n", - "\f", "\\f", - "\n", "\\n", - "\r", "\\r", - "\t", "\\t", - ) - underscoreReplacer = strings.NewReplacer( - "_", "", - ) -) - -var timeType = reflect.TypeOf(time.Time{}) - -// Unmarshal parses the TOML data and stores the result in the value pointed to by v. -// -// Unmarshal will mapped to v that according to following rules: -// -// TOML strings to string -// TOML integers to any int type -// TOML floats to float32 or float64 -// TOML booleans to bool -// TOML datetimes to time.Time -// TOML arrays to any type of slice -// TOML tables to struct or map -// TOML array tables to slice of struct or map -func (cfg *Config) Unmarshal(data []byte, v interface{}) error { - table, err := Parse(data) - if err != nil { - return err - } - if err := cfg.UnmarshalTable(table, v); err != nil { - return err - } - return nil -} - -// A Decoder reads and decodes TOML from an input stream. -type Decoder struct { - r io.Reader - cfg *Config -} - -// NewDecoder returns a new Decoder that reads from r. -// Note that it reads all from r before parsing it. -func (cfg *Config) NewDecoder(r io.Reader) *Decoder { - return &Decoder{r, cfg} -} - -// Decode parses the TOML data from its input and stores it in the value pointed to by v. -// See the documentation for Unmarshal for details about the conversion of TOML into a Go value. -func (d *Decoder) Decode(v interface{}) error { - b, err := ioutil.ReadAll(d.r) - if err != nil { - return err - } - return d.cfg.Unmarshal(b, v) -} - -// UnmarshalerRec may be implemented by types to customize their behavior when being -// unmarshaled from TOML. You can use it to implement custom validation or to set -// unexported fields. 
-// -// UnmarshalTOML receives a function that can be called to unmarshal the original TOML -// value into a field or variable. It is safe to call the function more than once if -// necessary. -type UnmarshalerRec interface { - UnmarshalTOML(fn func(interface{}) error) error -} - -// Unmarshaler can be used to capture and process raw TOML source of a table or value. -// UnmarshalTOML must copy the input if it wishes to retain it after returning. -// -// Note: this interface is retained for backwards compatibility. You probably want -// to implement encoding.TextUnmarshaler or UnmarshalerRec instead. -type Unmarshaler interface { - UnmarshalTOML(input []byte) error -} - -// UnmarshalTable applies the contents of an ast.Table to the value pointed at by v. -// -// UnmarshalTable will mapped to v that according to following rules: -// -// TOML strings to string -// TOML integers to any int type -// TOML floats to float32 or float64 -// TOML booleans to bool -// TOML datetimes to time.Time -// TOML arrays to any type of slice -// TOML tables to struct or map -// TOML array tables to slice of struct or map -func (cfg *Config) UnmarshalTable(t *ast.Table, v interface{}) error { - rv := reflect.ValueOf(v) - toplevelMap := rv.Kind() == reflect.Map - if (!toplevelMap && rv.Kind() != reflect.Ptr) || rv.IsNil() { - return &invalidUnmarshalError{reflect.TypeOf(v)} - } - return unmarshalTable(cfg, rv, t, toplevelMap) -} - -// used for UnmarshalerRec. 
-func unmarshalTableOrValue(cfg *Config, rv reflect.Value, av interface{}) error { - if (rv.Kind() != reflect.Ptr && rv.Kind() != reflect.Map) || rv.IsNil() { - return &invalidUnmarshalError{rv.Type()} - } - rv = indirect(rv) - - switch av.(type) { - case *ast.KeyValue, *ast.Table, []*ast.Table: - if err := unmarshalField(cfg, rv, av); err != nil { - return lineError(fieldLineNumber(av), err) - } - return nil - case ast.Value: - return setValue(cfg, rv, av.(ast.Value)) - default: - panic(fmt.Sprintf("BUG: unhandled AST node type %T", av)) - } -} - -// unmarshalTable unmarshals the fields of a table into a struct or map. -// -// toplevelMap is true when rv is an (unadressable) map given to UnmarshalTable. In this -// (special) case, the map is used as-is instead of creating a new map. -func unmarshalTable(cfg *Config, rv reflect.Value, t *ast.Table, toplevelMap bool) error { - rv = indirect(rv) - if err, ok := setUnmarshaler(cfg, rv, t); ok { - return lineError(t.Line, err) - } - switch { - case rv.Kind() == reflect.Struct: - fc := makeFieldCache(cfg, rv.Type()) - for key, fieldAst := range t.Fields { - fv, fieldName, err := fc.findField(cfg, rv, key) - if err != nil { - return lineError(fieldLineNumber(fieldAst), err) - } - if fv.IsValid() { - if err := unmarshalField(cfg, fv, fieldAst); err != nil { - return lineErrorField(fieldLineNumber(fieldAst), rv.Type().String()+"."+fieldName, err) - } - } - } - case rv.Kind() == reflect.Map || isEface(rv): - m := rv - if !toplevelMap { - if rv.Kind() == reflect.Interface { - m = reflect.ValueOf(make(map[string]interface{})) - } else { - m = reflect.MakeMap(rv.Type()) - } - } - elemtyp := m.Type().Elem() - for key, fieldAst := range t.Fields { - kv, err := unmarshalMapKey(m.Type().Key(), key) - if err != nil { - return lineError(fieldLineNumber(fieldAst), err) - } - fv := reflect.New(elemtyp).Elem() - if err := unmarshalField(cfg, fv, fieldAst); err != nil { - return lineError(fieldLineNumber(fieldAst), err) - } - 
m.SetMapIndex(kv, fv) - } - if !toplevelMap { - rv.Set(m) - } - default: - return lineError(t.Line, &unmarshalTypeError{"table", "struct or map", rv.Type()}) - } - return nil -} - -func fieldLineNumber(fieldAst interface{}) int { - switch av := fieldAst.(type) { - case *ast.KeyValue: - return av.Line - case *ast.Table: - return av.Line - case []*ast.Table: - return av[0].Line - default: - panic(fmt.Sprintf("BUG: unhandled node type %T", fieldAst)) - } -} - -func unmarshalField(cfg *Config, rv reflect.Value, fieldAst interface{}) error { - switch av := fieldAst.(type) { - case *ast.KeyValue: - return setValue(cfg, rv, av.Value) - case *ast.Table: - return unmarshalTable(cfg, rv, av, false) - case []*ast.Table: - rv = indirect(rv) - if err, ok := setUnmarshaler(cfg, rv, fieldAst); ok { - return err - } - var slice reflect.Value - switch { - case rv.Kind() == reflect.Slice: - slice = reflect.MakeSlice(rv.Type(), len(av), len(av)) - case isEface(rv): - slice = reflect.ValueOf(make([]interface{}, len(av))) - default: - return &unmarshalTypeError{"array table", "slice", rv.Type()} - } - for i, tbl := range av { - vv := reflect.New(slice.Type().Elem()).Elem() - if err := unmarshalTable(cfg, vv, tbl, false); err != nil { - return err - } - slice.Index(i).Set(vv) - } - rv.Set(slice) - default: - panic(fmt.Sprintf("BUG: unhandled AST node type %T", av)) - } - return nil -} - -func unmarshalMapKey(typ reflect.Type, key string) (reflect.Value, error) { - rv := reflect.New(typ).Elem() - if u, ok := rv.Addr().Interface().(encoding.TextUnmarshaler); ok { - return rv, u.UnmarshalText([]byte(key)) - } - switch typ.Kind() { - case reflect.String: - rv.SetString(key) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - i, err := strconv.ParseInt(key, 10, int(typ.Size()*8)) - if err != nil { - return rv, convertNumError(typ.Kind(), err) - } - rv.SetInt(i) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - 
i, err := strconv.ParseUint(key, 10, int(typ.Size()*8)) - if err != nil { - return rv, convertNumError(typ.Kind(), err) - } - rv.SetUint(i) - default: - return rv, fmt.Errorf("invalid map key type %s", typ) - } - return rv, nil -} - -func setValue(cfg *Config, lhs reflect.Value, val ast.Value) error { - lhs = indirect(lhs) - if err, ok := setUnmarshaler(cfg, lhs, val); ok { - return err - } - if err, ok := setTextUnmarshaler(lhs, val); ok { - return err - } - switch v := val.(type) { - case *ast.Integer: - return setInt(lhs, v) - case *ast.Float: - return setFloat(lhs, v) - case *ast.String: - return setString(lhs, v) - case *ast.Boolean: - return setBoolean(lhs, v) - case *ast.Datetime: - return setDatetime(lhs, v) - case *ast.Array: - return setArray(cfg, lhs, v) - default: - panic(fmt.Sprintf("BUG: unhandled node type %T", v)) - } -} - -func indirect(rv reflect.Value) reflect.Value { - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - return rv -} - -func setUnmarshaler(cfg *Config, lhs reflect.Value, av interface{}) (error, bool) { - if lhs.CanAddr() { - if u, ok := lhs.Addr().Interface().(UnmarshalerRec); ok { - err := u.UnmarshalTOML(func(v interface{}) error { - return unmarshalTableOrValue(cfg, reflect.ValueOf(v), av) - }) - return err, true - } - if u, ok := lhs.Addr().Interface().(Unmarshaler); ok { - return u.UnmarshalTOML(unmarshalerSource(av)), true - } - } - return nil, false -} - -func unmarshalerSource(av interface{}) []byte { - var source []byte - switch av := av.(type) { - case []*ast.Table: - for i, tab := range av { - source = append(source, tab.Source()...) 
- if i != len(av)-1 { - source = append(source, '\n') - } - } - case ast.Value: - source = []byte(av.Source()) - default: - panic(fmt.Sprintf("BUG: unhandled node type %T", av)) - } - return source -} - -func setTextUnmarshaler(lhs reflect.Value, val ast.Value) (error, bool) { - if !lhs.CanAddr() { - return nil, false - } - u, ok := lhs.Addr().Interface().(encoding.TextUnmarshaler) - if !ok || lhs.Type() == timeType { - return nil, false - } - var data string - switch val := val.(type) { - case *ast.Array: - return &unmarshalTypeError{"array", "", lhs.Type()}, true - case *ast.String: - data = val.Value - default: - data = val.Source() - } - return u.UnmarshalText([]byte(data)), true -} - -func setInt(fv reflect.Value, v *ast.Integer) error { - k := fv.Kind() - switch { - case k >= reflect.Int && k <= reflect.Int64: - i, err := strconv.ParseInt(v.Value, 10, int(fv.Type().Size()*8)) - if err != nil { - return convertNumError(fv.Kind(), err) - } - fv.SetInt(i) - case k >= reflect.Uint && k <= reflect.Uintptr: - i, err := strconv.ParseUint(v.Value, 10, int(fv.Type().Size()*8)) - if err != nil { - return convertNumError(fv.Kind(), err) - } - fv.SetUint(i) - case isEface(fv): - i, err := strconv.ParseInt(v.Value, 10, 64) - if err != nil { - return convertNumError(reflect.Int64, err) - } - fv.Set(reflect.ValueOf(i)) - default: - return &unmarshalTypeError{"integer", "", fv.Type()} - } - return nil -} - -func setFloat(fv reflect.Value, v *ast.Float) error { - f, err := v.Float() - if err != nil { - return err - } - switch { - case fv.Kind() == reflect.Float32 || fv.Kind() == reflect.Float64: - if fv.OverflowFloat(f) { - return &overflowError{fv.Kind(), v.Value} - } - fv.SetFloat(f) - case isEface(fv): - fv.Set(reflect.ValueOf(f)) - default: - return &unmarshalTypeError{"float", "", fv.Type()} - } - return nil -} - -func setString(fv reflect.Value, v *ast.String) error { - switch { - case fv.Kind() == reflect.String: - fv.SetString(v.Value) - case isEface(fv): - 
fv.Set(reflect.ValueOf(v.Value)) - default: - return &unmarshalTypeError{"string", "", fv.Type()} - } - return nil -} - -func setBoolean(fv reflect.Value, v *ast.Boolean) error { - b, _ := v.Boolean() - switch { - case fv.Kind() == reflect.Bool: - fv.SetBool(b) - case isEface(fv): - fv.Set(reflect.ValueOf(b)) - default: - return &unmarshalTypeError{"boolean", "", fv.Type()} - } - return nil -} - -func setDatetime(rv reflect.Value, v *ast.Datetime) error { - t, err := v.Time() - if err != nil { - return err - } - if !timeType.AssignableTo(rv.Type()) { - return &unmarshalTypeError{"datetime", "", rv.Type()} - } - rv.Set(reflect.ValueOf(t)) - return nil -} - -func setArray(cfg *Config, rv reflect.Value, v *ast.Array) error { - var slicetyp reflect.Type - switch { - case rv.Kind() == reflect.Slice: - slicetyp = rv.Type() - case isEface(rv): - slicetyp = reflect.SliceOf(rv.Type()) - default: - return &unmarshalTypeError{"array", "slice", rv.Type()} - } - - if len(v.Value) == 0 { - // Ensure defined slices are always set to a non-nil value. 
- rv.Set(reflect.MakeSlice(slicetyp, 0, 0)) - return nil - } - - tomltyp := reflect.TypeOf(v.Value[0]) - slice := reflect.MakeSlice(slicetyp, len(v.Value), len(v.Value)) - typ := slicetyp.Elem() - for i, vv := range v.Value { - if i > 0 && tomltyp != reflect.TypeOf(vv) { - return errArrayMultiType - } - tmp := reflect.New(typ).Elem() - if err := setValue(cfg, tmp, vv); err != nil { - return err - } - slice.Index(i).Set(tmp) - } - rv.Set(slice) - return nil -} - -func isEface(rv reflect.Value) bool { - return rv.Kind() == reflect.Interface && rv.Type().NumMethod() == 0 -} diff --git a/vendor/github.com/naoina/toml/encode.go b/vendor/github.com/naoina/toml/encode.go deleted file mode 100644 index ae6bfd575..000000000 --- a/vendor/github.com/naoina/toml/encode.go +++ /dev/null @@ -1,398 +0,0 @@ -package toml - -import ( - "bytes" - "encoding" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "time" - - "github.com/naoina/toml/ast" -) - -const ( - tagOmitempty = "omitempty" - tagSkip = "-" -) - -// Marshal returns the TOML encoding of v. -// -// Struct values encode as TOML. Each exported struct field becomes a field of -// the TOML structure unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// -// The "toml" key in the struct field's tag value is the key name, followed by -// an optional comma and options. Examples: -// -// // Field is ignored by this package. -// Field int `toml:"-"` -// -// // Field appears in TOML as key "myName". -// Field int `toml:"myName"` -// -// // Field appears in TOML as key "myName" and the field is omitted from the -// // result of encoding if its value is empty. -// Field int `toml:"myName,omitempty"` -// -// // Field appears in TOML as key "field", but the field is skipped if -// // empty. Note the leading comma. 
-// Field int `toml:",omitempty"` -func (cfg *Config) Marshal(v interface{}) ([]byte, error) { - buf := new(bytes.Buffer) - err := cfg.NewEncoder(buf).Encode(v) - return buf.Bytes(), err -} - -// A Encoder writes TOML to an output stream. -type Encoder struct { - w io.Writer - cfg *Config -} - -// NewEncoder returns a new Encoder that writes to w. -func (cfg *Config) NewEncoder(w io.Writer) *Encoder { - return &Encoder{w, cfg} -} - -// Encode writes the TOML of v to the stream. -// See the documentation for Marshal for details about the conversion of Go values to TOML. -func (e *Encoder) Encode(v interface{}) error { - rv := reflect.ValueOf(v) - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return &marshalNilError{rv.Type()} - } - rv = rv.Elem() - } - buf := &tableBuf{typ: ast.TableTypeNormal} - var err error - switch rv.Kind() { - case reflect.Struct: - err = buf.structFields(e.cfg, rv) - case reflect.Map: - err = buf.mapFields(e.cfg, rv) - default: - err = &marshalTableError{rv.Type()} - } - if err != nil { - return err - } - return buf.writeTo(e.w, "") -} - -// Marshaler can be implemented to override the encoding of TOML values. The returned text -// must be a simple TOML value (i.e. not a table) and is inserted into marshaler output. -// -// This interface exists for backwards-compatibility reasons. You probably want to -// implement encoding.TextMarshaler or MarshalerRec instead. -type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -// MarshalerRec can be implemented to override the TOML encoding of a type. -// The returned value is marshaled in place of the receiver. -type MarshalerRec interface { - MarshalTOML() (interface{}, error) -} - -type tableBuf struct { - name string // already escaped / quoted - body []byte - children []*tableBuf - typ ast.TableType - arrayDepth int -} - -func (b *tableBuf) writeTo(w io.Writer, prefix string) error { - key := b.name // TODO: escape dots - if prefix != "" { - key = prefix + "." 
+ key - } - - if b.name != "" { - head := "[" + key + "]" - if b.typ == ast.TableTypeArray { - head = "[" + head + "]" - } - head += "\n" - if _, err := io.WriteString(w, head); err != nil { - return err - } - } - if _, err := w.Write(b.body); err != nil { - return err - } - - for i, child := range b.children { - if len(b.body) > 0 || i > 0 { - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - } - if err := child.writeTo(w, key); err != nil { - return err - } - } - return nil -} - -func (b *tableBuf) newChild(name string) *tableBuf { - child := &tableBuf{name: quoteName(name), typ: ast.TableTypeNormal} - if b.arrayDepth > 0 { - child.typ = ast.TableTypeArray - } - return child -} - -func (b *tableBuf) addChild(child *tableBuf) { - // Empty table elision: we can avoid writing a table that doesn't have any keys on its - // own. Array tables can't be elided because they define array elements (which would - // be missing if elided). - if len(child.body) == 0 && child.typ == ast.TableTypeNormal { - for _, gchild := range child.children { - gchild.name = child.name + "." 
+ gchild.name - b.addChild(gchild) - } - return - } - b.children = append(b.children, child) -} - -func (b *tableBuf) structFields(cfg *Config, rv reflect.Value) error { - rt := rv.Type() - for i := 0; i < rv.NumField(); i++ { - ft := rt.Field(i) - if ft.PkgPath != "" && !ft.Anonymous { // not exported - continue - } - name, rest := extractTag(ft.Tag.Get(fieldTagName)) - if name == tagSkip { - continue - } - fv := rv.Field(i) - if rest == tagOmitempty && isEmptyValue(fv) { - continue - } - if name == "" { - name = cfg.FieldToKey(rt, ft.Name) - } - if err := b.field(cfg, name, fv); err != nil { - return err - } - } - return nil -} - -type mapKeyList []struct { - key string - value reflect.Value -} - -func (l mapKeyList) Len() int { return len(l) } -func (l mapKeyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l mapKeyList) Less(i, j int) bool { return l[i].key < l[j].key } - -func (b *tableBuf) mapFields(cfg *Config, rv reflect.Value) error { - keys := rv.MapKeys() - keylist := make(mapKeyList, len(keys)) - for i, key := range keys { - var err error - keylist[i].key, err = encodeMapKey(key) - if err != nil { - return err - } - keylist[i].value = rv.MapIndex(key) - } - sort.Sort(keylist) - - for _, kv := range keylist { - if err := b.field(cfg, kv.key, kv.value); err != nil { - return err - } - } - return nil -} - -func (b *tableBuf) field(cfg *Config, name string, rv reflect.Value) error { - off := len(b.body) - b.body = append(b.body, quoteName(name)...) - b.body = append(b.body, " = "...) 
- isTable, err := b.value(cfg, rv, name) - if isTable { - b.body = b.body[:off] // rub out "key =" - } else { - b.body = append(b.body, '\n') - } - return err -} - -func (b *tableBuf) value(cfg *Config, rv reflect.Value, name string) (bool, error) { - isMarshaler, isTable, err := b.marshaler(cfg, rv, name) - if isMarshaler { - return isTable, err - } - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - b.body = strconv.AppendInt(b.body, rv.Int(), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - b.body = strconv.AppendUint(b.body, rv.Uint(), 10) - case reflect.Float32, reflect.Float64: - b.body = strconv.AppendFloat(b.body, rv.Float(), 'e', -1, 64) - case reflect.Bool: - b.body = strconv.AppendBool(b.body, rv.Bool()) - case reflect.String: - b.body = strconv.AppendQuote(b.body, rv.String()) - case reflect.Ptr, reflect.Interface: - if rv.IsNil() { - return false, &marshalNilError{rv.Type()} - } - return b.value(cfg, rv.Elem(), name) - case reflect.Slice, reflect.Array: - rvlen := rv.Len() - if rvlen == 0 { - b.body = append(b.body, '[', ']') - return false, nil - } - - b.arrayDepth++ - wroteElem := false - b.body = append(b.body, '[') - for i := 0; i < rvlen; i++ { - isTable, err := b.value(cfg, rv.Index(i), name) - if err != nil { - return isTable, err - } - wroteElem = wroteElem || !isTable - if wroteElem { - if i < rvlen-1 { - b.body = append(b.body, ',', ' ') - } else { - b.body = append(b.body, ']') - } - } - } - if !wroteElem { - b.body = b.body[:len(b.body)-1] // rub out '[' - } - b.arrayDepth-- - return !wroteElem, nil - case reflect.Struct: - child := b.newChild(name) - err := child.structFields(cfg, rv) - b.addChild(child) - return true, err - case reflect.Map: - child := b.newChild(name) - err := child.mapFields(cfg, rv) - b.addChild(child) - return true, err - default: - return false, fmt.Errorf("toml: marshal: unsupported type %v", rv.Kind()) - } - return false, nil 
-} - -func (b *tableBuf) marshaler(cfg *Config, rv reflect.Value, name string) (handled, isTable bool, err error) { - switch t := rv.Interface().(type) { - case encoding.TextMarshaler: - enc, err := t.MarshalText() - if err != nil { - return true, false, err - } - b.body = encodeTextMarshaler(b.body, string(enc)) - return true, false, nil - case MarshalerRec: - newval, err := t.MarshalTOML() - if err != nil { - return true, false, err - } - isTable, err = b.value(cfg, reflect.ValueOf(newval), name) - return true, isTable, err - case Marshaler: - enc, err := t.MarshalTOML() - if err != nil { - return true, false, err - } - b.body = append(b.body, enc...) - return true, false, nil - } - return false, false, nil -} - -func encodeTextMarshaler(buf []byte, v string) []byte { - // Emit the value without quotes if possible. - if v == "true" || v == "false" { - return append(buf, v...) - } else if _, err := time.Parse(time.RFC3339Nano, v); err == nil { - return append(buf, v...) - } else if _, err := strconv.ParseInt(v, 10, 64); err == nil { - return append(buf, v...) - } else if _, err := strconv.ParseUint(v, 10, 64); err == nil { - return append(buf, v...) - } else if _, err := strconv.ParseFloat(v, 64); err == nil { - return append(buf, v...) 
- } - return strconv.AppendQuote(buf, v) -} - -func encodeMapKey(rv reflect.Value) (string, error) { - if rv.Kind() == reflect.String { - return rv.String(), nil - } - if tm, ok := rv.Interface().(encoding.TextMarshaler); ok { - b, err := tm.MarshalText() - return string(b), err - } - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(rv.Int(), 10), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return strconv.FormatUint(rv.Uint(), 10), nil - } - return "", fmt.Errorf("toml: invalid map key type %v", rv.Type()) -} - -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array: - // encoding/json treats all arrays with non-zero length as non-empty. We check the - // array content here because zero-length arrays are almost never used. - len := v.Len() - for i := 0; i < len; i++ { - if !isEmptyValue(v.Index(i)) { - return false - } - } - return true - case reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func quoteName(s string) string { - if len(s) == 0 { - return strconv.Quote(s) - } - for _, r := range s { - if r >= '0' && r <= '9' || r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' || r == '-' || r == '_' { - continue - } - return strconv.Quote(s) - } - return s -} diff --git a/vendor/github.com/naoina/toml/error.go b/vendor/github.com/naoina/toml/error.go deleted file mode 100644 index cb73b5e0a..000000000 --- a/vendor/github.com/naoina/toml/error.go +++ /dev/null @@ -1,107 +0,0 @@ -package 
toml - -import ( - "errors" - "fmt" - "reflect" - "strconv" -) - -var ( - errArrayMultiType = errors.New("array can't contain multiple types") -) - -// LineError is returned by Unmarshal, UnmarshalTable and Parse -// if the error is local to a line. -type LineError struct { - Line int - StructField string - Err error -} - -func (err *LineError) Error() string { - field := "" - if err.StructField != "" { - field = "(" + err.StructField + ") " - } - return fmt.Sprintf("line %d: %s%v", err.Line, field, err.Err) -} - -func lineError(line int, err error) error { - if err == nil { - return nil - } - if _, ok := err.(*LineError); ok { - return err - } - return &LineError{Line: line, Err: err} -} - -func lineErrorField(line int, field string, err error) error { - if lerr, ok := err.(*LineError); ok { - return lerr - } else if err != nil { - err = &LineError{Line: line, StructField: field, Err: err} - } - return err -} - -type overflowError struct { - kind reflect.Kind - v string -} - -func (err *overflowError) Error() string { - return fmt.Sprintf("value %s is out of range for %v", err.v, err.kind) -} - -func convertNumError(kind reflect.Kind, err error) error { - if numerr, ok := err.(*strconv.NumError); ok && numerr.Err == strconv.ErrRange { - return &overflowError{kind, numerr.Num} - } - return err -} - -type invalidUnmarshalError struct { - typ reflect.Type -} - -func (err *invalidUnmarshalError) Error() string { - if err.typ == nil { - return "toml: Unmarshal(nil)" - } - if err.typ.Kind() != reflect.Ptr { - return "toml: Unmarshal(non-pointer " + err.typ.String() + ")" - } - return "toml: Unmarshal(nil " + err.typ.String() + ")" -} - -type unmarshalTypeError struct { - what string - want string - typ reflect.Type -} - -func (err *unmarshalTypeError) Error() string { - msg := fmt.Sprintf("cannot unmarshal TOML %s into %s", err.what, err.typ) - if err.want != "" { - msg += " (need " + err.want + ")" - } - return msg -} - -type marshalNilError struct { - typ reflect.Type 
-} - -func (err *marshalNilError) Error() string { - return fmt.Sprintf("toml: cannot marshal nil %s", err.typ) -} - -type marshalTableError struct { - typ reflect.Type -} - -func (err *marshalTableError) Error() string { - return fmt.Sprintf("toml: cannot marshal %s as table, want struct or map type", err.typ) -} diff --git a/vendor/github.com/naoina/toml/parse.go b/vendor/github.com/naoina/toml/parse.go deleted file mode 100644 index de9108566..000000000 --- a/vendor/github.com/naoina/toml/parse.go +++ /dev/null @@ -1,362 +0,0 @@ -package toml - -import ( - "errors" - "fmt" - "strconv" - "strings" - - "github.com/naoina/toml/ast" -) - -// The parser is generated by github.com/pointlander/peg. To regenerate it, do: -// -// go get -u github.com/pointlander/peg -// go generate . - -//go:generate peg -switch -inline parse.peg - -var errParse = errors.New("invalid TOML syntax") - -// Parse returns an AST representation of TOML. -// The toplevel is represented by a table. -func Parse(data []byte) (*ast.Table, error) { - d := &parseState{p: &tomlParser{Buffer: string(data)}} - d.init() - - if err := d.parse(); err != nil { - return nil, err - } - - return d.p.toml.table, nil -} - -type parseState struct { - p *tomlParser -} - -func (d *parseState) init() { - d.p.Init() - d.p.toml.init(d.p.buffer) -} - -func (d *parseState) parse() error { - if err := d.p.Parse(); err != nil { - if err, ok := err.(*parseError); ok { - return lineError(err.Line(), errParse) - } - return err - } - return d.execute() -} - -func (d *parseState) execute() (err error) { - defer func() { - if e := recover(); e != nil { - lerr, ok := e.(*LineError) - if !ok { - panic(e) - } - err = lerr - } - }() - d.p.Execute() - return nil -} - -func (e *parseError) Line() int { - tokens := []token32{e.max} - positions, p := make([]int, 2*len(tokens)), 0 - for _, token := range tokens { - positions[p], p = int(token.begin), p+1 - positions[p], p = int(token.end), p+1 - } - for _, t := range 
translatePositions(e.p.buffer, positions) { - if e.p.line < t.line { - e.p.line = t.line - } - } - return e.p.line -} - -type stack struct { - key string - table *ast.Table -} - -type array struct { - parent *array - child *array - current *ast.Array - line int -} - -type toml struct { - table *ast.Table - line int - currentTable *ast.Table - s string - key string - tableKeys []string - val ast.Value - arr *array - stack []*stack - skip bool -} - -func (p *toml) init(data []rune) { - p.line = 1 - p.table = p.newTable(ast.TableTypeNormal, "") - p.table.Position.End = len(data) - 1 - p.table.Data = data[:len(data)-1] // truncate the end_symbol added by PEG parse generator. - p.currentTable = p.table -} - -func (p *toml) Error(err error) { - panic(lineError(p.line, err)) -} - -func (p *tomlParser) SetTime(begin, end int) { - p.val = &ast.Datetime{ - Position: ast.Position{Begin: begin, End: end}, - Data: p.buffer[begin:end], - Value: string(p.buffer[begin:end]), - } -} - -func (p *tomlParser) SetFloat64(begin, end int) { - p.val = &ast.Float{ - Position: ast.Position{Begin: begin, End: end}, - Data: p.buffer[begin:end], - Value: underscoreReplacer.Replace(string(p.buffer[begin:end])), - } -} - -func (p *tomlParser) SetInt64(begin, end int) { - p.val = &ast.Integer{ - Position: ast.Position{Begin: begin, End: end}, - Data: p.buffer[begin:end], - Value: underscoreReplacer.Replace(string(p.buffer[begin:end])), - } -} - -func (p *tomlParser) SetString(begin, end int) { - p.val = &ast.String{ - Position: ast.Position{Begin: begin, End: end}, - Data: p.buffer[begin:end], - Value: p.s, - } - p.s = "" -} - -func (p *tomlParser) SetBool(begin, end int) { - p.val = &ast.Boolean{ - Position: ast.Position{Begin: begin, End: end}, - Data: p.buffer[begin:end], - Value: string(p.buffer[begin:end]), - } -} - -func (p *tomlParser) StartArray() { - if p.arr == nil { - p.arr = &array{line: p.line, current: &ast.Array{}} - return - } - p.arr.child = &array{parent: p.arr, line: p.line, 
current: &ast.Array{}} - p.arr = p.arr.child -} - -func (p *tomlParser) AddArrayVal() { - if p.arr.current == nil { - p.arr.current = &ast.Array{} - } - p.arr.current.Value = append(p.arr.current.Value, p.val) -} - -func (p *tomlParser) SetArray(begin, end int) { - p.arr.current.Position = ast.Position{Begin: begin, End: end} - p.arr.current.Data = p.buffer[begin:end] - p.val = p.arr.current - p.arr = p.arr.parent -} - -func (p *toml) SetTable(buf []rune, begin, end int) { - rawName := string(buf[begin:end]) - p.setTable(p.table, rawName, p.tableKeys) - p.tableKeys = nil -} - -func (p *toml) setTable(parent *ast.Table, name string, names []string) { - parent, err := p.lookupTable(parent, names[:len(names)-1]) - if err != nil { - p.Error(err) - } - last := names[len(names)-1] - tbl := p.newTable(ast.TableTypeNormal, last) - switch v := parent.Fields[last].(type) { - case nil: - parent.Fields[last] = tbl - case []*ast.Table: - p.Error(fmt.Errorf("table `%s' is in conflict with array table in line %d", name, v[0].Line)) - case *ast.Table: - if (v.Position == ast.Position{}) { - // This table was created as an implicit parent. - // Replace it with the real defined table. 
- tbl.Fields = v.Fields - parent.Fields[last] = tbl - } else { - p.Error(fmt.Errorf("table `%s' is in conflict with table in line %d", name, v.Line)) - } - case *ast.KeyValue: - p.Error(fmt.Errorf("table `%s' is in conflict with line %d", name, v.Line)) - default: - p.Error(fmt.Errorf("BUG: table `%s' is in conflict but it's unknown type `%T'", last, v)) - } - p.currentTable = tbl -} - -func (p *toml) newTable(typ ast.TableType, name string) *ast.Table { - return &ast.Table{ - Line: p.line, - Name: name, - Type: typ, - Fields: make(map[string]interface{}), - } -} - -func (p *tomlParser) SetTableString(begin, end int) { - p.currentTable.Data = p.buffer[begin:end] - p.currentTable.Position.Begin = begin - p.currentTable.Position.End = end -} - -func (p *toml) SetArrayTable(buf []rune, begin, end int) { - rawName := string(buf[begin:end]) - p.setArrayTable(p.table, rawName, p.tableKeys) - p.tableKeys = nil -} - -func (p *toml) setArrayTable(parent *ast.Table, name string, names []string) { - parent, err := p.lookupTable(parent, names[:len(names)-1]) - if err != nil { - p.Error(err) - } - last := names[len(names)-1] - tbl := p.newTable(ast.TableTypeArray, last) - switch v := parent.Fields[last].(type) { - case nil: - parent.Fields[last] = []*ast.Table{tbl} - case []*ast.Table: - parent.Fields[last] = append(v, tbl) - case *ast.Table: - p.Error(fmt.Errorf("array table `%s' is in conflict with table in line %d", name, v.Line)) - case *ast.KeyValue: - p.Error(fmt.Errorf("array table `%s' is in conflict with line %d", name, v.Line)) - default: - p.Error(fmt.Errorf("BUG: array table `%s' is in conflict but it's unknown type `%T'", name, v)) - } - p.currentTable = tbl -} - -func (p *toml) StartInlineTable() { - p.skip = false - p.stack = append(p.stack, &stack{p.key, p.currentTable}) - names := []string{p.key} - if p.arr == nil { - p.setTable(p.currentTable, names[0], names) - } else { - p.setArrayTable(p.currentTable, names[0], names) - } -} - -func (p *toml) 
EndInlineTable() { - st := p.stack[len(p.stack)-1] - p.key, p.currentTable = st.key, st.table - p.stack[len(p.stack)-1] = nil - p.stack = p.stack[:len(p.stack)-1] - p.skip = true -} - -func (p *toml) AddLineCount(i int) { - p.line += i -} - -func (p *toml) SetKey(buf []rune, begin, end int) { - p.key = string(buf[begin:end]) - if len(p.key) > 0 && p.key[0] == '"' { - p.key = p.unquote(p.key) - } -} - -func (p *toml) AddTableKey() { - p.tableKeys = append(p.tableKeys, p.key) -} - -func (p *toml) AddKeyValue() { - if p.skip { - p.skip = false - return - } - if val, exists := p.currentTable.Fields[p.key]; exists { - switch v := val.(type) { - case *ast.Table: - p.Error(fmt.Errorf("key `%s' is in conflict with table in line %d", p.key, v.Line)) - case *ast.KeyValue: - p.Error(fmt.Errorf("key `%s' is in conflict with line %xd", p.key, v.Line)) - default: - p.Error(fmt.Errorf("BUG: key `%s' is in conflict but it's unknown type `%T'", p.key, v)) - } - } - p.currentTable.Fields[p.key] = &ast.KeyValue{Key: p.key, Value: p.val, Line: p.line} -} - -func (p *toml) SetBasicString(buf []rune, begin, end int) { - p.s = p.unquote(string(buf[begin:end])) -} - -func (p *toml) SetMultilineString() { - p.s = p.unquote(`"` + escapeReplacer.Replace(strings.TrimLeft(p.s, "\r\n")) + `"`) -} - -func (p *toml) AddMultilineBasicBody(buf []rune, begin, end int) { - p.s += string(buf[begin:end]) -} - -func (p *toml) SetLiteralString(buf []rune, begin, end int) { - p.s = string(buf[begin:end]) -} - -func (p *toml) SetMultilineLiteralString(buf []rune, begin, end int) { - p.s = strings.TrimLeft(string(buf[begin:end]), "\r\n") -} - -func (p *toml) unquote(s string) string { - s, err := strconv.Unquote(s) - if err != nil { - p.Error(err) - } - return s -} - -func (p *toml) lookupTable(t *ast.Table, keys []string) (*ast.Table, error) { - for _, s := range keys { - val, exists := t.Fields[s] - if !exists { - tbl := p.newTable(ast.TableTypeNormal, s) - t.Fields[s] = tbl - t = tbl - continue - } - 
switch v := val.(type) { - case *ast.Table: - t = v - case []*ast.Table: - t = v[len(v)-1] - case *ast.KeyValue: - return nil, fmt.Errorf("key `%s' is in conflict with line %d", s, v.Line) - default: - return nil, fmt.Errorf("BUG: key `%s' is in conflict but it's unknown type `%T'", s, v) - } - } - return t, nil -} diff --git a/vendor/github.com/naoina/toml/parse.peg b/vendor/github.com/naoina/toml/parse.peg deleted file mode 100644 index dd9eb8880..000000000 --- a/vendor/github.com/naoina/toml/parse.peg +++ /dev/null @@ -1,147 +0,0 @@ -package toml - -type tomlParser Peg { - toml -} - -TOML <- Expression (newline Expression)* newline? !. { _ = buffer } - -Expression <- ( - { p.SetTableString(begin, end) } - / ws keyval ws comment? - / ws comment? - / ws -) - -newline <- <[\r\n]+> { p.AddLineCount(end - begin) } - -ws <- [ \t]* -wsnl <- ( - [ \t] - / <[\r\n]> { p.AddLineCount(end - begin) } -)* - -comment <- '#' <[\t -\0x10FFFF]*> - -keyval <- key ws '=' ws val { p.AddKeyValue() } - -key <- bareKey / quotedKey - -bareKey <- <[0-9A-Za-z\-_]+> { p.SetKey(p.buffer, begin, end) } - -quotedKey <- < '"' basicChar* '"' > { p.SetKey(p.buffer, begin, end) } - -val <- ( - { p.SetTime(begin, end) } - / { p.SetFloat64(begin, end) } - / { p.SetInt64(begin, end) } - / { p.SetString(begin, end) } - / { p.SetBool(begin, end) } - / { p.SetArray(begin, end) } - / inlineTable -) - -table <- stdTable / arrayTable - -stdTable <- '[' ws ws ']' { p.SetTable(p.buffer, begin, end) } - -arrayTable <- '[[' ws ws ']]' { p.SetArrayTable(p.buffer, begin, end) } - -inlineTable <- ( - '{' { p.StartInlineTable() } - ws inlineTableKeyValues ws - '}' { p.EndInlineTable() } -) - -inlineTableKeyValues <- (keyval inlineTableValSep?)* - -tableKey <- tableKeyComp (tableKeySep tableKeyComp)* - -tableKeyComp <- key { p.AddTableKey() } - -tableKeySep <- ws '.' ws - -inlineTableValSep <- ws ',' ws - -integer <- [\-+]? int -int <- [1-9] (digit / '_' digit)+ / digit - -float <- integer (frac exp? / frac? 
exp) -frac <- '.' digit (digit / '_' digit)* -exp <- [eE] [\-+]? digit (digit / '_' digit)* - -string <- ( - mlLiteralString - / literalString - / mlBasicString - / basicString -) - -basicString <- <'"' basicChar* '"'> { p.SetBasicString(p.buffer, begin, end) } - -basicChar <- basicUnescaped / escaped -escaped <- escape ([btnfr"/\\] / 'u' hexQuad / 'U' hexQuad hexQuad) - -basicUnescaped <- [ -!#-\[\]-\0x10FFFF] - -escape <- '\\' - -mlBasicString <- '"""' mlBasicBody '"""' { p.SetMultilineString() } - -mlBasicBody <- ( - { p.AddMultilineBasicBody(p.buffer, begin, end) } - / escape newline wsnl -)* - -literalString <- "'" "'" { p.SetLiteralString(p.buffer, begin, end) } - -literalChar <- [\t -&(-\0x10FFFF] - -mlLiteralString <- "'''" "'''" { p.SetMultilineLiteralString(p.buffer, begin, end) } - -mlLiteralBody <- (!"'''" (mlLiteralChar / newline))* - -mlLiteralChar <- [\t -\0x10FFFF] - -hexdigit <- [0-9A-Fa-f] -hexQuad <- hexdigit hexdigit hexdigit hexdigit - -boolean <- 'true' / 'false' - -dateFullYear <- digitQuad -dateMonth <- digitDual -dateMDay <- digitDual -timeHour <- digitDual -timeMinute <- digitDual -timeSecond <- digitDual -timeSecfrac <- '.' digit+ -timeNumoffset <- [\-+] timeHour ':' timeMinute -timeOffset <- 'Z' / timeNumoffset -partialTime <- timeHour ':' timeMinute ':' timeSecond timeSecfrac? -fullDate <- dateFullYear '-' dateMonth '-' dateMDay -fullTime <- partialTime timeOffset -datetime <- (fullDate ('T' fullTime)?) / partialTime - -digit <- [0-9] -digitDual <- digit digit -digitQuad <- digitDual digitDual - -array <- ( - '[' { p.StartArray() } - wsnl arrayValues? wsnl - ']' -) - -arrayValues <- ( - val { p.AddArrayVal() } - ( - wsnl comment? - wsnl arraySep - wsnl comment? - wsnl val { p.AddArrayVal() } - )* - wsnl arraySep? - wsnl comment? 
-) - -arraySep <- ',' diff --git a/vendor/github.com/naoina/toml/parse.peg.go b/vendor/github.com/naoina/toml/parse.peg.go deleted file mode 100644 index d768b00a8..000000000 --- a/vendor/github.com/naoina/toml/parse.peg.go +++ /dev/null @@ -1,2579 +0,0 @@ -package toml - -import ( - "fmt" - "math" - "sort" - "strconv" -) - -const endSymbol rune = 1114112 - -/* The rule types inferred from the grammar are below. */ -type pegRule uint8 - -const ( - ruleUnknown pegRule = iota - ruleTOML - ruleExpression - rulenewline - rulews - rulewsnl - rulecomment - rulekeyval - rulekey - rulebareKey - rulequotedKey - ruleval - ruletable - rulestdTable - rulearrayTable - ruleinlineTable - ruleinlineTableKeyValues - ruletableKey - ruletableKeyComp - ruletableKeySep - ruleinlineTableValSep - ruleinteger - ruleint - rulefloat - rulefrac - ruleexp - rulestring - rulebasicString - rulebasicChar - ruleescaped - rulebasicUnescaped - ruleescape - rulemlBasicString - rulemlBasicBody - ruleliteralString - ruleliteralChar - rulemlLiteralString - rulemlLiteralBody - rulemlLiteralChar - rulehexdigit - rulehexQuad - ruleboolean - ruledateFullYear - ruledateMonth - ruledateMDay - ruletimeHour - ruletimeMinute - ruletimeSecond - ruletimeSecfrac - ruletimeNumoffset - ruletimeOffset - rulepartialTime - rulefullDate - rulefullTime - ruledatetime - ruledigit - ruledigitDual - ruledigitQuad - rulearray - rulearrayValues - rulearraySep - ruleAction0 - rulePegText - ruleAction1 - ruleAction2 - ruleAction3 - ruleAction4 - ruleAction5 - ruleAction6 - ruleAction7 - ruleAction8 - ruleAction9 - ruleAction10 - ruleAction11 - ruleAction12 - ruleAction13 - ruleAction14 - ruleAction15 - ruleAction16 - ruleAction17 - ruleAction18 - ruleAction19 - ruleAction20 - ruleAction21 - ruleAction22 - ruleAction23 - ruleAction24 - ruleAction25 -) - -var rul3s = [...]string{ - "Unknown", - "TOML", - "Expression", - "newline", - "ws", - "wsnl", - "comment", - "keyval", - "key", - "bareKey", - "quotedKey", - "val", - "table", 
- "stdTable", - "arrayTable", - "inlineTable", - "inlineTableKeyValues", - "tableKey", - "tableKeyComp", - "tableKeySep", - "inlineTableValSep", - "integer", - "int", - "float", - "frac", - "exp", - "string", - "basicString", - "basicChar", - "escaped", - "basicUnescaped", - "escape", - "mlBasicString", - "mlBasicBody", - "literalString", - "literalChar", - "mlLiteralString", - "mlLiteralBody", - "mlLiteralChar", - "hexdigit", - "hexQuad", - "boolean", - "dateFullYear", - "dateMonth", - "dateMDay", - "timeHour", - "timeMinute", - "timeSecond", - "timeSecfrac", - "timeNumoffset", - "timeOffset", - "partialTime", - "fullDate", - "fullTime", - "datetime", - "digit", - "digitDual", - "digitQuad", - "array", - "arrayValues", - "arraySep", - "Action0", - "PegText", - "Action1", - "Action2", - "Action3", - "Action4", - "Action5", - "Action6", - "Action7", - "Action8", - "Action9", - "Action10", - "Action11", - "Action12", - "Action13", - "Action14", - "Action15", - "Action16", - "Action17", - "Action18", - "Action19", - "Action20", - "Action21", - "Action22", - "Action23", - "Action24", - "Action25", -} - -type token32 struct { - pegRule - begin, end uint32 -} - -func (t *token32) String() string { - return fmt.Sprintf("\x1B[34m%v\x1B[m %v %v", rul3s[t.pegRule], t.begin, t.end) -} - -type node32 struct { - token32 - up, next *node32 -} - -func (node *node32) print(pretty bool, buffer string) { - var print func(node *node32, depth int) - print = func(node *node32, depth int) { - for node != nil { - for c := 0; c < depth; c++ { - fmt.Printf(" ") - } - rule := rul3s[node.pegRule] - quote := strconv.Quote(string(([]rune(buffer)[node.begin:node.end]))) - if !pretty { - fmt.Printf("%v %v\n", rule, quote) - } else { - fmt.Printf("\x1B[34m%v\x1B[m %v\n", rule, quote) - } - if node.up != nil { - print(node.up, depth+1) - } - node = node.next - } - } - print(node, 0) -} - -func (node *node32) Print(buffer string) { - node.print(false, buffer) -} - -func (node *node32) 
PrettyPrint(buffer string) { - node.print(true, buffer) -} - -type tokens32 struct { - tree []token32 -} - -func (t *tokens32) Trim(length uint32) { - t.tree = t.tree[:length] -} - -func (t *tokens32) Print() { - for _, token := range t.tree { - fmt.Println(token.String()) - } -} - -func (t *tokens32) AST() *node32 { - type element struct { - node *node32 - down *element - } - tokens := t.Tokens() - var stack *element - for _, token := range tokens { - if token.begin == token.end { - continue - } - node := &node32{token32: token} - for stack != nil && stack.node.begin >= token.begin && stack.node.end <= token.end { - stack.node.next = node.up - node.up = stack.node - stack = stack.down - } - stack = &element{node: node, down: stack} - } - if stack != nil { - return stack.node - } - return nil -} - -func (t *tokens32) PrintSyntaxTree(buffer string) { - t.AST().Print(buffer) -} - -func (t *tokens32) PrettyPrintSyntaxTree(buffer string) { - t.AST().PrettyPrint(buffer) -} - -func (t *tokens32) Add(rule pegRule, begin, end, index uint32) { - if tree := t.tree; int(index) >= len(tree) { - expanded := make([]token32, 2*len(tree)) - copy(expanded, tree) - t.tree = expanded - } - t.tree[index] = token32{ - pegRule: rule, - begin: begin, - end: end, - } -} - -func (t *tokens32) Tokens() []token32 { - return t.tree -} - -type tomlParser struct { - toml - - Buffer string - buffer []rune - rules [88]func() bool - parse func(rule ...int) error - reset func() - Pretty bool - tokens32 -} - -func (p *tomlParser) Parse(rule ...int) error { - return p.parse(rule...) 
-} - -func (p *tomlParser) Reset() { - p.reset() -} - -type textPosition struct { - line, symbol int -} - -type textPositionMap map[int]textPosition - -func translatePositions(buffer []rune, positions []int) textPositionMap { - length, translations, j, line, symbol := len(positions), make(textPositionMap, len(positions)), 0, 1, 0 - sort.Ints(positions) - -search: - for i, c := range buffer { - if c == '\n' { - line, symbol = line+1, 0 - } else { - symbol++ - } - if i == positions[j] { - translations[positions[j]] = textPosition{line, symbol} - for j++; j < length; j++ { - if i != positions[j] { - continue search - } - } - break search - } - } - - return translations -} - -type parseError struct { - p *tomlParser - max token32 -} - -func (e *parseError) Error() string { - tokens, error := []token32{e.max}, "\n" - positions, p := make([]int, 2*len(tokens)), 0 - for _, token := range tokens { - positions[p], p = int(token.begin), p+1 - positions[p], p = int(token.end), p+1 - } - translations := translatePositions(e.p.buffer, positions) - format := "parse error near %v (line %v symbol %v - line %v symbol %v):\n%v\n" - if e.p.Pretty { - format = "parse error near \x1B[34m%v\x1B[m (line %v symbol %v - line %v symbol %v):\n%v\n" - } - for _, token := range tokens { - begin, end := int(token.begin), int(token.end) - error += fmt.Sprintf(format, - rul3s[token.pegRule], - translations[begin].line, translations[begin].symbol, - translations[end].line, translations[end].symbol, - strconv.Quote(string(e.p.buffer[begin:end]))) - } - - return error -} - -func (p *tomlParser) PrintSyntaxTree() { - if p.Pretty { - p.tokens32.PrettyPrintSyntaxTree(p.Buffer) - } else { - p.tokens32.PrintSyntaxTree(p.Buffer) - } -} - -func (p *tomlParser) Execute() { - buffer, _buffer, text, begin, end := p.Buffer, p.buffer, "", 0, 0 - for _, token := range p.Tokens() { - switch token.pegRule { - - case rulePegText: - begin, end = int(token.begin), int(token.end) - text = string(_buffer[begin:end]) - 
- case ruleAction0: - _ = buffer - case ruleAction1: - p.SetTableString(begin, end) - case ruleAction2: - p.AddLineCount(end - begin) - case ruleAction3: - p.AddLineCount(end - begin) - case ruleAction4: - p.AddKeyValue() - case ruleAction5: - p.SetKey(p.buffer, begin, end) - case ruleAction6: - p.SetKey(p.buffer, begin, end) - case ruleAction7: - p.SetTime(begin, end) - case ruleAction8: - p.SetFloat64(begin, end) - case ruleAction9: - p.SetInt64(begin, end) - case ruleAction10: - p.SetString(begin, end) - case ruleAction11: - p.SetBool(begin, end) - case ruleAction12: - p.SetArray(begin, end) - case ruleAction13: - p.SetTable(p.buffer, begin, end) - case ruleAction14: - p.SetArrayTable(p.buffer, begin, end) - case ruleAction15: - p.StartInlineTable() - case ruleAction16: - p.EndInlineTable() - case ruleAction17: - p.AddTableKey() - case ruleAction18: - p.SetBasicString(p.buffer, begin, end) - case ruleAction19: - p.SetMultilineString() - case ruleAction20: - p.AddMultilineBasicBody(p.buffer, begin, end) - case ruleAction21: - p.SetLiteralString(p.buffer, begin, end) - case ruleAction22: - p.SetMultilineLiteralString(p.buffer, begin, end) - case ruleAction23: - p.StartArray() - case ruleAction24: - p.AddArrayVal() - case ruleAction25: - p.AddArrayVal() - - } - } - _, _, _, _, _ = buffer, _buffer, text, begin, end -} - -func (p *tomlParser) Init() { - var ( - max token32 - position, tokenIndex uint32 - buffer []rune - ) - p.reset = func() { - max = token32{} - position, tokenIndex = 0, 0 - - p.buffer = []rune(p.Buffer) - if len(p.buffer) == 0 || p.buffer[len(p.buffer)-1] != endSymbol { - p.buffer = append(p.buffer, endSymbol) - } - buffer = p.buffer - } - p.reset() - - _rules := p.rules - tree := tokens32{tree: make([]token32, math.MaxInt16)} - p.parse = func(rule ...int) error { - r := 1 - if len(rule) > 0 { - r = rule[0] - } - matches := p.rules[r]() - p.tokens32 = tree - if matches { - p.Trim(tokenIndex) - return nil - } - return &parseError{p, max} - } - - add 
:= func(rule pegRule, begin uint32) { - tree.Add(rule, begin, position, tokenIndex) - tokenIndex++ - if begin != position && position > max.end { - max = token32{rule, begin, position} - } - } - - matchDot := func() bool { - if buffer[position] != endSymbol { - position++ - return true - } - return false - } - - /*matchChar := func(c byte) bool { - if buffer[position] == c { - position++ - return true - } - return false - }*/ - - /*matchRange := func(lower byte, upper byte) bool { - if c := buffer[position]; c >= lower && c <= upper { - position++ - return true - } - return false - }*/ - - _rules = [...]func() bool{ - nil, - /* 0 TOML <- <(Expression (newline Expression)* newline? !. Action0)> */ - func() bool { - position0, tokenIndex0 := position, tokenIndex - { - position1 := position - if !_rules[ruleExpression]() { - goto l0 - } - l2: - { - position3, tokenIndex3 := position, tokenIndex - if !_rules[rulenewline]() { - goto l3 - } - if !_rules[ruleExpression]() { - goto l3 - } - goto l2 - l3: - position, tokenIndex = position3, tokenIndex3 - } - { - position4, tokenIndex4 := position, tokenIndex - if !_rules[rulenewline]() { - goto l4 - } - goto l5 - l4: - position, tokenIndex = position4, tokenIndex4 - } - l5: - { - position6, tokenIndex6 := position, tokenIndex - if !matchDot() { - goto l6 - } - goto l0 - l6: - position, tokenIndex = position6, tokenIndex6 - } - { - add(ruleAction0, position) - } - add(ruleTOML, position1) - } - return true - l0: - position, tokenIndex = position0, tokenIndex0 - return false - }, - /* 1 Expression <- <((<(ws table ws comment? (wsnl keyval ws comment?)*)> Action1) / (ws keyval ws comment?) / (ws comment?) 
/ ws)> */ - func() bool { - position8, tokenIndex8 := position, tokenIndex - { - position9 := position - { - position10, tokenIndex10 := position, tokenIndex - { - position12 := position - if !_rules[rulews]() { - goto l11 - } - { - position13 := position - { - position14, tokenIndex14 := position, tokenIndex - { - position16 := position - if buffer[position] != rune('[') { - goto l15 - } - position++ - if !_rules[rulews]() { - goto l15 - } - { - position17 := position - if !_rules[ruletableKey]() { - goto l15 - } - add(rulePegText, position17) - } - if !_rules[rulews]() { - goto l15 - } - if buffer[position] != rune(']') { - goto l15 - } - position++ - { - add(ruleAction13, position) - } - add(rulestdTable, position16) - } - goto l14 - l15: - position, tokenIndex = position14, tokenIndex14 - { - position19 := position - if buffer[position] != rune('[') { - goto l11 - } - position++ - if buffer[position] != rune('[') { - goto l11 - } - position++ - if !_rules[rulews]() { - goto l11 - } - { - position20 := position - if !_rules[ruletableKey]() { - goto l11 - } - add(rulePegText, position20) - } - if !_rules[rulews]() { - goto l11 - } - if buffer[position] != rune(']') { - goto l11 - } - position++ - if buffer[position] != rune(']') { - goto l11 - } - position++ - { - add(ruleAction14, position) - } - add(rulearrayTable, position19) - } - } - l14: - add(ruletable, position13) - } - if !_rules[rulews]() { - goto l11 - } - { - position22, tokenIndex22 := position, tokenIndex - if !_rules[rulecomment]() { - goto l22 - } - goto l23 - l22: - position, tokenIndex = position22, tokenIndex22 - } - l23: - l24: - { - position25, tokenIndex25 := position, tokenIndex - if !_rules[rulewsnl]() { - goto l25 - } - if !_rules[rulekeyval]() { - goto l25 - } - if !_rules[rulews]() { - goto l25 - } - { - position26, tokenIndex26 := position, tokenIndex - if !_rules[rulecomment]() { - goto l26 - } - goto l27 - l26: - position, tokenIndex = position26, tokenIndex26 - } - l27: - goto l24 - 
l25: - position, tokenIndex = position25, tokenIndex25 - } - add(rulePegText, position12) - } - { - add(ruleAction1, position) - } - goto l10 - l11: - position, tokenIndex = position10, tokenIndex10 - if !_rules[rulews]() { - goto l29 - } - if !_rules[rulekeyval]() { - goto l29 - } - if !_rules[rulews]() { - goto l29 - } - { - position30, tokenIndex30 := position, tokenIndex - if !_rules[rulecomment]() { - goto l30 - } - goto l31 - l30: - position, tokenIndex = position30, tokenIndex30 - } - l31: - goto l10 - l29: - position, tokenIndex = position10, tokenIndex10 - if !_rules[rulews]() { - goto l32 - } - { - position33, tokenIndex33 := position, tokenIndex - if !_rules[rulecomment]() { - goto l33 - } - goto l34 - l33: - position, tokenIndex = position33, tokenIndex33 - } - l34: - goto l10 - l32: - position, tokenIndex = position10, tokenIndex10 - if !_rules[rulews]() { - goto l8 - } - } - l10: - add(ruleExpression, position9) - } - return true - l8: - position, tokenIndex = position8, tokenIndex8 - return false - }, - /* 2 newline <- <(<('\r' / '\n')+> Action2)> */ - func() bool { - position35, tokenIndex35 := position, tokenIndex - { - position36 := position - { - position37 := position - { - position40, tokenIndex40 := position, tokenIndex - if buffer[position] != rune('\r') { - goto l41 - } - position++ - goto l40 - l41: - position, tokenIndex = position40, tokenIndex40 - if buffer[position] != rune('\n') { - goto l35 - } - position++ - } - l40: - l38: - { - position39, tokenIndex39 := position, tokenIndex - { - position42, tokenIndex42 := position, tokenIndex - if buffer[position] != rune('\r') { - goto l43 - } - position++ - goto l42 - l43: - position, tokenIndex = position42, tokenIndex42 - if buffer[position] != rune('\n') { - goto l39 - } - position++ - } - l42: - goto l38 - l39: - position, tokenIndex = position39, tokenIndex39 - } - add(rulePegText, position37) - } - { - add(ruleAction2, position) - } - add(rulenewline, position36) - } - return true - 
l35: - position, tokenIndex = position35, tokenIndex35 - return false - }, - /* 3 ws <- <(' ' / '\t')*> */ - func() bool { - { - position46 := position - l47: - { - position48, tokenIndex48 := position, tokenIndex - { - position49, tokenIndex49 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l50 - } - position++ - goto l49 - l50: - position, tokenIndex = position49, tokenIndex49 - if buffer[position] != rune('\t') { - goto l48 - } - position++ - } - l49: - goto l47 - l48: - position, tokenIndex = position48, tokenIndex48 - } - add(rulews, position46) - } - return true - }, - /* 4 wsnl <- <((&('\t') '\t') | (&(' ') ' ') | (&('\n' | '\r') (<('\r' / '\n')> Action3)))*> */ - func() bool { - { - position52 := position - l53: - { - position54, tokenIndex54 := position, tokenIndex - { - switch buffer[position] { - case '\t': - if buffer[position] != rune('\t') { - goto l54 - } - position++ - break - case ' ': - if buffer[position] != rune(' ') { - goto l54 - } - position++ - break - default: - { - position56 := position - { - position57, tokenIndex57 := position, tokenIndex - if buffer[position] != rune('\r') { - goto l58 - } - position++ - goto l57 - l58: - position, tokenIndex = position57, tokenIndex57 - if buffer[position] != rune('\n') { - goto l54 - } - position++ - } - l57: - add(rulePegText, position56) - } - { - add(ruleAction3, position) - } - break - } - } - - goto l53 - l54: - position, tokenIndex = position54, tokenIndex54 - } - add(rulewsnl, position52) - } - return true - }, - /* 5 comment <- <('#' <('\t' / [ -\U0010ffff])*>)> */ - func() bool { - position60, tokenIndex60 := position, tokenIndex - { - position61 := position - if buffer[position] != rune('#') { - goto l60 - } - position++ - { - position62 := position - l63: - { - position64, tokenIndex64 := position, tokenIndex - { - position65, tokenIndex65 := position, tokenIndex - if buffer[position] != rune('\t') { - goto l66 - } - position++ - goto l65 - l66: - position, tokenIndex = 
position65, tokenIndex65 - if c := buffer[position]; c < rune(' ') || c > rune('\U0010ffff') { - goto l64 - } - position++ - } - l65: - goto l63 - l64: - position, tokenIndex = position64, tokenIndex64 - } - add(rulePegText, position62) - } - add(rulecomment, position61) - } - return true - l60: - position, tokenIndex = position60, tokenIndex60 - return false - }, - /* 6 keyval <- <(key ws '=' ws val Action4)> */ - func() bool { - position67, tokenIndex67 := position, tokenIndex - { - position68 := position - if !_rules[rulekey]() { - goto l67 - } - if !_rules[rulews]() { - goto l67 - } - if buffer[position] != rune('=') { - goto l67 - } - position++ - if !_rules[rulews]() { - goto l67 - } - if !_rules[ruleval]() { - goto l67 - } - { - add(ruleAction4, position) - } - add(rulekeyval, position68) - } - return true - l67: - position, tokenIndex = position67, tokenIndex67 - return false - }, - /* 7 key <- <(bareKey / quotedKey)> */ - func() bool { - position70, tokenIndex70 := position, tokenIndex - { - position71 := position - { - position72, tokenIndex72 := position, tokenIndex - { - position74 := position - { - position75 := position - { - switch buffer[position] { - case '_': - if buffer[position] != rune('_') { - goto l73 - } - position++ - break - case '-': - if buffer[position] != rune('-') { - goto l73 - } - position++ - break - case 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z': - if c := buffer[position]; c < rune('a') || c > rune('z') { - goto l73 - } - position++ - break - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l73 - } - position++ - break - default: - if c := buffer[position]; c < rune('A') || c > rune('Z') { - goto l73 - } - position++ - break - } - } - - l76: - { - position77, tokenIndex77 := position, tokenIndex - { - switch buffer[position] { - case '_': - if buffer[position] != 
rune('_') { - goto l77 - } - position++ - break - case '-': - if buffer[position] != rune('-') { - goto l77 - } - position++ - break - case 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z': - if c := buffer[position]; c < rune('a') || c > rune('z') { - goto l77 - } - position++ - break - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l77 - } - position++ - break - default: - if c := buffer[position]; c < rune('A') || c > rune('Z') { - goto l77 - } - position++ - break - } - } - - goto l76 - l77: - position, tokenIndex = position77, tokenIndex77 - } - add(rulePegText, position75) - } - { - add(ruleAction5, position) - } - add(rulebareKey, position74) - } - goto l72 - l73: - position, tokenIndex = position72, tokenIndex72 - { - position81 := position - { - position82 := position - if buffer[position] != rune('"') { - goto l70 - } - position++ - l83: - { - position84, tokenIndex84 := position, tokenIndex - if !_rules[rulebasicChar]() { - goto l84 - } - goto l83 - l84: - position, tokenIndex = position84, tokenIndex84 - } - if buffer[position] != rune('"') { - goto l70 - } - position++ - add(rulePegText, position82) - } - { - add(ruleAction6, position) - } - add(rulequotedKey, position81) - } - } - l72: - add(rulekey, position71) - } - return true - l70: - position, tokenIndex = position70, tokenIndex70 - return false - }, - /* 8 bareKey <- <(<((&('_') '_') | (&('-') '-') | (&('a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z') [a-z]) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') [0-9]) | (&('A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z') [A-Z]))+> Action5)> */ - nil, - /* 9 quotedKey <- 
<(<('"' basicChar* '"')> Action6)> */ - nil, - /* 10 val <- <(( Action7) / ( Action8) / ((&('{') inlineTable) | (&('[') ( Action12)) | (&('f' | 't') ( Action11)) | (&('"' | '\'') ( Action10)) | (&('+' | '-' | '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') ( Action9))))> */ - func() bool { - position88, tokenIndex88 := position, tokenIndex - { - position89 := position - { - position90, tokenIndex90 := position, tokenIndex - { - position92 := position - { - position93 := position - { - position94, tokenIndex94 := position, tokenIndex - { - position96 := position - { - position97 := position - { - position98 := position - if !_rules[ruledigitDual]() { - goto l95 - } - if !_rules[ruledigitDual]() { - goto l95 - } - add(ruledigitQuad, position98) - } - add(ruledateFullYear, position97) - } - if buffer[position] != rune('-') { - goto l95 - } - position++ - { - position99 := position - if !_rules[ruledigitDual]() { - goto l95 - } - add(ruledateMonth, position99) - } - if buffer[position] != rune('-') { - goto l95 - } - position++ - { - position100 := position - if !_rules[ruledigitDual]() { - goto l95 - } - add(ruledateMDay, position100) - } - add(rulefullDate, position96) - } - { - position101, tokenIndex101 := position, tokenIndex - if buffer[position] != rune('T') { - goto l101 - } - position++ - { - position103 := position - if !_rules[rulepartialTime]() { - goto l101 - } - { - position104 := position - { - position105, tokenIndex105 := position, tokenIndex - if buffer[position] != rune('Z') { - goto l106 - } - position++ - goto l105 - l106: - position, tokenIndex = position105, tokenIndex105 - { - position107 := position - { - position108, tokenIndex108 := position, tokenIndex - if buffer[position] != rune('-') { - goto l109 - } - position++ - goto l108 - l109: - position, tokenIndex = position108, tokenIndex108 - if buffer[position] != rune('+') { - goto l101 - } - position++ - } - l108: - if !_rules[ruletimeHour]() { - goto l101 - } - if 
buffer[position] != rune(':') { - goto l101 - } - position++ - if !_rules[ruletimeMinute]() { - goto l101 - } - add(ruletimeNumoffset, position107) - } - } - l105: - add(ruletimeOffset, position104) - } - add(rulefullTime, position103) - } - goto l102 - l101: - position, tokenIndex = position101, tokenIndex101 - } - l102: - goto l94 - l95: - position, tokenIndex = position94, tokenIndex94 - if !_rules[rulepartialTime]() { - goto l91 - } - } - l94: - add(ruledatetime, position93) - } - add(rulePegText, position92) - } - { - add(ruleAction7, position) - } - goto l90 - l91: - position, tokenIndex = position90, tokenIndex90 - { - position112 := position - { - position113 := position - if !_rules[ruleinteger]() { - goto l111 - } - { - position114, tokenIndex114 := position, tokenIndex - if !_rules[rulefrac]() { - goto l115 - } - { - position116, tokenIndex116 := position, tokenIndex - if !_rules[ruleexp]() { - goto l116 - } - goto l117 - l116: - position, tokenIndex = position116, tokenIndex116 - } - l117: - goto l114 - l115: - position, tokenIndex = position114, tokenIndex114 - { - position118, tokenIndex118 := position, tokenIndex - if !_rules[rulefrac]() { - goto l118 - } - goto l119 - l118: - position, tokenIndex = position118, tokenIndex118 - } - l119: - if !_rules[ruleexp]() { - goto l111 - } - } - l114: - add(rulefloat, position113) - } - add(rulePegText, position112) - } - { - add(ruleAction8, position) - } - goto l90 - l111: - position, tokenIndex = position90, tokenIndex90 - { - switch buffer[position] { - case '{': - { - position122 := position - if buffer[position] != rune('{') { - goto l88 - } - position++ - { - add(ruleAction15, position) - } - if !_rules[rulews]() { - goto l88 - } - { - position124 := position - l125: - { - position126, tokenIndex126 := position, tokenIndex - if !_rules[rulekeyval]() { - goto l126 - } - { - position127, tokenIndex127 := position, tokenIndex - { - position129 := position - if !_rules[rulews]() { - goto l127 - } - if 
buffer[position] != rune(',') { - goto l127 - } - position++ - if !_rules[rulews]() { - goto l127 - } - add(ruleinlineTableValSep, position129) - } - goto l128 - l127: - position, tokenIndex = position127, tokenIndex127 - } - l128: - goto l125 - l126: - position, tokenIndex = position126, tokenIndex126 - } - add(ruleinlineTableKeyValues, position124) - } - if !_rules[rulews]() { - goto l88 - } - if buffer[position] != rune('}') { - goto l88 - } - position++ - { - add(ruleAction16, position) - } - add(ruleinlineTable, position122) - } - break - case '[': - { - position131 := position - { - position132 := position - if buffer[position] != rune('[') { - goto l88 - } - position++ - { - add(ruleAction23, position) - } - if !_rules[rulewsnl]() { - goto l88 - } - { - position134, tokenIndex134 := position, tokenIndex - { - position136 := position - if !_rules[ruleval]() { - goto l134 - } - { - add(ruleAction24, position) - } - l138: - { - position139, tokenIndex139 := position, tokenIndex - if !_rules[rulewsnl]() { - goto l139 - } - { - position140, tokenIndex140 := position, tokenIndex - if !_rules[rulecomment]() { - goto l140 - } - goto l141 - l140: - position, tokenIndex = position140, tokenIndex140 - } - l141: - if !_rules[rulewsnl]() { - goto l139 - } - if !_rules[rulearraySep]() { - goto l139 - } - if !_rules[rulewsnl]() { - goto l139 - } - { - position142, tokenIndex142 := position, tokenIndex - if !_rules[rulecomment]() { - goto l142 - } - goto l143 - l142: - position, tokenIndex = position142, tokenIndex142 - } - l143: - if !_rules[rulewsnl]() { - goto l139 - } - if !_rules[ruleval]() { - goto l139 - } - { - add(ruleAction25, position) - } - goto l138 - l139: - position, tokenIndex = position139, tokenIndex139 - } - if !_rules[rulewsnl]() { - goto l134 - } - { - position145, tokenIndex145 := position, tokenIndex - if !_rules[rulearraySep]() { - goto l145 - } - goto l146 - l145: - position, tokenIndex = position145, tokenIndex145 - } - l146: - if 
!_rules[rulewsnl]() { - goto l134 - } - { - position147, tokenIndex147 := position, tokenIndex - if !_rules[rulecomment]() { - goto l147 - } - goto l148 - l147: - position, tokenIndex = position147, tokenIndex147 - } - l148: - add(rulearrayValues, position136) - } - goto l135 - l134: - position, tokenIndex = position134, tokenIndex134 - } - l135: - if !_rules[rulewsnl]() { - goto l88 - } - if buffer[position] != rune(']') { - goto l88 - } - position++ - add(rulearray, position132) - } - add(rulePegText, position131) - } - { - add(ruleAction12, position) - } - break - case 'f', 't': - { - position150 := position - { - position151 := position - { - position152, tokenIndex152 := position, tokenIndex - if buffer[position] != rune('t') { - goto l153 - } - position++ - if buffer[position] != rune('r') { - goto l153 - } - position++ - if buffer[position] != rune('u') { - goto l153 - } - position++ - if buffer[position] != rune('e') { - goto l153 - } - position++ - goto l152 - l153: - position, tokenIndex = position152, tokenIndex152 - if buffer[position] != rune('f') { - goto l88 - } - position++ - if buffer[position] != rune('a') { - goto l88 - } - position++ - if buffer[position] != rune('l') { - goto l88 - } - position++ - if buffer[position] != rune('s') { - goto l88 - } - position++ - if buffer[position] != rune('e') { - goto l88 - } - position++ - } - l152: - add(ruleboolean, position151) - } - add(rulePegText, position150) - } - { - add(ruleAction11, position) - } - break - case '"', '\'': - { - position155 := position - { - position156 := position - { - position157, tokenIndex157 := position, tokenIndex - { - position159 := position - if buffer[position] != rune('\'') { - goto l158 - } - position++ - if buffer[position] != rune('\'') { - goto l158 - } - position++ - if buffer[position] != rune('\'') { - goto l158 - } - position++ - { - position160 := position - { - position161 := position - l162: - { - position163, tokenIndex163 := position, tokenIndex - { - 
position164, tokenIndex164 := position, tokenIndex - if buffer[position] != rune('\'') { - goto l164 - } - position++ - if buffer[position] != rune('\'') { - goto l164 - } - position++ - if buffer[position] != rune('\'') { - goto l164 - } - position++ - goto l163 - l164: - position, tokenIndex = position164, tokenIndex164 - } - { - position165, tokenIndex165 := position, tokenIndex - { - position167 := position - { - position168, tokenIndex168 := position, tokenIndex - if buffer[position] != rune('\t') { - goto l169 - } - position++ - goto l168 - l169: - position, tokenIndex = position168, tokenIndex168 - if c := buffer[position]; c < rune(' ') || c > rune('\U0010ffff') { - goto l166 - } - position++ - } - l168: - add(rulemlLiteralChar, position167) - } - goto l165 - l166: - position, tokenIndex = position165, tokenIndex165 - if !_rules[rulenewline]() { - goto l163 - } - } - l165: - goto l162 - l163: - position, tokenIndex = position163, tokenIndex163 - } - add(rulemlLiteralBody, position161) - } - add(rulePegText, position160) - } - if buffer[position] != rune('\'') { - goto l158 - } - position++ - if buffer[position] != rune('\'') { - goto l158 - } - position++ - if buffer[position] != rune('\'') { - goto l158 - } - position++ - { - add(ruleAction22, position) - } - add(rulemlLiteralString, position159) - } - goto l157 - l158: - position, tokenIndex = position157, tokenIndex157 - { - position172 := position - if buffer[position] != rune('\'') { - goto l171 - } - position++ - { - position173 := position - l174: - { - position175, tokenIndex175 := position, tokenIndex - { - position176 := position - { - switch buffer[position] { - case '\t': - if buffer[position] != rune('\t') { - goto l175 - } - position++ - break - case ' ', '!', '"', '#', '$', '%', '&': - if c := buffer[position]; c < rune(' ') || c > rune('&') { - goto l175 - } - position++ - break - default: - if c := buffer[position]; c < rune('(') || c > rune('\U0010ffff') { - goto l175 - } - position++ - 
break - } - } - - add(ruleliteralChar, position176) - } - goto l174 - l175: - position, tokenIndex = position175, tokenIndex175 - } - add(rulePegText, position173) - } - if buffer[position] != rune('\'') { - goto l171 - } - position++ - { - add(ruleAction21, position) - } - add(ruleliteralString, position172) - } - goto l157 - l171: - position, tokenIndex = position157, tokenIndex157 - { - position180 := position - if buffer[position] != rune('"') { - goto l179 - } - position++ - if buffer[position] != rune('"') { - goto l179 - } - position++ - if buffer[position] != rune('"') { - goto l179 - } - position++ - { - position181 := position - l182: - { - position183, tokenIndex183 := position, tokenIndex - { - position184, tokenIndex184 := position, tokenIndex - { - position186 := position - { - position187, tokenIndex187 := position, tokenIndex - if !_rules[rulebasicChar]() { - goto l188 - } - goto l187 - l188: - position, tokenIndex = position187, tokenIndex187 - if !_rules[rulenewline]() { - goto l185 - } - } - l187: - add(rulePegText, position186) - } - { - add(ruleAction20, position) - } - goto l184 - l185: - position, tokenIndex = position184, tokenIndex184 - if !_rules[ruleescape]() { - goto l183 - } - if !_rules[rulenewline]() { - goto l183 - } - if !_rules[rulewsnl]() { - goto l183 - } - } - l184: - goto l182 - l183: - position, tokenIndex = position183, tokenIndex183 - } - add(rulemlBasicBody, position181) - } - if buffer[position] != rune('"') { - goto l179 - } - position++ - if buffer[position] != rune('"') { - goto l179 - } - position++ - if buffer[position] != rune('"') { - goto l179 - } - position++ - { - add(ruleAction19, position) - } - add(rulemlBasicString, position180) - } - goto l157 - l179: - position, tokenIndex = position157, tokenIndex157 - { - position191 := position - { - position192 := position - if buffer[position] != rune('"') { - goto l88 - } - position++ - l193: - { - position194, tokenIndex194 := position, tokenIndex - if 
!_rules[rulebasicChar]() { - goto l194 - } - goto l193 - l194: - position, tokenIndex = position194, tokenIndex194 - } - if buffer[position] != rune('"') { - goto l88 - } - position++ - add(rulePegText, position192) - } - { - add(ruleAction18, position) - } - add(rulebasicString, position191) - } - } - l157: - add(rulestring, position156) - } - add(rulePegText, position155) - } - { - add(ruleAction10, position) - } - break - default: - { - position197 := position - if !_rules[ruleinteger]() { - goto l88 - } - add(rulePegText, position197) - } - { - add(ruleAction9, position) - } - break - } - } - - } - l90: - add(ruleval, position89) - } - return true - l88: - position, tokenIndex = position88, tokenIndex88 - return false - }, - /* 11 table <- <(stdTable / arrayTable)> */ - nil, - /* 12 stdTable <- <('[' ws ws ']' Action13)> */ - nil, - /* 13 arrayTable <- <('[' '[' ws ws (']' ']') Action14)> */ - nil, - /* 14 inlineTable <- <('{' Action15 ws inlineTableKeyValues ws '}' Action16)> */ - nil, - /* 15 inlineTableKeyValues <- <(keyval inlineTableValSep?)*> */ - nil, - /* 16 tableKey <- <(tableKeyComp (tableKeySep tableKeyComp)*)> */ - func() bool { - position204, tokenIndex204 := position, tokenIndex - { - position205 := position - if !_rules[ruletableKeyComp]() { - goto l204 - } - l206: - { - position207, tokenIndex207 := position, tokenIndex - { - position208 := position - if !_rules[rulews]() { - goto l207 - } - if buffer[position] != rune('.') { - goto l207 - } - position++ - if !_rules[rulews]() { - goto l207 - } - add(ruletableKeySep, position208) - } - if !_rules[ruletableKeyComp]() { - goto l207 - } - goto l206 - l207: - position, tokenIndex = position207, tokenIndex207 - } - add(ruletableKey, position205) - } - return true - l204: - position, tokenIndex = position204, tokenIndex204 - return false - }, - /* 17 tableKeyComp <- <(key Action17)> */ - func() bool { - position209, tokenIndex209 := position, tokenIndex - { - position210 := position - if 
!_rules[rulekey]() { - goto l209 - } - { - add(ruleAction17, position) - } - add(ruletableKeyComp, position210) - } - return true - l209: - position, tokenIndex = position209, tokenIndex209 - return false - }, - /* 18 tableKeySep <- <(ws '.' ws)> */ - nil, - /* 19 inlineTableValSep <- <(ws ',' ws)> */ - nil, - /* 20 integer <- <(('-' / '+')? int)> */ - func() bool { - position214, tokenIndex214 := position, tokenIndex - { - position215 := position - { - position216, tokenIndex216 := position, tokenIndex - { - position218, tokenIndex218 := position, tokenIndex - if buffer[position] != rune('-') { - goto l219 - } - position++ - goto l218 - l219: - position, tokenIndex = position218, tokenIndex218 - if buffer[position] != rune('+') { - goto l216 - } - position++ - } - l218: - goto l217 - l216: - position, tokenIndex = position216, tokenIndex216 - } - l217: - { - position220 := position - { - position221, tokenIndex221 := position, tokenIndex - if c := buffer[position]; c < rune('1') || c > rune('9') { - goto l222 - } - position++ - { - position225, tokenIndex225 := position, tokenIndex - if !_rules[ruledigit]() { - goto l226 - } - goto l225 - l226: - position, tokenIndex = position225, tokenIndex225 - if buffer[position] != rune('_') { - goto l222 - } - position++ - if !_rules[ruledigit]() { - goto l222 - } - } - l225: - l223: - { - position224, tokenIndex224 := position, tokenIndex - { - position227, tokenIndex227 := position, tokenIndex - if !_rules[ruledigit]() { - goto l228 - } - goto l227 - l228: - position, tokenIndex = position227, tokenIndex227 - if buffer[position] != rune('_') { - goto l224 - } - position++ - if !_rules[ruledigit]() { - goto l224 - } - } - l227: - goto l223 - l224: - position, tokenIndex = position224, tokenIndex224 - } - goto l221 - l222: - position, tokenIndex = position221, tokenIndex221 - if !_rules[ruledigit]() { - goto l214 - } - } - l221: - add(ruleint, position220) - } - add(ruleinteger, position215) - } - return true - l214: - 
position, tokenIndex = position214, tokenIndex214 - return false - }, - /* 21 int <- <(([1-9] (digit / ('_' digit))+) / digit)> */ - nil, - /* 22 float <- <(integer ((frac exp?) / (frac? exp)))> */ - nil, - /* 23 frac <- <('.' digit (digit / ('_' digit))*)> */ - func() bool { - position231, tokenIndex231 := position, tokenIndex - { - position232 := position - if buffer[position] != rune('.') { - goto l231 - } - position++ - if !_rules[ruledigit]() { - goto l231 - } - l233: - { - position234, tokenIndex234 := position, tokenIndex - { - position235, tokenIndex235 := position, tokenIndex - if !_rules[ruledigit]() { - goto l236 - } - goto l235 - l236: - position, tokenIndex = position235, tokenIndex235 - if buffer[position] != rune('_') { - goto l234 - } - position++ - if !_rules[ruledigit]() { - goto l234 - } - } - l235: - goto l233 - l234: - position, tokenIndex = position234, tokenIndex234 - } - add(rulefrac, position232) - } - return true - l231: - position, tokenIndex = position231, tokenIndex231 - return false - }, - /* 24 exp <- <(('e' / 'E') ('-' / '+')? 
digit (digit / ('_' digit))*)> */ - func() bool { - position237, tokenIndex237 := position, tokenIndex - { - position238 := position - { - position239, tokenIndex239 := position, tokenIndex - if buffer[position] != rune('e') { - goto l240 - } - position++ - goto l239 - l240: - position, tokenIndex = position239, tokenIndex239 - if buffer[position] != rune('E') { - goto l237 - } - position++ - } - l239: - { - position241, tokenIndex241 := position, tokenIndex - { - position243, tokenIndex243 := position, tokenIndex - if buffer[position] != rune('-') { - goto l244 - } - position++ - goto l243 - l244: - position, tokenIndex = position243, tokenIndex243 - if buffer[position] != rune('+') { - goto l241 - } - position++ - } - l243: - goto l242 - l241: - position, tokenIndex = position241, tokenIndex241 - } - l242: - if !_rules[ruledigit]() { - goto l237 - } - l245: - { - position246, tokenIndex246 := position, tokenIndex - { - position247, tokenIndex247 := position, tokenIndex - if !_rules[ruledigit]() { - goto l248 - } - goto l247 - l248: - position, tokenIndex = position247, tokenIndex247 - if buffer[position] != rune('_') { - goto l246 - } - position++ - if !_rules[ruledigit]() { - goto l246 - } - } - l247: - goto l245 - l246: - position, tokenIndex = position246, tokenIndex246 - } - add(ruleexp, position238) - } - return true - l237: - position, tokenIndex = position237, tokenIndex237 - return false - }, - /* 25 string <- <(mlLiteralString / literalString / mlBasicString / basicString)> */ - nil, - /* 26 basicString <- <(<('"' basicChar* '"')> Action18)> */ - nil, - /* 27 basicChar <- <(basicUnescaped / escaped)> */ - func() bool { - position251, tokenIndex251 := position, tokenIndex - { - position252 := position - { - position253, tokenIndex253 := position, tokenIndex - { - position255 := position - { - switch buffer[position] { - case ' ', '!': - if c := buffer[position]; c < rune(' ') || c > rune('!') { - goto l254 - } - position++ - break - case '#', '$', '%', 
'&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[': - if c := buffer[position]; c < rune('#') || c > rune('[') { - goto l254 - } - position++ - break - default: - if c := buffer[position]; c < rune(']') || c > rune('\U0010ffff') { - goto l254 - } - position++ - break - } - } - - add(rulebasicUnescaped, position255) - } - goto l253 - l254: - position, tokenIndex = position253, tokenIndex253 - { - position257 := position - if !_rules[ruleescape]() { - goto l251 - } - { - switch buffer[position] { - case 'U': - if buffer[position] != rune('U') { - goto l251 - } - position++ - if !_rules[rulehexQuad]() { - goto l251 - } - if !_rules[rulehexQuad]() { - goto l251 - } - break - case 'u': - if buffer[position] != rune('u') { - goto l251 - } - position++ - if !_rules[rulehexQuad]() { - goto l251 - } - break - case '\\': - if buffer[position] != rune('\\') { - goto l251 - } - position++ - break - case '/': - if buffer[position] != rune('/') { - goto l251 - } - position++ - break - case '"': - if buffer[position] != rune('"') { - goto l251 - } - position++ - break - case 'r': - if buffer[position] != rune('r') { - goto l251 - } - position++ - break - case 'f': - if buffer[position] != rune('f') { - goto l251 - } - position++ - break - case 'n': - if buffer[position] != rune('n') { - goto l251 - } - position++ - break - case 't': - if buffer[position] != rune('t') { - goto l251 - } - position++ - break - default: - if buffer[position] != rune('b') { - goto l251 - } - position++ - break - } - } - - add(ruleescaped, position257) - } - } - l253: - add(rulebasicChar, position252) - } - return true - l251: - position, tokenIndex = position251, tokenIndex251 - return false - }, - /* 28 escaped <- <(escape ((&('U') ('U' hexQuad hexQuad)) | (&('u') ('u' hexQuad)) | 
(&('\\') '\\') | (&('/') '/') | (&('"') '"') | (&('r') 'r') | (&('f') 'f') | (&('n') 'n') | (&('t') 't') | (&('b') 'b')))> */ - nil, - /* 29 basicUnescaped <- <((&(' ' | '!') [ -!]) | (&('#' | '$' | '%' | '&' | '\'' | '(' | ')' | '*' | '+' | ',' | '-' | '.' | '/' | '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | ':' | ';' | '<' | '=' | '>' | '?' | '@' | 'A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z' | '[') [#-[]) | (&(']' | '^' | '_' | '`' | 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z' | '{' | '|' | '}' | '~' | '\u007f' | '\u0080' | '\u0081' | '\u0082' | '\u0083' | '\u0084' | '\u0085' | '\u0086' | '\u0087' | '\u0088' | '\u0089' | '\u008a' | '\u008b' | '\u008c' | '\u008d' | '\u008e' | '\u008f' | '\u0090' | '\u0091' | '\u0092' | '\u0093' | '\u0094' | '\u0095' | '\u0096' | '\u0097' | '\u0098' | '\u0099' | '\u009a' | '\u009b' | '\u009c' | '\u009d' | '\u009e' | '\u009f' | '\u00a0' | '¡' | '¢' | '£' | '¤' | 'Â¥' | '¦' | '§' | '¨' | '©' | 'ª' | '«' | '¬' | '\u00ad' | '®' | '¯' | '°' | '±' | '²' | '³' | '´' | 'µ' | '¶' | '·' | '¸' | '¹' | 'º' | '»' | '¼' | '½' | '¾' | '¿' | 'À' | 'Ã' | 'Â' | 'Ã' | 'Ä' | 'Ã…' | 'Æ' | 'Ç' | 'È' | 'É' | 'Ê' | 'Ë' | 'ÃŒ' | 'Ã' | 'ÃŽ' | 'Ã' | 'Ã' | 'Ñ' | 'Ã’' | 'Ó' | 'Ô' | 'Õ' | 'Ö' | '×' | 'Ø' | 'Ù' | 'Ú' | 'Û' | 'Ü' | 'Ã' | 'Þ' | 'ß' | 'à' | 'á' | 'â' | 'ã' | 'ä' | 'Ã¥' | 'æ' | 'ç' | 'è' | 'é' | 'ê' | 'ë' | 'ì' | 'í' | 'î' | 'ï' | 'ð' | 'ñ' | 'ò' | 'ó' | 'ô' | 'õ' | 'ö' | '÷' | 'ø' | 'ù' | 'ú' | 'û' | 'ü' | 'ý' | 'þ' | 'ÿ') []-\U0010ffff]))> */ - nil, - /* 30 escape <- <'\\'> */ - func() bool { - position261, tokenIndex261 := position, tokenIndex - { - position262 := position - if buffer[position] != rune('\\') { - goto l261 - } - position++ - add(ruleescape, position262) - } - return true - l261: - position, 
tokenIndex = position261, tokenIndex261 - return false - }, - /* 31 mlBasicString <- <('"' '"' '"' mlBasicBody ('"' '"' '"') Action19)> */ - nil, - /* 32 mlBasicBody <- <((<(basicChar / newline)> Action20) / (escape newline wsnl))*> */ - nil, - /* 33 literalString <- <('\'' '\'' Action21)> */ - nil, - /* 34 literalChar <- <((&('\t') '\t') | (&(' ' | '!' | '"' | '#' | '$' | '%' | '&') [ -&]) | (&('(' | ')' | '*' | '+' | ',' | '-' | '.' | '/' | '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | ':' | ';' | '<' | '=' | '>' | '?' | '@' | 'A' | 'B' | 'C' | 'D' | 'E' | 'F' | 'G' | 'H' | 'I' | 'J' | 'K' | 'L' | 'M' | 'N' | 'O' | 'P' | 'Q' | 'R' | 'S' | 'T' | 'U' | 'V' | 'W' | 'X' | 'Y' | 'Z' | '[' | '\\' | ']' | '^' | '_' | '`' | 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | 'g' | 'h' | 'i' | 'j' | 'k' | 'l' | 'm' | 'n' | 'o' | 'p' | 'q' | 'r' | 's' | 't' | 'u' | 'v' | 'w' | 'x' | 'y' | 'z' | '{' | '|' | '}' | '~' | '\u007f' | '\u0080' | '\u0081' | '\u0082' | '\u0083' | '\u0084' | '\u0085' | '\u0086' | '\u0087' | '\u0088' | '\u0089' | '\u008a' | '\u008b' | '\u008c' | '\u008d' | '\u008e' | '\u008f' | '\u0090' | '\u0091' | '\u0092' | '\u0093' | '\u0094' | '\u0095' | '\u0096' | '\u0097' | '\u0098' | '\u0099' | '\u009a' | '\u009b' | '\u009c' | '\u009d' | '\u009e' | '\u009f' | '\u00a0' | '¡' | '¢' | '£' | '¤' | 'Â¥' | '¦' | '§' | '¨' | '©' | 'ª' | '«' | '¬' | '\u00ad' | '®' | '¯' | '°' | '±' | '²' | '³' | '´' | 'µ' | '¶' | '·' | '¸' | '¹' | 'º' | '»' | '¼' | '½' | '¾' | '¿' | 'À' | 'Ã' | 'Â' | 'Ã' | 'Ä' | 'Ã…' | 'Æ' | 'Ç' | 'È' | 'É' | 'Ê' | 'Ë' | 'ÃŒ' | 'Ã' | 'ÃŽ' | 'Ã' | 'Ã' | 'Ñ' | 'Ã’' | 'Ó' | 'Ô' | 'Õ' | 'Ö' | '×' | 'Ø' | 'Ù' | 'Ú' | 'Û' | 'Ü' | 'Ã' | 'Þ' | 'ß' | 'à' | 'á' | 'â' | 'ã' | 'ä' | 'Ã¥' | 'æ' | 'ç' | 'è' | 'é' | 'ê' | 'ë' | 'ì' | 'í' | 'î' | 'ï' | 'ð' | 'ñ' | 'ò' | 'ó' | 'ô' | 'õ' | 'ö' | '÷' | 'ø' | 'ù' | 'ú' | 'û' | 'ü' | 'ý' | 'þ' | 'ÿ') [(-\U0010ffff]))> */ - nil, - /* 35 mlLiteralString <- <('\'' '\'' '\'' ('\'' '\'' '\'') Action22)> */ - nil, - /* 36 
mlLiteralBody <- <(!('\'' '\'' '\'') (mlLiteralChar / newline))*> */ - nil, - /* 37 mlLiteralChar <- <('\t' / [ -\U0010ffff])> */ - nil, - /* 38 hexdigit <- <((&('a' | 'b' | 'c' | 'd' | 'e' | 'f') [a-f]) | (&('A' | 'B' | 'C' | 'D' | 'E' | 'F') [A-F]) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') [0-9]))> */ - func() bool { - position270, tokenIndex270 := position, tokenIndex - { - position271 := position - { - switch buffer[position] { - case 'a', 'b', 'c', 'd', 'e', 'f': - if c := buffer[position]; c < rune('a') || c > rune('f') { - goto l270 - } - position++ - break - case 'A', 'B', 'C', 'D', 'E', 'F': - if c := buffer[position]; c < rune('A') || c > rune('F') { - goto l270 - } - position++ - break - default: - if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l270 - } - position++ - break - } - } - - add(rulehexdigit, position271) - } - return true - l270: - position, tokenIndex = position270, tokenIndex270 - return false - }, - /* 39 hexQuad <- <(hexdigit hexdigit hexdigit hexdigit)> */ - func() bool { - position273, tokenIndex273 := position, tokenIndex - { - position274 := position - if !_rules[rulehexdigit]() { - goto l273 - } - if !_rules[rulehexdigit]() { - goto l273 - } - if !_rules[rulehexdigit]() { - goto l273 - } - if !_rules[rulehexdigit]() { - goto l273 - } - add(rulehexQuad, position274) - } - return true - l273: - position, tokenIndex = position273, tokenIndex273 - return false - }, - /* 40 boolean <- <(('t' 'r' 'u' 'e') / ('f' 'a' 'l' 's' 'e'))> */ - nil, - /* 41 dateFullYear <- */ - nil, - /* 42 dateMonth <- */ - nil, - /* 43 dateMDay <- */ - nil, - /* 44 timeHour <- */ - func() bool { - position279, tokenIndex279 := position, tokenIndex - { - position280 := position - if !_rules[ruledigitDual]() { - goto l279 - } - add(ruletimeHour, position280) - } - return true - l279: - position, tokenIndex = position279, tokenIndex279 - return false - }, - /* 45 timeMinute <- */ - func() bool { - position281, tokenIndex281 
:= position, tokenIndex - { - position282 := position - if !_rules[ruledigitDual]() { - goto l281 - } - add(ruletimeMinute, position282) - } - return true - l281: - position, tokenIndex = position281, tokenIndex281 - return false - }, - /* 46 timeSecond <- */ - nil, - /* 47 timeSecfrac <- <('.' digit+)> */ - nil, - /* 48 timeNumoffset <- <(('-' / '+') timeHour ':' timeMinute)> */ - nil, - /* 49 timeOffset <- <('Z' / timeNumoffset)> */ - nil, - /* 50 partialTime <- <(timeHour ':' timeMinute ':' timeSecond timeSecfrac?)> */ - func() bool { - position287, tokenIndex287 := position, tokenIndex - { - position288 := position - if !_rules[ruletimeHour]() { - goto l287 - } - if buffer[position] != rune(':') { - goto l287 - } - position++ - if !_rules[ruletimeMinute]() { - goto l287 - } - if buffer[position] != rune(':') { - goto l287 - } - position++ - { - position289 := position - if !_rules[ruledigitDual]() { - goto l287 - } - add(ruletimeSecond, position289) - } - { - position290, tokenIndex290 := position, tokenIndex - { - position292 := position - if buffer[position] != rune('.') { - goto l290 - } - position++ - if !_rules[ruledigit]() { - goto l290 - } - l293: - { - position294, tokenIndex294 := position, tokenIndex - if !_rules[ruledigit]() { - goto l294 - } - goto l293 - l294: - position, tokenIndex = position294, tokenIndex294 - } - add(ruletimeSecfrac, position292) - } - goto l291 - l290: - position, tokenIndex = position290, tokenIndex290 - } - l291: - add(rulepartialTime, position288) - } - return true - l287: - position, tokenIndex = position287, tokenIndex287 - return false - }, - /* 51 fullDate <- <(dateFullYear '-' dateMonth '-' dateMDay)> */ - nil, - /* 52 fullTime <- <(partialTime timeOffset)> */ - nil, - /* 53 datetime <- <((fullDate ('T' fullTime)?) 
/ partialTime)> */ - nil, - /* 54 digit <- <[0-9]> */ - func() bool { - position298, tokenIndex298 := position, tokenIndex - { - position299 := position - if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l298 - } - position++ - add(ruledigit, position299) - } - return true - l298: - position, tokenIndex = position298, tokenIndex298 - return false - }, - /* 55 digitDual <- <(digit digit)> */ - func() bool { - position300, tokenIndex300 := position, tokenIndex - { - position301 := position - if !_rules[ruledigit]() { - goto l300 - } - if !_rules[ruledigit]() { - goto l300 - } - add(ruledigitDual, position301) - } - return true - l300: - position, tokenIndex = position300, tokenIndex300 - return false - }, - /* 56 digitQuad <- <(digitDual digitDual)> */ - nil, - /* 57 array <- <('[' Action23 wsnl arrayValues? wsnl ']')> */ - nil, - /* 58 arrayValues <- <(val Action24 (wsnl comment? wsnl arraySep wsnl comment? wsnl val Action25)* wsnl arraySep? wsnl comment?)> */ - nil, - /* 59 arraySep <- <','> */ - func() bool { - position305, tokenIndex305 := position, tokenIndex - { - position306 := position - if buffer[position] != rune(',') { - goto l305 - } - position++ - add(rulearraySep, position306) - } - return true - l305: - position, tokenIndex = position305, tokenIndex305 - return false - }, - /* 61 Action0 <- <{ _ = buffer }> */ - nil, - nil, - /* 63 Action1 <- <{ p.SetTableString(begin, end) }> */ - nil, - /* 64 Action2 <- <{ p.AddLineCount(end - begin) }> */ - nil, - /* 65 Action3 <- <{ p.AddLineCount(end - begin) }> */ - nil, - /* 66 Action4 <- <{ p.AddKeyValue() }> */ - nil, - /* 67 Action5 <- <{ p.SetKey(p.buffer, begin, end) }> */ - nil, - /* 68 Action6 <- <{ p.SetKey(p.buffer, begin, end) }> */ - nil, - /* 69 Action7 <- <{ p.SetTime(begin, end) }> */ - nil, - /* 70 Action8 <- <{ p.SetFloat64(begin, end) }> */ - nil, - /* 71 Action9 <- <{ p.SetInt64(begin, end) }> */ - nil, - /* 72 Action10 <- <{ p.SetString(begin, end) }> */ - nil, - /* 73 
Action11 <- <{ p.SetBool(begin, end) }> */ - nil, - /* 74 Action12 <- <{ p.SetArray(begin, end) }> */ - nil, - /* 75 Action13 <- <{ p.SetTable(p.buffer, begin, end) }> */ - nil, - /* 76 Action14 <- <{ p.SetArrayTable(p.buffer, begin, end) }> */ - nil, - /* 77 Action15 <- <{ p.StartInlineTable() }> */ - nil, - /* 78 Action16 <- <{ p.EndInlineTable() }> */ - nil, - /* 79 Action17 <- <{ p.AddTableKey() }> */ - nil, - /* 80 Action18 <- <{ p.SetBasicString(p.buffer, begin, end) }> */ - nil, - /* 81 Action19 <- <{ p.SetMultilineString() }> */ - nil, - /* 82 Action20 <- <{ p.AddMultilineBasicBody(p.buffer, begin, end) }> */ - nil, - /* 83 Action21 <- <{ p.SetLiteralString(p.buffer, begin, end) }> */ - nil, - /* 84 Action22 <- <{ p.SetMultilineLiteralString(p.buffer, begin, end) }> */ - nil, - /* 85 Action23 <- <{ p.StartArray() }> */ - nil, - /* 86 Action24 <- <{ p.AddArrayVal() }> */ - nil, - /* 87 Action25 <- <{ p.AddArrayVal() }> */ - nil, - } - p.rules = _rules -} diff --git a/vendor/github.com/naoina/toml/util.go b/vendor/github.com/naoina/toml/util.go deleted file mode 100644 index f882f4e5f..000000000 --- a/vendor/github.com/naoina/toml/util.go +++ /dev/null @@ -1,65 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "strings" -) - -const fieldTagName = "toml" - -// fieldCache maps normalized field names to their position in a struct. 
-type fieldCache struct { - named map[string]fieldInfo // fields with an explicit name in tag - auto map[string]fieldInfo // fields with auto-assigned normalized names -} - -type fieldInfo struct { - index []int - name string - ignored bool -} - -func makeFieldCache(cfg *Config, rt reflect.Type) fieldCache { - named, auto := make(map[string]fieldInfo), make(map[string]fieldInfo) - for i := 0; i < rt.NumField(); i++ { - ft := rt.Field(i) - // skip unexported fields - if ft.PkgPath != "" && !ft.Anonymous { - continue - } - col, _ := extractTag(ft.Tag.Get(fieldTagName)) - info := fieldInfo{index: ft.Index, name: ft.Name, ignored: col == "-"} - if col == "" || col == "-" { - auto[cfg.NormFieldName(rt, ft.Name)] = info - } else { - named[col] = info - } - } - return fieldCache{named, auto} -} - -func (fc fieldCache) findField(cfg *Config, rv reflect.Value, name string) (reflect.Value, string, error) { - info, found := fc.named[name] - if !found { - info, found = fc.auto[cfg.NormFieldName(rv.Type(), name)] - } - if !found { - if cfg.MissingField == nil { - return reflect.Value{}, "", fmt.Errorf("field corresponding to `%s' is not defined in %v", name, rv.Type()) - } else { - return reflect.Value{}, "", cfg.MissingField(rv.Type(), name) - } - } else if info.ignored { - return reflect.Value{}, "", fmt.Errorf("field corresponding to `%s' in %v cannot be set through TOML", name, rv.Type()) - } - return rv.FieldByIndex(info.index), info.name, nil -} - -func extractTag(tag string) (col, rest string) { - tags := strings.SplitN(tag, ",", 2) - if len(tags) == 2 { - return strings.TrimSpace(tags[0]), strings.TrimSpace(tags[1]) - } - return strings.TrimSpace(tags[0]), "" -} diff --git a/vendor/github.com/openshift/api/LICENSE b/vendor/github.com/openshift/api/LICENSE index 8dada3eda..5c389317e 100644 --- a/vendor/github.com/openshift/api/LICENSE +++ b/vendor/github.com/openshift/api/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ 
@@ -175,18 +176,7 @@ END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} + Copyright 2020 Red Hat, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/openshift/api/apps/v1/consts.go b/vendor/github.com/openshift/api/apps/v1/consts.go new file mode 100644 index 000000000..212578bcc --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/consts.go @@ -0,0 +1,108 @@ +package v1 + +const ( + // DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state + // Used for specifying the reason for cancellation or failure of a deployment + // This is on replication controller set by deployer controller. + DeploymentStatusReasonAnnotation = "openshift.io/deployment.status-reason" + + // DeploymentPodAnnotation is an annotation on a deployment (a ReplicationController). The + // annotation value is the name of the deployer Pod which will act upon the ReplicationController + // to implement the deployment behavior. + // This is set on replication controller by deployer controller. + DeploymentPodAnnotation = "openshift.io/deployer-pod.name" + + // DeploymentConfigAnnotation is an annotation name used to correlate a deployment with the + // DeploymentConfig on which the deployment is based. 
+ // This is set on replication controller pod template by deployer controller. + DeploymentConfigAnnotation = "openshift.io/deployment-config.name" + + // DeploymentCancelledAnnotation indicates that the deployment has been cancelled + // The annotation value does not matter and its mere presence indicates cancellation. + // This is set on replication controller by deployment config controller or oc rollout cancel command. + DeploymentCancelledAnnotation = "openshift.io/deployment.cancelled" + + // DeploymentEncodedConfigAnnotation is an annotation name used to retrieve specific encoded + // DeploymentConfig on which a given deployment is based. + // This is set on replication controller by deployer controller. + DeploymentEncodedConfigAnnotation = "openshift.io/encoded-deployment-config" + + // DeploymentVersionAnnotation is an annotation on a deployment (a ReplicationController). The + // annotation value is the LatestVersion value of the DeploymentConfig which was the basis for + // the deployment. + // This is set on replication controller pod template by deployment config controller. + DeploymentVersionAnnotation = "openshift.io/deployment-config.latest-version" + + // DeployerPodForDeploymentLabel is a label which groups pods related to a + // deployment. The value is a deployment name. The deployer pod and hook pods + // created by the internal strategies will have this label. Custom + // strategies can apply this label to any pods they create, enabling + // platform-provided cancellation and garbage collection support. + // This is set on deployer pod by deployer controller. + DeployerPodForDeploymentLabel = "openshift.io/deployer-pod-for.name" + + // DeploymentStatusAnnotation is an annotation name used to retrieve the DeploymentPhase of + // a deployment. + // This is set on replication controller by deployer controller. 
+ DeploymentStatusAnnotation = "openshift.io/deployment.phase" +) + +type DeploymentConditionReason string + +var ( + // ReplicationControllerUpdatedReason is added in a deployment config when one of its replication + // controllers is updated as part of the rollout process. + ReplicationControllerUpdatedReason DeploymentConditionReason = "ReplicationControllerUpdated" + + // ReplicationControllerCreateError is added in a deployment config when it cannot create a new replication + // controller. + ReplicationControllerCreateErrorReason DeploymentConditionReason = "ReplicationControllerCreateError" + + // ReplicationControllerCreatedReason is added in a deployment config when it creates a new replication + // controller. + NewReplicationControllerCreatedReason DeploymentConditionReason = "NewReplicationControllerCreated" + + // NewReplicationControllerAvailableReason is added in a deployment config when its newest replication controller is made + // available ie. the number of new pods that have passed readiness checks and run for at least + // minReadySeconds is at least the minimum available pods that need to run for the deployment config. + NewReplicationControllerAvailableReason DeploymentConditionReason = "NewReplicationControllerAvailable" + + // ProgressDeadlineExceededReason is added in a deployment config when its newest replication controller fails to show + // any progress within the given deadline (progressDeadlineSeconds). + ProgressDeadlineExceededReason DeploymentConditionReason = "ProgressDeadlineExceeded" + + // DeploymentConfigPausedReason is added in a deployment config when it is paused. Lack of progress shouldn't be + // estimated once a deployment config is paused. + DeploymentConfigPausedReason DeploymentConditionReason = "DeploymentConfigPaused" + + // DeploymentConfigResumedReason is added in a deployment config when it is resumed. Useful for not failing accidentally + // deployment configs that paused amidst a rollout. 
+ DeploymentConfigResumedReason DeploymentConditionReason = "DeploymentConfigResumed" + + // RolloutCancelledReason is added in a deployment config when its newest rollout was + // interrupted by cancellation. + RolloutCancelledReason DeploymentConditionReason = "RolloutCancelled" +) + +// DeploymentStatus describes the possible states a deployment can be in. +type DeploymentStatus string + +var ( + + // DeploymentStatusNew means the deployment has been accepted but not yet acted upon. + DeploymentStatusNew DeploymentStatus = "New" + + // DeploymentStatusPending means the deployment been handed over to a deployment strategy, + // but the strategy has not yet declared the deployment to be running. + DeploymentStatusPending DeploymentStatus = "Pending" + + // DeploymentStatusRunning means the deployment strategy has reported the deployment as + // being in-progress. + DeploymentStatusRunning DeploymentStatus = "Running" + + // DeploymentStatusComplete means the deployment finished without an error. + DeploymentStatusComplete DeploymentStatus = "Complete" + + // DeploymentStatusFailed means the deployment finished with an error. + DeploymentStatusFailed DeploymentStatus = "Failed" +) diff --git a/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go b/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go new file mode 100644 index 000000000..31969786c --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go @@ -0,0 +1,38 @@ +package v1 + +// This file contains consts that are not shared between components and set just internally. +// They will likely be removed in (near) future. + +const ( + // DeployerPodCreatedAtAnnotation is an annotation on a deployment that + // records the time in RFC3339 format of when the deployer pod for this particular + // deployment was created. + // This is set by deployer controller, but not consumed by any command or internally. 
+ // DEPRECATED: will be removed soon + DeployerPodCreatedAtAnnotation = "openshift.io/deployer-pod.created-at" + + // DeployerPodStartedAtAnnotation is an annotation on a deployment that + // records the time in RFC3339 format of when the deployer pod for this particular + // deployment was started. + // This is set by deployer controller, but not consumed by any command or internally. + // DEPRECATED: will be removed soon + DeployerPodStartedAtAnnotation = "openshift.io/deployer-pod.started-at" + + // DeployerPodCompletedAtAnnotation is an annotation on deployment that records + // the time in RFC3339 format of when the deployer pod finished. + // This is set by deployer controller, but not consumed by any command or internally. + // DEPRECATED: will be removed soon + DeployerPodCompletedAtAnnotation = "openshift.io/deployer-pod.completed-at" + + // DesiredReplicasAnnotation represents the desired number of replicas for a + // new deployment. + // This is set by deployer controller, but not consumed by any command or internally. + // DEPRECATED: will be removed soon + DesiredReplicasAnnotation = "kubectl.kubernetes.io/desired-replicas" + + // DeploymentAnnotation is an annotation on a deployer Pod. The annotation value is the name + // of the deployment (a ReplicationController) on which the deployer Pod acts. + // This is set by deployer controller and consumed internally and in oc adm top command. + // DEPRECATED: will be removed soon + DeploymentAnnotation = "openshift.io/deployment.name" +) diff --git a/vendor/github.com/openshift/api/apps/v1/generated.pb.go b/vendor/github.com/openshift/api/apps/v1/generated.pb.go index 53b61c61a..d54012700 100644 --- a/vendor/github.com/openshift/api/apps/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/apps/v1/generated.pb.go @@ -1,55 +1,26 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/openshift/api/apps/v1/generated.proto -// DO NOT EDIT! 
-/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - github.com/openshift/api/apps/v1/generated.proto - - It has these top-level messages: - CustomDeploymentStrategyParams - DeploymentCause - DeploymentCauseImageTrigger - DeploymentCondition - DeploymentConfig - DeploymentConfigList - DeploymentConfigRollback - DeploymentConfigRollbackSpec - DeploymentConfigSpec - DeploymentConfigStatus - DeploymentDetails - DeploymentLog - DeploymentLogOptions - DeploymentRequest - DeploymentStrategy - DeploymentTriggerImageChangeParams - DeploymentTriggerPolicies - DeploymentTriggerPolicy - ExecNewPodHook - LifecycleHook - RecreateDeploymentStrategyParams - RollingDeploymentStrategyParams - TagImageHook -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + io "io" -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + v11 "k8s.io/apimachinery/pkg/apis/meta/v1" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" -import strings "strings" -import reflect "reflect" - -import io "io" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -60,117 +31,651 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func (m *CustomDeploymentStrategyParams) Reset() { *m = CustomDeploymentStrategyParams{} } func (*CustomDeploymentStrategyParams) ProtoMessage() {} func (*CustomDeploymentStrategyParams) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} + return fileDescriptor_8f1b1bee37da74c1, []int{0} +} +func (m *CustomDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomDeploymentStrategyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomDeploymentStrategyParams.Merge(m, src) +} +func (m *CustomDeploymentStrategyParams) XXX_Size() int { + return m.Size() +} +func (m *CustomDeploymentStrategyParams) XXX_DiscardUnknown() { + xxx_messageInfo_CustomDeploymentStrategyParams.DiscardUnknown(m) } -func (m *DeploymentCause) Reset() { *m = DeploymentCause{} } -func (*DeploymentCause) ProtoMessage() {} -func (*DeploymentCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_CustomDeploymentStrategyParams proto.InternalMessageInfo + +func (m *DeploymentCause) Reset() { *m = DeploymentCause{} } +func (*DeploymentCause) ProtoMessage() {} +func (*DeploymentCause) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{1} +} +func (m *DeploymentCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCause) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_DeploymentCause.Merge(m, src) +} +func (m *DeploymentCause) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCause) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCause.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCause proto.InternalMessageInfo func (m *DeploymentCauseImageTrigger) Reset() { *m = DeploymentCauseImageTrigger{} } func (*DeploymentCauseImageTrigger) ProtoMessage() {} func (*DeploymentCauseImageTrigger) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} + return fileDescriptor_8f1b1bee37da74c1, []int{2} +} +func (m *DeploymentCauseImageTrigger) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCauseImageTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCauseImageTrigger) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCauseImageTrigger.Merge(m, src) +} +func (m *DeploymentCauseImageTrigger) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCauseImageTrigger) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCauseImageTrigger.DiscardUnknown(m) } -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_DeploymentCauseImageTrigger proto.InternalMessageInfo -func (m *DeploymentConfig) Reset() { *m = DeploymentConfig{} } -func (*DeploymentConfig) ProtoMessage() {} -func (*DeploymentConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{3} +} +func (m 
*DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} -func (m *DeploymentConfigList) Reset() { *m = DeploymentConfigList{} } -func (*DeploymentConfigList) ProtoMessage() {} -func (*DeploymentConfigList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentConfig) Reset() { *m = DeploymentConfig{} } +func (*DeploymentConfig) ProtoMessage() {} +func (*DeploymentConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{4} +} +func (m *DeploymentConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfig.Merge(m, src) +} +func (m *DeploymentConfig) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfig) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfig proto.InternalMessageInfo + +func (m *DeploymentConfigList) Reset() { *m = DeploymentConfigList{} } +func (*DeploymentConfigList) ProtoMessage() {} +func (*DeploymentConfigList) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{5} +} +func (m 
*DeploymentConfigList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigList.Merge(m, src) +} +func (m *DeploymentConfigList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigList proto.InternalMessageInfo func (m *DeploymentConfigRollback) Reset() { *m = DeploymentConfigRollback{} } func (*DeploymentConfigRollback) ProtoMessage() {} func (*DeploymentConfigRollback) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptor_8f1b1bee37da74c1, []int{6} } +func (m *DeploymentConfigRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigRollback.Merge(m, src) +} +func (m *DeploymentConfigRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigRollback.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigRollback proto.InternalMessageInfo func (m *DeploymentConfigRollbackSpec) Reset() { *m = DeploymentConfigRollbackSpec{} } func (*DeploymentConfigRollbackSpec) ProtoMessage() {} func (*DeploymentConfigRollbackSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} + return fileDescriptor_8f1b1bee37da74c1, []int{7} +} +func (m 
*DeploymentConfigRollbackSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigRollbackSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigRollbackSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigRollbackSpec.Merge(m, src) +} +func (m *DeploymentConfigRollbackSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigRollbackSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigRollbackSpec.DiscardUnknown(m) } -func (m *DeploymentConfigSpec) Reset() { *m = DeploymentConfigSpec{} } -func (*DeploymentConfigSpec) ProtoMessage() {} -func (*DeploymentConfigSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +var xxx_messageInfo_DeploymentConfigRollbackSpec proto.InternalMessageInfo -func (m *DeploymentConfigStatus) Reset() { *m = DeploymentConfigStatus{} } -func (*DeploymentConfigStatus) ProtoMessage() {} -func (*DeploymentConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (m *DeploymentConfigSpec) Reset() { *m = DeploymentConfigSpec{} } +func (*DeploymentConfigSpec) ProtoMessage() {} +func (*DeploymentConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{8} +} +func (m *DeploymentConfigSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigSpec.Merge(m, src) +} +func (m *DeploymentConfigSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_DeploymentConfigSpec.DiscardUnknown(m) +} -func (m *DeploymentDetails) Reset() { *m = DeploymentDetails{} } -func (*DeploymentDetails) ProtoMessage() {} -func (*DeploymentDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +var xxx_messageInfo_DeploymentConfigSpec proto.InternalMessageInfo -func (m *DeploymentLog) Reset() { *m = DeploymentLog{} } -func (*DeploymentLog) ProtoMessage() {} -func (*DeploymentLog) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (m *DeploymentConfigStatus) Reset() { *m = DeploymentConfigStatus{} } +func (*DeploymentConfigStatus) ProtoMessage() {} +func (*DeploymentConfigStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{9} +} +func (m *DeploymentConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigStatus.Merge(m, src) +} +func (m *DeploymentConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigStatus.DiscardUnknown(m) +} -func (m *DeploymentLogOptions) Reset() { *m = DeploymentLogOptions{} } -func (*DeploymentLogOptions) ProtoMessage() {} -func (*DeploymentLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +var xxx_messageInfo_DeploymentConfigStatus proto.InternalMessageInfo -func (m *DeploymentRequest) Reset() { *m = DeploymentRequest{} } -func (*DeploymentRequest) ProtoMessage() {} -func (*DeploymentRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (m *DeploymentDetails) Reset() { *m = DeploymentDetails{} } +func (*DeploymentDetails) 
ProtoMessage() {} +func (*DeploymentDetails) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{10} +} +func (m *DeploymentDetails) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentDetails) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentDetails.Merge(m, src) +} +func (m *DeploymentDetails) XXX_Size() int { + return m.Size() +} +func (m *DeploymentDetails) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentDetails.DiscardUnknown(m) +} -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +var xxx_messageInfo_DeploymentDetails proto.InternalMessageInfo + +func (m *DeploymentLog) Reset() { *m = DeploymentLog{} } +func (*DeploymentLog) ProtoMessage() {} +func (*DeploymentLog) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{11} +} +func (m *DeploymentLog) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentLog.Merge(m, src) +} +func (m *DeploymentLog) XXX_Size() int { + return m.Size() +} +func (m *DeploymentLog) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentLog.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentLog proto.InternalMessageInfo + +func (m *DeploymentLogOptions) Reset() { *m = DeploymentLogOptions{} } +func (*DeploymentLogOptions) ProtoMessage() {} +func (*DeploymentLogOptions) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_8f1b1bee37da74c1, []int{12} +} +func (m *DeploymentLogOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentLogOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentLogOptions.Merge(m, src) +} +func (m *DeploymentLogOptions) XXX_Size() int { + return m.Size() +} +func (m *DeploymentLogOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentLogOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentLogOptions proto.InternalMessageInfo + +func (m *DeploymentRequest) Reset() { *m = DeploymentRequest{} } +func (*DeploymentRequest) ProtoMessage() {} +func (*DeploymentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{13} +} +func (m *DeploymentRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRequest.Merge(m, src) +} +func (m *DeploymentRequest) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentRequest proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{14} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo func (m *DeploymentTriggerImageChangeParams) Reset() { *m = DeploymentTriggerImageChangeParams{} } func (*DeploymentTriggerImageChangeParams) ProtoMessage() {} func (*DeploymentTriggerImageChangeParams) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} + return fileDescriptor_8f1b1bee37da74c1, []int{15} } +func (m *DeploymentTriggerImageChangeParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentTriggerImageChangeParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentTriggerImageChangeParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentTriggerImageChangeParams.Merge(m, src) +} +func (m *DeploymentTriggerImageChangeParams) XXX_Size() int { + return m.Size() +} +func (m *DeploymentTriggerImageChangeParams) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentTriggerImageChangeParams.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentTriggerImageChangeParams proto.InternalMessageInfo func (m *DeploymentTriggerPolicies) Reset() { *m = DeploymentTriggerPolicies{} } func (*DeploymentTriggerPolicies) ProtoMessage() {} func (*DeploymentTriggerPolicies) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{16} + return fileDescriptor_8f1b1bee37da74c1, []int{16} } +func (m *DeploymentTriggerPolicies) XXX_Unmarshal(b []byte) error 
{ + return m.Unmarshal(b) +} +func (m *DeploymentTriggerPolicies) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentTriggerPolicies) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentTriggerPolicies.Merge(m, src) +} +func (m *DeploymentTriggerPolicies) XXX_Size() int { + return m.Size() +} +func (m *DeploymentTriggerPolicies) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentTriggerPolicies.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentTriggerPolicies proto.InternalMessageInfo func (m *DeploymentTriggerPolicy) Reset() { *m = DeploymentTriggerPolicy{} } func (*DeploymentTriggerPolicy) ProtoMessage() {} func (*DeploymentTriggerPolicy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{17} + return fileDescriptor_8f1b1bee37da74c1, []int{17} +} +func (m *DeploymentTriggerPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentTriggerPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentTriggerPolicy.Merge(m, src) +} +func (m *DeploymentTriggerPolicy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentTriggerPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentTriggerPolicy.DiscardUnknown(m) } -func (m *ExecNewPodHook) Reset() { *m = ExecNewPodHook{} } -func (*ExecNewPodHook) ProtoMessage() {} -func (*ExecNewPodHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +var xxx_messageInfo_DeploymentTriggerPolicy proto.InternalMessageInfo -func (m *LifecycleHook) Reset() { *m = LifecycleHook{} } -func (*LifecycleHook) ProtoMessage() {} -func (*LifecycleHook) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{19} } +func (m *ExecNewPodHook) Reset() { *m = ExecNewPodHook{} } +func (*ExecNewPodHook) ProtoMessage() {} +func (*ExecNewPodHook) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{18} +} +func (m *ExecNewPodHook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecNewPodHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExecNewPodHook) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecNewPodHook.Merge(m, src) +} +func (m *ExecNewPodHook) XXX_Size() int { + return m.Size() +} +func (m *ExecNewPodHook) XXX_DiscardUnknown() { + xxx_messageInfo_ExecNewPodHook.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecNewPodHook proto.InternalMessageInfo + +func (m *LifecycleHook) Reset() { *m = LifecycleHook{} } +func (*LifecycleHook) ProtoMessage() {} +func (*LifecycleHook) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{19} +} +func (m *LifecycleHook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LifecycleHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LifecycleHook) XXX_Merge(src proto.Message) { + xxx_messageInfo_LifecycleHook.Merge(m, src) +} +func (m *LifecycleHook) XXX_Size() int { + return m.Size() +} +func (m *LifecycleHook) XXX_DiscardUnknown() { + xxx_messageInfo_LifecycleHook.DiscardUnknown(m) +} + +var xxx_messageInfo_LifecycleHook proto.InternalMessageInfo func (m *RecreateDeploymentStrategyParams) Reset() { *m = RecreateDeploymentStrategyParams{} } func (*RecreateDeploymentStrategyParams) ProtoMessage() {} func (*RecreateDeploymentStrategyParams) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return 
fileDescriptor_8f1b1bee37da74c1, []int{20} } +func (m *RecreateDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RecreateDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RecreateDeploymentStrategyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecreateDeploymentStrategyParams.Merge(m, src) +} +func (m *RecreateDeploymentStrategyParams) XXX_Size() int { + return m.Size() +} +func (m *RecreateDeploymentStrategyParams) XXX_DiscardUnknown() { + xxx_messageInfo_RecreateDeploymentStrategyParams.DiscardUnknown(m) +} + +var xxx_messageInfo_RecreateDeploymentStrategyParams proto.InternalMessageInfo func (m *RollingDeploymentStrategyParams) Reset() { *m = RollingDeploymentStrategyParams{} } func (*RollingDeploymentStrategyParams) ProtoMessage() {} func (*RollingDeploymentStrategyParams) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} + return fileDescriptor_8f1b1bee37da74c1, []int{21} +} +func (m *RollingDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingDeploymentStrategyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingDeploymentStrategyParams.Merge(m, src) +} +func (m *RollingDeploymentStrategyParams) XXX_Size() int { + return m.Size() +} +func (m *RollingDeploymentStrategyParams) XXX_DiscardUnknown() { + xxx_messageInfo_RollingDeploymentStrategyParams.DiscardUnknown(m) } -func (m *TagImageHook) Reset() { *m = TagImageHook{} } -func (*TagImageHook) ProtoMessage() {} -func (*TagImageHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{22} } +var xxx_messageInfo_RollingDeploymentStrategyParams proto.InternalMessageInfo + +func (m *TagImageHook) Reset() { *m = TagImageHook{} } +func (*TagImageHook) ProtoMessage() {} +func (*TagImageHook) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{22} +} +func (m *TagImageHook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagImageHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagImageHook) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagImageHook.Merge(m, src) +} +func (m *TagImageHook) XXX_Size() int { + return m.Size() +} +func (m *TagImageHook) XXX_DiscardUnknown() { + xxx_messageInfo_TagImageHook.DiscardUnknown(m) +} + +var xxx_messageInfo_TagImageHook proto.InternalMessageInfo func init() { proto.RegisterType((*CustomDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.CustomDeploymentStrategyParams") @@ -180,14 +685,18 @@ func init() { proto.RegisterType((*DeploymentConfig)(nil), "github.com.openshift.api.apps.v1.DeploymentConfig") proto.RegisterType((*DeploymentConfigList)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigList") proto.RegisterType((*DeploymentConfigRollback)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback.UpdatedAnnotationsEntry") proto.RegisterType((*DeploymentConfigRollbackSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollbackSpec") proto.RegisterType((*DeploymentConfigSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec.SelectorEntry") proto.RegisterType((*DeploymentConfigStatus)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigStatus") 
proto.RegisterType((*DeploymentDetails)(nil), "github.com.openshift.api.apps.v1.DeploymentDetails") proto.RegisterType((*DeploymentLog)(nil), "github.com.openshift.api.apps.v1.DeploymentLog") proto.RegisterType((*DeploymentLogOptions)(nil), "github.com.openshift.api.apps.v1.DeploymentLogOptions") proto.RegisterType((*DeploymentRequest)(nil), "github.com.openshift.api.apps.v1.DeploymentRequest") proto.RegisterType((*DeploymentStrategy)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.LabelsEntry") proto.RegisterType((*DeploymentTriggerImageChangeParams)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerImageChangeParams") proto.RegisterType((*DeploymentTriggerPolicies)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicies") proto.RegisterType((*DeploymentTriggerPolicy)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicy") @@ -197,10 +706,177 @@ func init() { proto.RegisterType((*RollingDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RollingDeploymentStrategyParams") proto.RegisterType((*TagImageHook)(nil), "github.com.openshift.api.apps.v1.TagImageHook") } + +func init() { + proto.RegisterFile("github.com/openshift/api/apps/v1/generated.proto", fileDescriptor_8f1b1bee37da74c1) +} + +var fileDescriptor_8f1b1bee37da74c1 = []byte{ + // 2520 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xcd, 0x6f, 0x5b, 0x59, + 0x15, 0xcf, 0x8b, 0xed, 0xc4, 0x3e, 0xf9, 0x6a, 0x6e, 0xfa, 0xe1, 0xc9, 0xa0, 0x38, 0xf2, 0x68, + 0x86, 0x00, 0x83, 0x3d, 0xcd, 0x94, 0xd1, 0xb4, 0xd5, 0x0c, 0xc4, 0x69, 0x3a, 0x93, 0xca, 0x69, + 0xc3, 0x4d, 0xda, 0xd2, 0x0a, 0x41, 0x6f, 0x9e, 0x6f, 0x9c, 0x3b, 0x79, 0xef, 0x5d, 0xf3, 0xde, + 0xb5, 0x5b, 0x23, 0x84, 
0x66, 0x03, 0x12, 0xd2, 0x2c, 0x58, 0xc2, 0x06, 0xb1, 0x60, 0x0b, 0x62, + 0xc1, 0x1e, 0xb1, 0x40, 0xea, 0x02, 0xa4, 0x91, 0x90, 0x60, 0x84, 0x50, 0x34, 0x0d, 0x3b, 0xfe, + 0x84, 0xae, 0xd0, 0xfd, 0x78, 0x5f, 0xfe, 0x68, 0xe2, 0xb4, 0x3b, 0xbf, 0xf3, 0xf1, 0x3b, 0xe7, + 0x9e, 0x7b, 0xce, 0xb9, 0xe7, 0x5e, 0xc3, 0x3b, 0x4d, 0x26, 0x0e, 0xda, 0x7b, 0x15, 0x9b, 0xbb, + 0x55, 0xde, 0xa2, 0x5e, 0x70, 0xc0, 0xf6, 0x45, 0x95, 0xb4, 0x58, 0x95, 0xb4, 0x5a, 0x41, 0xb5, + 0x73, 0xb9, 0xda, 0xa4, 0x1e, 0xf5, 0x89, 0xa0, 0x8d, 0x4a, 0xcb, 0xe7, 0x82, 0xa3, 0xe5, 0x58, + 0xa3, 0x12, 0x69, 0x54, 0x48, 0x8b, 0x55, 0xa4, 0x46, 0xa5, 0x73, 0x79, 0xf1, 0x9b, 0x09, 0xcc, + 0x26, 0x6f, 0xf2, 0xaa, 0x52, 0xdc, 0x6b, 0xef, 0xab, 0x2f, 0xf5, 0xa1, 0x7e, 0x69, 0xc0, 0xc5, + 0xf2, 0xe1, 0xfb, 0x41, 0x85, 0x71, 0x65, 0xd4, 0xe6, 0x3e, 0x1d, 0x60, 0x74, 0xf1, 0x4a, 0x2c, + 0xe3, 0x12, 0xfb, 0x80, 0x79, 0xd4, 0xef, 0x56, 0x5b, 0x87, 0x4d, 0x49, 0x08, 0xaa, 0x2e, 0x15, + 0x64, 0x90, 0xd6, 0x7b, 0xc3, 0xb4, 0xfc, 0xb6, 0x27, 0x98, 0x4b, 0xab, 0x81, 0x7d, 0x40, 0x5d, + 0xd2, 0xa7, 0xf7, 0xee, 0x30, 0xbd, 0xb6, 0x60, 0x4e, 0x95, 0x79, 0x22, 0x10, 0x7e, 0xaf, 0x52, + 0xf9, 0xcf, 0x16, 0x2c, 0xad, 0xb7, 0x03, 0xc1, 0xdd, 0x1b, 0xb4, 0xe5, 0xf0, 0xae, 0x4b, 0x3d, + 0xb1, 0x23, 0xa4, 0x44, 0xb3, 0xbb, 0x4d, 0x7c, 0xe2, 0x06, 0xe8, 0x0d, 0xc8, 0x31, 0x97, 0x34, + 0x69, 0xd1, 0x5a, 0xb6, 0x56, 0x0a, 0xb5, 0x99, 0xa7, 0x47, 0xa5, 0xb1, 0xe3, 0xa3, 0x52, 0x6e, + 0x53, 0x12, 0xb1, 0xe6, 0xa1, 0xef, 0xc2, 0x14, 0xf5, 0x3a, 0xcc, 0xe7, 0x9e, 0x44, 0x28, 0x8e, + 0x2f, 0x67, 0x56, 0xa6, 0x56, 0x17, 0x2b, 0xda, 0x25, 0x15, 0x67, 0x19, 0xa4, 0x4a, 0xe7, 0x72, + 0x65, 0xc3, 0xeb, 0xdc, 0x23, 0x7e, 0x6d, 0xc1, 0xc0, 0x4c, 0x6d, 0xc4, 0x6a, 0x38, 0x89, 0x81, + 0xde, 0x84, 0x49, 0x9b, 0xbb, 0x2e, 0xf1, 0x1a, 0xc5, 0xcc, 0x72, 0x66, 0xa5, 0x50, 0x9b, 0x3a, + 0x3e, 0x2a, 0x4d, 0xae, 0x6b, 0x12, 0x0e, 0x79, 0xe5, 0xbf, 0x58, 0x30, 0x17, 0xfb, 0xbe, 0x4e, + 0xda, 0x01, 0x45, 0x57, 0x21, 0x2b, 0xba, 0xad, 0xd0, 0xe3, 
0x37, 0x8d, 0xa9, 0xec, 0x6e, 0xb7, + 0x45, 0x9f, 0x1f, 0x95, 0x2e, 0xc4, 0xe2, 0xbb, 0x3e, 0x6b, 0x36, 0xa9, 0x2f, 0x19, 0x58, 0xa9, + 0xa0, 0x00, 0xa6, 0xd5, 0x8a, 0x0c, 0xa7, 0x38, 0xbe, 0x6c, 0xad, 0x4c, 0xad, 0x7e, 0x50, 0x39, + 0x29, 0x7f, 0x2a, 0x3d, 0x3e, 0x6c, 0x26, 0x40, 0x6a, 0xe7, 0x8e, 0x8f, 0x4a, 0xd3, 0x49, 0x0a, + 0x4e, 0x19, 0x29, 0x37, 0xe0, 0xf5, 0x17, 0xa8, 0xa3, 0x0d, 0xc8, 0xee, 0xfb, 0xdc, 0x55, 0xcb, + 0x99, 0x5a, 0x7d, 0x63, 0x50, 0x54, 0xef, 0xec, 0x7d, 0x42, 0x6d, 0x81, 0xe9, 0x3e, 0xf5, 0xa9, + 0x67, 0xd3, 0xda, 0x74, 0xb8, 0xe6, 0x9b, 0x3e, 0x77, 0xb1, 0x52, 0x2f, 0xff, 0x2b, 0x03, 0x0b, + 0x09, 0x33, 0xdc, 0x6b, 0x30, 0xc1, 0xb8, 0x87, 0xae, 0xa7, 0xa2, 0xf5, 0xd5, 0x9e, 0x68, 0x5d, + 0x1a, 0xa0, 0x92, 0x88, 0x57, 0x1d, 0x26, 0x02, 0x41, 0x44, 0x3b, 0x50, 0x91, 0x2a, 0xd4, 0xae, + 0x18, 0xf5, 0x89, 0x1d, 0x45, 0x7d, 0x7e, 0x54, 0x1a, 0x50, 0x29, 0x95, 0x08, 0x49, 0x4b, 0x61, + 0x83, 0x81, 0x3e, 0x81, 0x59, 0x87, 0x04, 0xe2, 0x6e, 0xab, 0x41, 0x04, 0xdd, 0x65, 0x2e, 0x2d, + 0x4e, 0xa8, 0x35, 0x7f, 0x3d, 0xb1, 0xe6, 0x28, 0xb9, 0x2b, 0xad, 0xc3, 0xa6, 0x24, 0x04, 0x15, + 0x59, 0x4a, 0x32, 0x0a, 0x52, 0xa3, 0x76, 0xd1, 0x78, 0x30, 0x5b, 0x4f, 0x21, 0xe1, 0x1e, 0x64, + 0xd4, 0x01, 0x24, 0x29, 0xbb, 0x3e, 0xf1, 0x02, 0xbd, 0x2a, 0x69, 0x2f, 0x33, 0xb2, 0xbd, 0x45, + 0x63, 0x0f, 0xd5, 0xfb, 0xd0, 0xf0, 0x00, 0x0b, 0xe8, 0x2d, 0x98, 0xf0, 0x29, 0x09, 0xb8, 0x57, + 0xcc, 0xaa, 0x88, 0xcd, 0x86, 0x11, 0xc3, 0x8a, 0x8a, 0x0d, 0x17, 0x7d, 0x0d, 0x26, 0x5d, 0x1a, + 0x04, 0xb2, 0xf2, 0x72, 0x4a, 0x70, 0xce, 0x08, 0x4e, 0x6e, 0x69, 0x32, 0x0e, 0xf9, 0xe5, 0x3f, + 0x8e, 0xc3, 0xb9, 0xd4, 0x36, 0xed, 0xb3, 0x26, 0x7a, 0x04, 0x79, 0xe9, 0x67, 0x83, 0x08, 0x62, + 0x32, 0xe7, 0x9d, 0xd3, 0xad, 0x4a, 0xe7, 0xd2, 0x16, 0x15, 0xa4, 0x86, 0x8c, 0x49, 0x88, 0x69, + 0x38, 0x42, 0x45, 0xdf, 0x83, 0x6c, 0xd0, 0xa2, 0xb6, 0xa9, 0x91, 0xf7, 0x46, 0xaa, 0x11, 0xe5, + 0xe3, 0x4e, 0x8b, 0xda, 0x71, 0xaa, 0xca, 0x2f, 0xac, 0x10, 0xd1, 0xa3, 0x28, 0xab, 0xf4, 0x7e, + 
0xbc, 0x7f, 0x06, 0x6c, 0xa5, 0x1f, 0x47, 0x37, 0x9d, 0x69, 0xe5, 0xbf, 0x5b, 0x70, 0xbe, 0x57, + 0xa5, 0xce, 0x02, 0x81, 0xbe, 0xdf, 0x17, 0xb6, 0xca, 0xe9, 0xc2, 0x26, 0xb5, 0x55, 0xd0, 0xce, + 0x19, 0x93, 0xf9, 0x90, 0x92, 0x08, 0xd9, 0x7d, 0xc8, 0x31, 0x41, 0xdd, 0xc0, 0x74, 0xc8, 0xd5, + 0xd1, 0xd7, 0x95, 0x68, 0xc0, 0x12, 0x08, 0x6b, 0xbc, 0xf2, 0xcf, 0x33, 0x50, 0xec, 0x15, 0xc5, + 0xdc, 0x71, 0xf6, 0x88, 0x7d, 0x88, 0x96, 0x21, 0xeb, 0x11, 0x37, 0xac, 0xf0, 0x28, 0xe0, 0xb7, + 0x89, 0x4b, 0xb1, 0xe2, 0xa0, 0xdf, 0x58, 0x80, 0xda, 0xaa, 0x36, 0x1a, 0x6b, 0x9e, 0xc7, 0x05, + 0x91, 0xe9, 0x1a, 0x7a, 0x89, 0x47, 0xf7, 0x32, 0x34, 0x5d, 0xb9, 0xdb, 0x07, 0xba, 0xe1, 0x09, + 0xbf, 0x1b, 0x57, 0x4d, 0xbf, 0x00, 0x1e, 0xe0, 0x09, 0x7a, 0x64, 0x72, 0x4d, 0xe7, 0xc3, 0x87, + 0x67, 0xf7, 0x68, 0x58, 0xce, 0x2d, 0x6e, 0xc0, 0xa5, 0x21, 0xce, 0xa2, 0x73, 0x90, 0x39, 0xa4, + 0x5d, 0x1d, 0x3e, 0x2c, 0x7f, 0xa2, 0xf3, 0x90, 0xeb, 0x10, 0xa7, 0x4d, 0x75, 0xd7, 0xc3, 0xfa, + 0xe3, 0xda, 0xf8, 0xfb, 0x56, 0xf9, 0x4f, 0x19, 0xf8, 0xca, 0x8b, 0x6c, 0xbf, 0xa2, 0x6e, 0x8e, + 0xde, 0x86, 0xbc, 0x4f, 0x3b, 0x2c, 0x60, 0xdc, 0x53, 0x4e, 0x64, 0xe2, 0xbc, 0xc3, 0x86, 0x8e, + 0x23, 0x09, 0xb4, 0x06, 0x73, 0xcc, 0xb3, 0x9d, 0x76, 0x23, 0x3c, 0x54, 0x74, 0x65, 0xe5, 0x6b, + 0x97, 0x8c, 0xd2, 0xdc, 0x66, 0x9a, 0x8d, 0x7b, 0xe5, 0x93, 0x10, 0xd4, 0x6d, 0x39, 0x44, 0x50, + 0xd5, 0xc0, 0x06, 0x40, 0x18, 0x36, 0xee, 0x95, 0x47, 0xf7, 0xe0, 0xa2, 0x21, 0x61, 0xda, 0x72, + 0x98, 0xad, 0x62, 0x2c, 0x2b, 0x44, 0x75, 0xb8, 0x7c, 0x6d, 0xc9, 0x20, 0x5d, 0xdc, 0x1c, 0x28, + 0x85, 0x87, 0x68, 0x27, 0x5c, 0x0b, 0x67, 0x17, 0x75, 0x6e, 0xf4, 0xbb, 0x16, 0xb2, 0x71, 0xaf, + 0x7c, 0xf9, 0x7f, 0xb9, 0xfe, 0x7e, 0xa0, 0xb6, 0x6b, 0x0f, 0xf2, 0x41, 0x08, 0xaa, 0xb7, 0xec, + 0xca, 0x28, 0xc9, 0x17, 0x1a, 0x88, 0x77, 0x27, 0xf2, 0x21, 0xc2, 0x95, 0xfe, 0xbb, 0xcc, 0xc3, + 0x94, 0x34, 0xba, 0x3b, 0xd4, 0xe6, 0x5e, 0x23, 0x28, 0x16, 0x96, 0xad, 0x95, 0x5c, 0xec, 0xff, + 0x56, 0x9a, 0x8d, 0x7b, 0xe5, 0x11, 
0x85, 0xbc, 0x08, 0x77, 0x56, 0xf7, 0xe3, 0xeb, 0xa3, 0xb8, + 0x69, 0x76, 0x79, 0x9b, 0x3b, 0xcc, 0x66, 0x34, 0xa8, 0x4d, 0x4b, 0x4f, 0xa3, 0x5c, 0x88, 0xa0, + 0x75, 0xd6, 0xa9, 0xe0, 0xeb, 0x04, 0xca, 0x25, 0xb3, 0x4e, 0xd3, 0x71, 0x24, 0x81, 0xea, 0x70, + 0x3e, 0xcc, 0xc0, 0x8f, 0x59, 0x20, 0xb8, 0xdf, 0xad, 0x33, 0x97, 0x09, 0x95, 0x37, 0xb9, 0x5a, + 0xf1, 0xf8, 0xa8, 0x74, 0x1e, 0x0f, 0xe0, 0xe3, 0x81, 0x5a, 0xb2, 0x8b, 0x09, 0x1a, 0x08, 0x93, + 0x2b, 0x51, 0x4d, 0xec, 0xd2, 0x40, 0x60, 0xc5, 0x91, 0x47, 0x6b, 0x4b, 0x4e, 0x4f, 0x0d, 0xb3, + 0xfd, 0x51, 0xf3, 0xdf, 0x56, 0x54, 0x6c, 0xb8, 0xc8, 0x87, 0x7c, 0x40, 0x1d, 0x6a, 0x0b, 0xee, + 0x17, 0x27, 0x55, 0x8b, 0xbb, 0x71, 0xb6, 0xc3, 0xab, 0xb2, 0x63, 0x60, 0x74, 0x53, 0x8b, 0xf7, + 0xd8, 0x90, 0x71, 0x64, 0x07, 0x6d, 0x41, 0x5e, 0x84, 0x75, 0x93, 0x1f, 0x5e, 0xfa, 0xdb, 0xbc, + 0x11, 0x96, 0x8b, 0xee, 0x54, 0x6a, 0x23, 0xc2, 0x8a, 0x8a, 0x20, 0x16, 0xaf, 0xc3, 0x4c, 0xca, + 0xf6, 0x48, 0x3d, 0xea, 0x0f, 0x39, 0xb8, 0x38, 0xf8, 0xbc, 0x44, 0xd7, 0x61, 0x46, 0xe2, 0x07, + 0xe2, 0x1e, 0xf5, 0x55, 0x6f, 0xb1, 0x54, 0x6f, 0xb9, 0x60, 0x56, 0x36, 0x53, 0x4f, 0x32, 0x71, + 0x5a, 0x16, 0xdd, 0x02, 0xc4, 0xf7, 0x02, 0xea, 0x77, 0x68, 0xe3, 0x23, 0x7d, 0xd1, 0x88, 0xbb, + 0x53, 0xd4, 0xf0, 0xef, 0xf4, 0x49, 0xe0, 0x01, 0x5a, 0x23, 0x66, 0xda, 0x1a, 0xcc, 0x99, 0x43, + 0x23, 0x64, 0x9a, 0x24, 0x8b, 0x2a, 0xe8, 0x6e, 0x9a, 0x8d, 0x7b, 0xe5, 0xd1, 0x47, 0x30, 0x4f, + 0x3a, 0x84, 0x39, 0x64, 0xcf, 0xa1, 0x11, 0x48, 0x4e, 0x81, 0xbc, 0x66, 0x40, 0xe6, 0xd7, 0x7a, + 0x05, 0x70, 0xbf, 0x0e, 0xda, 0x82, 0x85, 0xb6, 0xd7, 0x0f, 0x35, 0xa1, 0xa0, 0x5e, 0x37, 0x50, + 0x0b, 0x77, 0xfb, 0x45, 0xf0, 0x20, 0x3d, 0xf4, 0x10, 0x26, 0x1b, 0x54, 0x10, 0xe6, 0x04, 0xc5, + 0x49, 0x95, 0x37, 0xef, 0x8e, 0x92, 0xab, 0x37, 0xb4, 0xaa, 0xbe, 0x3c, 0x99, 0x0f, 0x1c, 0x02, + 0x22, 0x06, 0x60, 0x87, 0xa3, 0x78, 0x50, 0xcc, 0xab, 0x52, 0xf8, 0xd6, 0x88, 0xa5, 0xa0, 0xb5, + 0xe3, 0x51, 0x31, 0x22, 0x05, 0x38, 0x01, 0x2e, 0x13, 0xcb, 0x97, 0x0d, 
0x2b, 0x8a, 0x87, 0xee, + 0x70, 0x51, 0x62, 0xe1, 0x24, 0x13, 0xa7, 0x65, 0xcb, 0xbf, 0xb6, 0x60, 0xbe, 0x6f, 0x4d, 0xc9, + 0x09, 0xd9, 0x7a, 0xf1, 0x84, 0x8c, 0x1e, 0xc0, 0x84, 0x2d, 0x6b, 0x3f, 0x1c, 0x69, 0x2e, 0x8f, + 0x7c, 0xa1, 0x8b, 0x9b, 0x89, 0xfa, 0x0c, 0xb0, 0x01, 0x2c, 0xcf, 0xc1, 0x4c, 0x2c, 0x5a, 0xe7, + 0xcd, 0xf2, 0x67, 0xd9, 0xe4, 0x51, 0x52, 0xe7, 0xcd, 0x3b, 0x2d, 0x1d, 0x82, 0x2a, 0x14, 0x6c, + 0xee, 0x09, 0x22, 0x07, 0x48, 0xe3, 0xf1, 0xbc, 0x01, 0x2d, 0xac, 0x87, 0x0c, 0x1c, 0xcb, 0xc8, + 0x7e, 0xb6, 0xcf, 0x1d, 0x87, 0x3f, 0x56, 0x35, 0x94, 0xe8, 0x67, 0x37, 0x15, 0x15, 0x1b, 0xae, + 0xac, 0x95, 0x96, 0x6c, 0x99, 0xbc, 0x1d, 0x1e, 0xeb, 0x51, 0xad, 0x6c, 0x1b, 0x3a, 0x8e, 0x24, + 0xd0, 0x15, 0x98, 0x0e, 0x98, 0x67, 0xd3, 0xf0, 0xa8, 0xc9, 0xea, 0xe9, 0x41, 0xde, 0x51, 0x77, + 0x12, 0x74, 0x9c, 0x92, 0x42, 0xf7, 0xa1, 0xa0, 0xbe, 0xd5, 0x2d, 0x29, 0x37, 0xf2, 0x2d, 0x69, + 0x46, 0x2e, 0x72, 0x27, 0x04, 0xc0, 0x31, 0x16, 0x5a, 0x05, 0x10, 0xcc, 0xa5, 0x81, 0x20, 0x6e, + 0x2b, 0x30, 0x8d, 0x3b, 0x4a, 0xa6, 0xdd, 0x88, 0x83, 0x13, 0x52, 0xe8, 0x1b, 0x50, 0x90, 0x29, + 0x50, 0x67, 0x1e, 0xd5, 0x55, 0x91, 0xd1, 0x06, 0x76, 0x43, 0x22, 0x8e, 0xf9, 0xa8, 0x02, 0xe0, + 0xc8, 0x03, 0xa4, 0xd6, 0x15, 0x34, 0x50, 0xbd, 0x37, 0x53, 0x9b, 0x95, 0xe0, 0xf5, 0x88, 0x8a, + 0x13, 0x12, 0x32, 0xea, 0x1e, 0x7f, 0x4c, 0x98, 0x50, 0x29, 0x9a, 0x88, 0xfa, 0x6d, 0x7e, 0x9f, + 0x30, 0x81, 0x0d, 0x17, 0xbd, 0x09, 0x93, 0x1d, 0xd3, 0x24, 0x41, 0x81, 0xaa, 0x1a, 0x0b, 0x5b, + 0x63, 0xc8, 0x2b, 0xff, 0x3b, 0x95, 0xbb, 0x98, 0xfe, 0xa8, 0x2d, 0x8f, 0xaa, 0x93, 0x47, 0xf2, + 0xb7, 0x60, 0x42, 0x77, 0xd7, 0xde, 0xcd, 0xd7, 0x2d, 0x18, 0x1b, 0x2e, 0x7a, 0x03, 0x72, 0xfb, + 0xdc, 0xb7, 0xa9, 0xd9, 0xf9, 0xe8, 0x7a, 0x70, 0x53, 0x12, 0xb1, 0xe6, 0xa1, 0x7b, 0x30, 0x47, + 0x9f, 0xa4, 0xe7, 0xbf, 0xac, 0x7a, 0x54, 0x79, 0x5b, 0xf6, 0xc6, 0x8d, 0x34, 0x6b, 0xf8, 0x1b, + 0x49, 0x2f, 0x48, 0xf9, 0x1f, 0x93, 0x80, 0xfa, 0x87, 0x1d, 0x74, 0x2d, 0xf5, 0xa4, 0xf0, 0x56, + 0xcf, 0x93, 
0xc2, 0xc5, 0x7e, 0x8d, 0xc4, 0x8b, 0x42, 0x07, 0xa6, 0x6d, 0xf5, 0x22, 0xa5, 0xdf, + 0x9f, 0xcc, 0x34, 0xf3, 0x9d, 0x93, 0x0b, 0xf6, 0xc5, 0xef, 0x58, 0x3a, 0xc1, 0xd7, 0x13, 0xc8, + 0x38, 0x65, 0x07, 0xfd, 0x14, 0x66, 0x7d, 0x6a, 0xfb, 0x94, 0x08, 0x6a, 0x2c, 0xeb, 0xbb, 0x46, + 0xed, 0x64, 0xcb, 0xd8, 0xe8, 0x0d, 0xb5, 0x8d, 0x8e, 0x8f, 0x4a, 0xb3, 0x38, 0x85, 0x8e, 0x7b, + 0xac, 0xa1, 0x1f, 0xc3, 0x8c, 0xcf, 0x1d, 0x87, 0x79, 0x4d, 0x63, 0x3e, 0xab, 0xcc, 0xaf, 0x9d, + 0xc2, 0xbc, 0x56, 0x1b, 0x6a, 0x7d, 0x5e, 0xf5, 0xd7, 0x24, 0x36, 0x4e, 0x9b, 0x42, 0x0f, 0xa0, + 0xe0, 0xd3, 0x80, 0xb7, 0x7d, 0x9b, 0x06, 0xa6, 0xb8, 0x57, 0x06, 0x4d, 0x27, 0xd8, 0x08, 0xc9, + 0x2c, 0x66, 0x3e, 0x95, 0xb6, 0x82, 0xb8, 0x87, 0x85, 0xdc, 0x00, 0xc7, 0x68, 0xe8, 0x40, 0xa6, + 0xf1, 0x1e, 0x75, 0x64, 0x69, 0x67, 0x4e, 0xb7, 0x91, 0xfd, 0x0b, 0xa9, 0xd4, 0x15, 0x84, 0x9e, + 0xb2, 0x12, 0x85, 0x20, 0x89, 0xd8, 0xe0, 0xa3, 0x9f, 0xc0, 0x14, 0x49, 0xdc, 0x5d, 0xf5, 0x60, + 0xb7, 0x71, 0x26, 0x73, 0x7d, 0xd7, 0xd5, 0xe8, 0xb9, 0x32, 0x79, 0x4f, 0x4d, 0x9a, 0x43, 0x77, + 0xe0, 0x02, 0xb1, 0x05, 0xeb, 0xd0, 0x1b, 0x94, 0x34, 0x1c, 0xe6, 0x45, 0xed, 0x55, 0x37, 0x9c, + 0xd7, 0x8e, 0x8f, 0x4a, 0x17, 0xd6, 0x06, 0x09, 0xe0, 0xc1, 0x7a, 0x8b, 0x57, 0x61, 0x2a, 0xb1, + 0xea, 0x51, 0xe6, 0xbb, 0xc5, 0x0f, 0xe1, 0xdc, 0x4b, 0xdd, 0x61, 0x7f, 0x37, 0x0e, 0xe5, 0xbe, + 0x06, 0xa0, 0x9e, 0x24, 0xd7, 0x0f, 0x88, 0xd7, 0x0c, 0x33, 0xb6, 0x0a, 0x05, 0xd2, 0x16, 0xdc, + 0x25, 0x82, 0xd9, 0x0a, 0x38, 0x1f, 0xe7, 0xc2, 0x5a, 0xc8, 0xc0, 0xb1, 0x0c, 0xba, 0x06, 0xb3, + 0xd1, 0xe1, 0x26, 0x3b, 0x9d, 0x3e, 0x8d, 0x0b, 0xba, 0x3c, 0xd6, 0x53, 0x1c, 0xdc, 0x23, 0x19, + 0x5d, 0x9b, 0x33, 0x2f, 0x77, 0x6d, 0xbe, 0x15, 0xbe, 0xfa, 0xa9, 0x35, 0xd1, 0x86, 0x5a, 0x95, + 0x79, 0x89, 0xeb, 0x79, 0xc9, 0x4b, 0x4a, 0xe0, 0x01, 0x5a, 0xe5, 0x9f, 0x59, 0xf0, 0xda, 0xd0, + 0x2b, 0x14, 0xfa, 0x41, 0xf8, 0xd4, 0x63, 0xa9, 0x44, 0xbc, 0x7a, 0xd6, 0xeb, 0x58, 0x77, 0xf0, + 0x8b, 0xcf, 0xb5, 0xfc, 0xaf, 0x7e, 0x5b, 0x1a, 
0xfb, 0xf4, 0x3f, 0xcb, 0x63, 0xe5, 0x2f, 0x2d, + 0xb8, 0x34, 0x44, 0xf7, 0x65, 0x9e, 0xc2, 0x7f, 0x61, 0xc1, 0x3c, 0xeb, 0xdd, 0x74, 0xd3, 0x8e, + 0x6f, 0x9c, 0x61, 0x35, 0x7d, 0x09, 0x54, 0xbb, 0x20, 0x67, 0xea, 0x3e, 0x32, 0xee, 0xb7, 0x5a, + 0xfe, 0xa7, 0x05, 0xb3, 0x1b, 0x4f, 0xa8, 0x7d, 0x9b, 0x3e, 0xde, 0xe6, 0x8d, 0x8f, 0x39, 0x3f, + 0x4c, 0xfe, 0x3f, 0x60, 0x0d, 0xff, 0x7f, 0x00, 0x5d, 0x85, 0x0c, 0xf5, 0x3a, 0xa7, 0xf8, 0x47, + 0x62, 0xca, 0xc4, 0x26, 0xb3, 0xe1, 0x75, 0xb0, 0xd4, 0x91, 0x23, 0x6b, 0x2a, 0x09, 0x55, 0xee, + 0x15, 0xe2, 0x91, 0x35, 0x95, 0xb1, 0x38, 0x2d, 0xab, 0xa6, 0x03, 0xee, 0xb4, 0x65, 0x92, 0x67, + 0x63, 0xf7, 0xee, 0x69, 0x12, 0x0e, 0x79, 0xe5, 0xdf, 0x8f, 0xc3, 0x4c, 0x9d, 0xed, 0x53, 0xbb, + 0x6b, 0x3b, 0x54, 0xad, 0xeb, 0x01, 0xcc, 0xec, 0x13, 0xe6, 0xb4, 0x7d, 0xaa, 0xb7, 0xd0, 0x6c, + 0xdd, 0xbb, 0xa1, 0xd5, 0x9b, 0x49, 0xe6, 0xf3, 0xa3, 0xd2, 0x62, 0x4a, 0x3d, 0xc5, 0xc5, 0x69, + 0x24, 0xf4, 0x08, 0x80, 0x46, 0x41, 0x34, 0x3b, 0xf9, 0xce, 0xc9, 0x3b, 0x99, 0x0e, 0xbc, 0x9e, + 0x9d, 0x62, 0x1a, 0x4e, 0x60, 0xa2, 0x1f, 0xca, 0xc1, 0xac, 0xa9, 0xb6, 0x34, 0x50, 0x7f, 0xdb, + 0x4c, 0xad, 0x56, 0x4e, 0x36, 0xb0, 0x6b, 0x54, 0x14, 0x7c, 0xd4, 0x42, 0x42, 0xaa, 0x1a, 0xe6, + 0xcc, 0xcf, 0xf2, 0x5f, 0xc7, 0x61, 0xf9, 0xa4, 0xe3, 0x56, 0xf6, 0x19, 0x39, 0x2c, 0xf2, 0xb6, + 0x08, 0x9b, 0xb0, 0xbe, 0xc5, 0xaa, 0x3e, 0xb3, 0x9b, 0xe2, 0xe0, 0x1e, 0x49, 0x74, 0x0b, 0x32, + 0x2d, 0x9f, 0x9a, 0xe0, 0x54, 0x4f, 0xf6, 0x3d, 0x15, 0xfd, 0xda, 0xa4, 0x4c, 0xa0, 0x6d, 0x9f, + 0x62, 0x09, 0x22, 0xb1, 0x5c, 0xd6, 0x30, 0x2d, 0xeb, 0x6c, 0x58, 0x5b, 0xac, 0x81, 0x25, 0x08, + 0xda, 0x82, 0x6c, 0x8b, 0x07, 0xc2, 0x4c, 0x05, 0x23, 0x83, 0xe5, 0x65, 0xd5, 0x6f, 0xf3, 0x40, + 0x60, 0x05, 0x53, 0xfe, 0x5b, 0x16, 0x4a, 0x27, 0xcc, 0x0d, 0x68, 0x13, 0x16, 0xf4, 0x25, 0x79, + 0x9b, 0xfa, 0x8c, 0x37, 0xd2, 0xb1, 0xbc, 0xa4, 0x2e, 0xb1, 0xfd, 0x6c, 0x3c, 0x48, 0x07, 0x7d, + 0x00, 0x73, 0xcc, 0x13, 0xd4, 0xef, 0x10, 0x27, 0x84, 0xd1, 0xcf, 0x02, 0x0b, 0xfa, 
0x75, 0x2e, + 0xc5, 0xc2, 0xbd, 0xb2, 0x03, 0x36, 0x34, 0x73, 0xea, 0x0d, 0x75, 0x60, 0xd6, 0x25, 0x4f, 0x12, + 0xd7, 0x6d, 0x13, 0xc2, 0xe1, 0xff, 0x86, 0xb4, 0x05, 0x73, 0x2a, 0xfa, 0x0f, 0xd3, 0xca, 0xa6, + 0x27, 0xee, 0xf8, 0x3b, 0xc2, 0x67, 0x5e, 0x53, 0x5b, 0xdb, 0x4a, 0x61, 0xe1, 0x1e, 0x6c, 0xf4, + 0x10, 0xf2, 0x2e, 0x79, 0xb2, 0xd3, 0xf6, 0x9b, 0xe1, 0x2d, 0x69, 0x74, 0x3b, 0xea, 0xcd, 0x67, + 0xcb, 0xa0, 0xe0, 0x08, 0x2f, 0x4c, 0xcd, 0xc9, 0x57, 0x91, 0x9a, 0x61, 0x3a, 0xe5, 0x5f, 0x4d, + 0x3a, 0x7d, 0x66, 0xc1, 0x74, 0xb2, 0x8a, 0xfb, 0x7b, 0xa7, 0x35, 0x42, 0xef, 0xfc, 0x36, 0x8c, + 0x0b, 0x6e, 0x4a, 0xf0, 0x54, 0x27, 0x3d, 0x18, 0xd8, 0xf1, 0x5d, 0x8e, 0xc7, 0x05, 0xaf, 0xad, + 0x3c, 0x7d, 0xb6, 0x34, 0xf6, 0xf9, 0xb3, 0xa5, 0xb1, 0x2f, 0x9e, 0x2d, 0x8d, 0x7d, 0x7a, 0xbc, + 0x64, 0x3d, 0x3d, 0x5e, 0xb2, 0x3e, 0x3f, 0x5e, 0xb2, 0xbe, 0x38, 0x5e, 0xb2, 0xbe, 0x3c, 0x5e, + 0xb2, 0x7e, 0xf9, 0xdf, 0xa5, 0xb1, 0x87, 0xe3, 0x9d, 0xcb, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, + 0x19, 0x34, 0x0f, 0xd6, 0x4b, 0x20, 0x00, 0x00, +} + func (m *CustomDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -208,48 +884,50 @@ func (m *CustomDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { } func (m *CustomDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i += copy(dAtA[i:], m.Image) - if len(m.Environment) > 0 { - for _, msg := range m.Environment { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, 
err - } - i += n - } - } if len(m.Command) > 0 { - for _, s := range m.Command { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + if len(m.Environment) > 0 { + for iNdEx := len(m.Environment) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Environment[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCause) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -257,31 +935,39 @@ func (m *DeploymentCause) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCause) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.ImageTrigger != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ImageTrigger.Size())) - n1, err := m.ImageTrigger.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ImageTrigger.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 + i-- + dAtA[i] = 0x12 } - 
return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCauseImageTrigger) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -289,25 +975,32 @@ func (m *DeploymentCauseImageTrigger) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCauseImageTrigger) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCauseImageTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) - n2, err := m.From.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n2 - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -315,49 +1008,62 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a 
- i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n3, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) + i-- dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n4, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n4 - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -365,41 +1071,52 @@ func (m *DeploymentConfig) Marshal() (dAtA []byte, err error) { } func (m *DeploymentConfig) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*DeploymentConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n5 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n6, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n7, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n7 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentConfigList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -407,37 +1124,46 @@ func (m *DeploymentConfigList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentConfigList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 if len(m.Items) > 0 { - 
for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentConfigRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -445,51 +1171,61 @@ func (m *DeploymentConfigRollback) Marshal() (dAtA []byte, err error) { } func (m *DeploymentConfigRollback) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a if len(m.UpdatedAnnotations) > 0 { keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) for k := range m.UpdatedAnnotations { keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for _, k := range keysForUpdatedAnnotations { - dAtA[i] = 0x12 - i++ - v := m.UpdatedAnnotations[string(k)] - mapSize := 1 + len(k) + 
sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i-- + dAtA[i] = 0x12 + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n9, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentConfigRollbackSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -497,60 +1233,67 @@ func (m *DeploymentConfigRollbackSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentConfigRollbackSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigRollbackSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) - n10, err := m.From.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x10 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - dAtA[i] = 0x18 - i++ - if m.IncludeTriggers { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x20 - i++ - if m.IncludeTemplate { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x28 - i++ - if m.IncludeReplicationMeta { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x30 - i++ + i-- if m.IncludeStrategy { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - return i, nil + i-- + dAtA[i] = 0x30 + i-- + if m.IncludeReplicationMeta { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + i-- + if m.IncludeTemplate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i-- + if m.IncludeTriggers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x10 + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentConfigSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -558,94 +1301,107 @@ func (m *DeploymentConfigSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentConfigSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n11, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - if m.Triggers != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Triggers.Size())) - n12, err := m.Triggers.MarshalTo(dAtA[i:]) - 
if err != nil { - return 0, err + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x48 + if m.Template != nil { + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n12 + i-- + dAtA[i] = 0x42 } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - } - dAtA[i] = 0x28 - i++ - if m.Test { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x30 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { - dAtA[i] = 0x3a - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a } } - if m.Template != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n13, err := m.Template.MarshalTo(dAtA[i:]) + i-- + if 
m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i-- + if m.Test { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x20 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x18 + if m.Triggers != nil { + { + size, err := m.Triggers.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n13 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentConfigStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -653,60 +1409,69 @@ func (m *DeploymentConfigStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentConfigStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LatestVersion)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, 
i, uint64(m.UnavailableReplicas)) - if m.Details != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) - n14, err := m.Details.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x48 if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + if m.Details != nil { + { + size, err := m.Details.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x3a } - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - return i, nil + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.LatestVersion)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m *DeploymentDetails) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -714,33 +1479,41 @@ func (m *DeploymentDetails) Marshal() (dAtA []byte, err error) { } func (m *DeploymentDetails) MarshalTo(dAtA []byte) (int, error) { - 
var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) if len(m.Causes) > 0 { - for _, msg := range m.Causes { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentLog) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -748,17 +1521,22 @@ func (m *DeploymentLog) Marshal() (dAtA []byte, err error) { } func (m *DeploymentLog) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentLogOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -766,83 +1544,91 @@ func (m *DeploymentLogOptions) Marshal() (dAtA []byte, err error) { } func (m *DeploymentLogOptions) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i += copy(dAtA[i:], m.Container) - dAtA[i] = 0x10 - i++ - if m.Follow { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.Version != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Version)) + i-- + dAtA[i] = 0x50 } - i++ - dAtA[i] = 0x18 - i++ - if m.Previous { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SinceSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) - } - if m.SinceTime != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) - n15, err := m.SinceTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - } - dAtA[i] = 0x30 - i++ - if m.Timestamps { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.TailLines != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) - } - if m.LimitBytes != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) - } - dAtA[i] = 0x48 - i++ + i-- if m.NoWait { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if m.Version != nil { - dAtA[i] = 0x50 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Version)) + i-- + dAtA[i] = 0x48 + if m.LimitBytes != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + i-- + dAtA[i] = 0x40 } - return i, nil + if m.TailLines != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + i-- + dAtA[i] = 0x38 + } + i-- + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.SinceTime != nil { + { + size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.SinceSeconds != nil 
{ + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + i-- + dAtA[i] = 0x20 + } + i-- + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -850,52 +1636,52 @@ func (m *DeploymentRequest) Marshal() (dAtA []byte, err error) { } func (m *DeploymentRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.Latest { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ExcludeTriggers) > 0 { + for iNdEx := len(m.ExcludeTriggers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTriggers[iNdEx]) + copy(dAtA[i:], m.ExcludeTriggers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExcludeTriggers[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } - i++ - dAtA[i] = 0x18 - i++ + i-- if m.Force { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if len(m.ExcludeTriggers) > 0 { - for _, s := range m.ExcludeTriggers { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } + i-- + dAtA[i] = 0x18 + i-- + if m.Latest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return i, nil + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -903,73 +1689,19 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.CustomParams != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CustomParams.Size())) - n16, err := m.CustomParams.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - if m.RecreateParams != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RecreateParams.Size())) - n17, err := m.RecreateParams.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - if m.RollingParams != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingParams.Size())) - n18, err := m.RollingParams.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n19, err := m.Resources.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - if len(m.Labels) > 0 { - keysForLabels := make([]string, 0, len(m.Labels)) - for k := range m.Labels { - keysForLabels = append(keysForLabels, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - for _, k := range keysForLabels { - dAtA[i] = 0x32 - i++ - v := m.Labels[string(k)] - mapSize := 1 + len(k) + 
sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x40 } if len(m.Annotations) > 0 { keysForAnnotations := make([]string, 0, len(m.Annotations)) @@ -977,34 +1709,106 @@ func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { keysForAnnotations = append(keysForAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - for _, k := range keysForAnnotations { - dAtA[i] = 0x3a - i++ - v := m.Annotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a } } - if m.ActiveDeadlineSeconds != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } } - return i, nil + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.RollingParams != nil { + { + size, err := m.RollingParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.RecreateParams != nil { + { + size, err := m.RecreateParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.CustomParams != nil { + { + size, err := m.CustomParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *DeploymentTriggerImageChangeParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1012,52 +1816,54 @@ func (m *DeploymentTriggerImageChangeParams) Marshal() (dAtA []byte, err error) } func (m *DeploymentTriggerImageChangeParams) MarshalTo(dAtA []byte) (int, 
error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentTriggerImageChangeParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0x8 - i++ + i -= len(m.LastTriggeredImage) + copy(dAtA[i:], m.LastTriggeredImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImage))) + i-- + dAtA[i] = 0x22 + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ContainerNames) > 0 { + for iNdEx := len(m.ContainerNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ContainerNames[iNdEx]) + copy(dAtA[i:], m.ContainerNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i-- if m.Automatic { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - if len(m.ContainerNames) > 0 { - for _, s := range m.ContainerNames { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) - n20, err := m.From.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImage))) - i += copy(dAtA[i:], m.LastTriggeredImage) - return i, nil + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } func (m DeploymentTriggerPolicies) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1065,29 +1871,36 @@ func (m DeploymentTriggerPolicies) Marshal() (dAtA []byte, err error) { } func (m DeploymentTriggerPolicies) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() 
+ return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m DeploymentTriggerPolicies) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m) > 0 { - for _, msg := range m { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0xa } } - return i, nil + return len(dAtA) - i, nil } func (m *DeploymentTriggerPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1095,31 +1908,39 @@ func (m *DeploymentTriggerPolicy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentTriggerPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentTriggerPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) if m.ImageChangeParams != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ImageChangeParams.Size())) - n21, err := m.ImageChangeParams.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.ImageChangeParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n21 + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ExecNewPodHook) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1127,63 +1948,59 @@ func (m *ExecNewPodHook) Marshal() (dAtA []byte, err error) { } func (m *ExecNewPodHook) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecNewPodHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Env) > 0 { - for _, msg := range m.Env { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) - i += copy(dAtA[i:], m.ContainerName) if len(m.Volumes) > 0 { - for _, s := range m.Volumes { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0x1a + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Command) > 0 { 
+ for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } func (m *LifecycleHook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1191,43 +2008,53 @@ func (m *LifecycleHook) Marshal() (dAtA []byte, err error) { } func (m *LifecycleHook) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LifecycleHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FailurePolicy))) - i += copy(dAtA[i:], m.FailurePolicy) - if m.ExecNewPod != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExecNewPod.Size())) - n22, err := m.ExecNewPod.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } if len(m.TagImages) > 0 { - for _, msg := range m.TagImages { + for iNdEx := len(m.TagImages) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TagImages[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) + } + } + if m.ExecNewPod != nil { + { + size, err := m.ExecNewPod.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } - i += n + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return i, nil + i -= len(m.FailurePolicy) + copy(dAtA[i:], m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FailurePolicy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func 
(m *RecreateDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1235,52 +2062,63 @@ func (m *RecreateDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { } func (m *RecreateDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RecreateDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.TimeoutSeconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - } - if m.Pre != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pre.Size())) - n23, err := m.Pre.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Post != nil { + { + size, err := m.Post.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n23 + i-- + dAtA[i] = 0x22 } if m.Mid != nil { + { + size, err := m.Mid.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Mid.Size())) - n24, err := m.Mid.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 } - if m.Post != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Post.Size())) - n25, err := m.Post.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Pre != nil { + { + size, err := m.Pre.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n25 + i-- + dAtA[i] = 0x12 } - return i, nil + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, 
uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *RollingDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1288,72 +2126,85 @@ func (m *RollingDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { } func (m *RollingDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.UpdatePeriodSeconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.UpdatePeriodSeconds)) - } - if m.IntervalSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.IntervalSeconds)) - } - if m.TimeoutSeconds != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - } - if m.MaxUnavailable != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n26, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.Post != nil { + { + size, err := m.Post.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n26 - } - if m.MaxSurge != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n27, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 + i-- + dAtA[i] = 0x42 } if m.Pre != nil { + { + size, err := m.Pre.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pre.Size())) - n28, 
err := m.Pre.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 } - if m.Post != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Post.Size())) - n29, err := m.Post.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n29 + i-- + dAtA[i] = 0x2a } - return i, nil + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x18 + } + if m.IntervalSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntervalSeconds)) + i-- + dAtA[i] = 0x10 + } + if m.UpdatePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.UpdatePeriodSeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } func (m *TagImageHook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -1361,53 +2212,48 @@ func (m *TagImageHook) Marshal() (dAtA []byte, err error) { } func (m *TagImageHook) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagImageHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) - i += copy(dAtA[i:], m.ContainerName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.To.Size())) - n30, err := m.To.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := 
m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n30 - return i, nil + i-- + dAtA[i] = 0x12 + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *CustomDeploymentStrategyParams) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Image) @@ -1428,6 +2274,9 @@ func (m *CustomDeploymentStrategyParams) Size() (n int) { } func (m *DeploymentCause) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1440,6 +2289,9 @@ func (m *DeploymentCause) Size() (n int) { } func (m *DeploymentCauseImageTrigger) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.From.Size() @@ -1448,6 +2300,9 @@ func (m *DeploymentCauseImageTrigger) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1466,6 +2321,9 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentConfig) Size() (n int) { + if m 
== nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -1478,6 +2336,9 @@ func (m *DeploymentConfig) Size() (n int) { } func (m *DeploymentConfigList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -1492,6 +2353,9 @@ func (m *DeploymentConfigList) Size() (n int) { } func (m *DeploymentConfigRollback) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1510,6 +2374,9 @@ func (m *DeploymentConfigRollback) Size() (n int) { } func (m *DeploymentConfigRollbackSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.From.Size() @@ -1523,6 +2390,9 @@ func (m *DeploymentConfigRollbackSpec) Size() (n int) { } func (m *DeploymentConfigSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.Strategy.Size() @@ -1554,6 +2424,9 @@ func (m *DeploymentConfigSpec) Size() (n int) { } func (m *DeploymentConfigStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l n += 1 + sovGenerated(uint64(m.LatestVersion)) @@ -1577,6 +2450,9 @@ func (m *DeploymentConfigStatus) Size() (n int) { } func (m *DeploymentDetails) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Message) @@ -1591,12 +2467,18 @@ func (m *DeploymentDetails) Size() (n int) { } func (m *DeploymentLog) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l return n } func (m *DeploymentLogOptions) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Container) @@ -1625,6 +2507,9 @@ func (m *DeploymentLogOptions) Size() (n int) { } func (m *DeploymentRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -1641,6 +2526,9 @@ func (m *DeploymentRequest) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1682,6 +2570,9 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *DeploymentTriggerImageChangeParams) Size() (n 
int) { + if m == nil { + return 0 + } var l int _ = l n += 2 @@ -1699,6 +2590,9 @@ func (m *DeploymentTriggerImageChangeParams) Size() (n int) { } func (m DeploymentTriggerPolicies) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m) > 0 { @@ -1711,6 +2605,9 @@ func (m DeploymentTriggerPolicies) Size() (n int) { } func (m *DeploymentTriggerPolicy) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Type) @@ -1723,6 +2620,9 @@ func (m *DeploymentTriggerPolicy) Size() (n int) { } func (m *ExecNewPodHook) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Command) > 0 { @@ -1749,6 +2649,9 @@ func (m *ExecNewPodHook) Size() (n int) { } func (m *LifecycleHook) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.FailurePolicy) @@ -1767,6 +2670,9 @@ func (m *LifecycleHook) Size() (n int) { } func (m *RecreateDeploymentStrategyParams) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.TimeoutSeconds != nil { @@ -1788,6 +2694,9 @@ func (m *RecreateDeploymentStrategyParams) Size() (n int) { } func (m *RollingDeploymentStrategyParams) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.UpdatePeriodSeconds != nil { @@ -1819,6 +2728,9 @@ func (m *RollingDeploymentStrategyParams) Size() (n int) { } func (m *TagImageHook) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.ContainerName) @@ -1829,14 +2741,7 @@ func (m *TagImageHook) Size() (n int) { } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1845,9 +2750,14 @@ func (this *CustomDeploymentStrategyParams) String() string { if this == nil { return "nil" } + repeatedStringForEnvironment := "[]EnvVar{" + for _, f := range this.Environment { + repeatedStringForEnvironment += 
fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnvironment += "}" s := strings.Join([]string{`&CustomDeploymentStrategyParams{`, `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Environment:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Environment), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `Environment:` + repeatedStringForEnvironment + `,`, `Command:` + fmt.Sprintf("%v", this.Command) + `,`, `}`, }, "") @@ -1859,7 +2769,7 @@ func (this *DeploymentCause) String() string { } s := strings.Join([]string{`&DeploymentCause{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ImageTrigger:` + strings.Replace(fmt.Sprintf("%v", this.ImageTrigger), "DeploymentCauseImageTrigger", "DeploymentCauseImageTrigger", 1) + `,`, + `ImageTrigger:` + strings.Replace(this.ImageTrigger.String(), "DeploymentCauseImageTrigger", "DeploymentCauseImageTrigger", 1) + `,`, `}`, }, "") return s @@ -1869,7 +2779,7 @@ func (this *DeploymentCauseImageTrigger) String() string { return "nil" } s := strings.Join([]string{`&DeploymentCauseImageTrigger{`, - `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1881,10 +2791,10 @@ func (this *DeploymentCondition) String() string { s := strings.Join([]string{`&DeploymentCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", 
this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1894,7 +2804,7 @@ func (this *DeploymentConfig) String() string { return "nil" } s := strings.Join([]string{`&DeploymentConfig{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentConfigSpec", "DeploymentConfigSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentConfigStatus", "DeploymentConfigStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1905,9 +2815,14 @@ func (this *DeploymentConfigList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]DeploymentConfig{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeploymentConfig", "DeploymentConfig", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentConfigList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DeploymentConfig", "DeploymentConfig", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -1939,7 +2854,7 @@ func (this 
*DeploymentConfigRollbackSpec) String() string { return "nil" } s := strings.Join([]string{`&DeploymentConfigRollbackSpec{`, - `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `IncludeTriggers:` + fmt.Sprintf("%v", this.IncludeTriggers) + `,`, `IncludeTemplate:` + fmt.Sprintf("%v", this.IncludeTemplate) + `,`, @@ -1971,7 +2886,7 @@ func (this *DeploymentConfigSpec) String() string { `Test:` + fmt.Sprintf("%v", this.Test) + `,`, `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, `Selector:` + mapStringForSelector + `,`, - `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1) + `,`, + `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -1981,6 +2896,11 @@ func (this *DeploymentConfigStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentConfigStatus{`, `LatestVersion:` + fmt.Sprintf("%v", this.LatestVersion) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, @@ -1988,8 +2908,8 @@ func (this *DeploymentConfigStatus) String() string { `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", 
this.UnavailableReplicas) + `,`, - `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "DeploymentDetails", "DeploymentDetails", 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `Details:` + strings.Replace(this.Details.String(), "DeploymentDetails", "DeploymentDetails", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `}`, }, "") @@ -1999,9 +2919,14 @@ func (this *DeploymentDetails) String() string { if this == nil { return "nil" } + repeatedStringForCauses := "[]DeploymentCause{" + for _, f := range this.Causes { + repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "DeploymentCause", "DeploymentCause", 1), `&`, ``, 1) + "," + } + repeatedStringForCauses += "}" s := strings.Join([]string{`&DeploymentDetails{`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Causes), "DeploymentCause", "DeploymentCause", 1), `&`, ``, 1) + `,`, + `Causes:` + repeatedStringForCauses + `,`, `}`, }, "") return s @@ -2024,7 +2949,7 @@ func (this *DeploymentLogOptions) String() string { `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, - `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v11.Time", 1) + `,`, `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, @@ -2073,10 +2998,10 @@ func (this *DeploymentStrategy) String() string { mapStringForAnnotations += "}" s := 
strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `CustomParams:` + strings.Replace(fmt.Sprintf("%v", this.CustomParams), "CustomDeploymentStrategyParams", "CustomDeploymentStrategyParams", 1) + `,`, - `RecreateParams:` + strings.Replace(fmt.Sprintf("%v", this.RecreateParams), "RecreateDeploymentStrategyParams", "RecreateDeploymentStrategyParams", 1) + `,`, - `RollingParams:` + strings.Replace(fmt.Sprintf("%v", this.RollingParams), "RollingDeploymentStrategyParams", "RollingDeploymentStrategyParams", 1) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "k8s_io_api_core_v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `CustomParams:` + strings.Replace(this.CustomParams.String(), "CustomDeploymentStrategyParams", "CustomDeploymentStrategyParams", 1) + `,`, + `RecreateParams:` + strings.Replace(this.RecreateParams.String(), "RecreateDeploymentStrategyParams", "RecreateDeploymentStrategyParams", 1) + `,`, + `RollingParams:` + strings.Replace(this.RollingParams.String(), "RollingDeploymentStrategyParams", "RollingDeploymentStrategyParams", 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, `Labels:` + mapStringForLabels + `,`, `Annotations:` + mapStringForAnnotations + `,`, `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, @@ -2091,7 +3016,7 @@ func (this *DeploymentTriggerImageChangeParams) String() string { s := strings.Join([]string{`&DeploymentTriggerImageChangeParams{`, `Automatic:` + fmt.Sprintf("%v", this.Automatic) + `,`, `ContainerNames:` + fmt.Sprintf("%v", this.ContainerNames) + `,`, - `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), 
"ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, `LastTriggeredImage:` + fmt.Sprintf("%v", this.LastTriggeredImage) + `,`, `}`, }, "") @@ -2103,7 +3028,7 @@ func (this *DeploymentTriggerPolicy) String() string { } s := strings.Join([]string{`&DeploymentTriggerPolicy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ImageChangeParams:` + strings.Replace(fmt.Sprintf("%v", this.ImageChangeParams), "DeploymentTriggerImageChangeParams", "DeploymentTriggerImageChangeParams", 1) + `,`, + `ImageChangeParams:` + strings.Replace(this.ImageChangeParams.String(), "DeploymentTriggerImageChangeParams", "DeploymentTriggerImageChangeParams", 1) + `,`, `}`, }, "") return s @@ -2112,9 +3037,14 @@ func (this *ExecNewPodHook) String() string { if this == nil { return "nil" } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" s := strings.Join([]string{`&ExecNewPodHook{`, `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, `}`, @@ -2125,10 +3055,15 @@ func (this *LifecycleHook) String() string { if this == nil { return "nil" } + repeatedStringForTagImages := "[]TagImageHook{" + for _, f := range this.TagImages { + repeatedStringForTagImages += strings.Replace(strings.Replace(f.String(), "TagImageHook", "TagImageHook", 1), `&`, ``, 1) + "," + } + repeatedStringForTagImages += "}" s := strings.Join([]string{`&LifecycleHook{`, `FailurePolicy:` + fmt.Sprintf("%v", this.FailurePolicy) + `,`, - `ExecNewPod:` + strings.Replace(fmt.Sprintf("%v", this.ExecNewPod), "ExecNewPodHook", "ExecNewPodHook", 1) + `,`, - `TagImages:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TagImages), 
"TagImageHook", "TagImageHook", 1), `&`, ``, 1) + `,`, + `ExecNewPod:` + strings.Replace(this.ExecNewPod.String(), "ExecNewPodHook", "ExecNewPodHook", 1) + `,`, + `TagImages:` + repeatedStringForTagImages + `,`, `}`, }, "") return s @@ -2139,9 +3074,9 @@ func (this *RecreateDeploymentStrategyParams) String() string { } s := strings.Join([]string{`&RecreateDeploymentStrategyParams{`, `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `Pre:` + strings.Replace(fmt.Sprintf("%v", this.Pre), "LifecycleHook", "LifecycleHook", 1) + `,`, - `Mid:` + strings.Replace(fmt.Sprintf("%v", this.Mid), "LifecycleHook", "LifecycleHook", 1) + `,`, - `Post:` + strings.Replace(fmt.Sprintf("%v", this.Post), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Pre:` + strings.Replace(this.Pre.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Mid:` + strings.Replace(this.Mid.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Post:` + strings.Replace(this.Post.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, `}`, }, "") return s @@ -2154,10 +3089,10 @@ func (this *RollingDeploymentStrategyParams) String() string { `UpdatePeriodSeconds:` + valueToStringGenerated(this.UpdatePeriodSeconds) + `,`, `IntervalSeconds:` + valueToStringGenerated(this.IntervalSeconds) + `,`, `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `Pre:` + strings.Replace(fmt.Sprintf("%v", this.Pre), "LifecycleHook", "LifecycleHook", 1) + `,`, - `Post:` + strings.Replace(fmt.Sprintf("%v", this.Post), "LifecycleHook", "LifecycleHook", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + 
`MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Pre:` + strings.Replace(this.Pre.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Post:` + strings.Replace(this.Post.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, `}`, }, "") return s @@ -2168,7 +3103,7 @@ func (this *TagImageHook) String() string { } s := strings.Join([]string{`&TagImageHook{`, `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, - `To:` + strings.Replace(strings.Replace(this.To.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2196,7 +3131,7 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2224,7 +3159,7 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2234,6 +3169,9 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2253,7 +3191,7 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2262,10 +3200,13 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - m.Environment = 
append(m.Environment, k8s_io_api_core_v1.EnvVar{}) + m.Environment = append(m.Environment, v1.EnvVar{}) if err := m.Environment[len(m.Environment)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2284,7 +3225,7 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2294,6 +3235,9 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2308,6 +3252,9 @@ func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2335,7 +3282,7 @@ func (m *DeploymentCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2363,7 +3310,7 @@ func (m *DeploymentCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2373,6 +3320,9 @@ func (m *DeploymentCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2392,7 +3342,7 @@ func (m *DeploymentCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2401,6 +3351,9 @@ func (m *DeploymentCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { 
+ return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2420,6 +3373,9 @@ func (m *DeploymentCause) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2447,7 +3403,7 @@ func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2475,7 +3431,7 @@ func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2484,6 +3440,9 @@ func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2500,6 +3459,9 @@ func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2527,7 +3489,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2555,7 +3517,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2565,6 +3527,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2584,7 +3549,7 @@ func (m 
*DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2594,6 +3559,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2613,7 +3581,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2622,6 +3590,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2643,7 +3614,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2653,6 +3624,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2672,7 +3646,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2682,6 +3656,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2701,7 +3678,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + 
msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2710,6 +3687,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2726,6 +3706,9 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2753,7 +3736,7 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2781,7 +3764,7 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2790,6 +3773,9 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2811,7 +3797,7 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2820,6 +3806,9 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2841,7 +3830,7 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2850,6 +3839,9 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2866,6 +3858,9 @@ func (m *DeploymentConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2893,7 +3888,7 @@ func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -2921,7 +3916,7 @@ func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2930,6 +3925,9 @@ func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2951,7 +3949,7 @@ func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -2960,6 +3958,9 @@ func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2977,6 +3978,9 @@ func (m *DeploymentConfigList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3004,7 +4008,7 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3032,7 +4036,7 @@ func (m 
*DeploymentConfigRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3042,6 +4046,9 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3061,7 +4068,7 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3070,54 +4077,20 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + 
entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3127,41 +4100,86 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + 
iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.UpdatedAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.UpdatedAnnotations[mapkey] = mapvalue } + m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -3177,7 +4195,7 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3186,6 +4204,9 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3202,6 +4223,9 @@ func (m *DeploymentConfigRollback) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3229,7 +4253,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3257,7 +4281,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3266,6 
+4290,9 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3287,7 +4314,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift + m.Revision |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3306,7 +4333,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3326,7 +4353,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3346,7 +4373,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3366,7 +4393,7 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3381,6 +4408,9 @@ func (m *DeploymentConfigRollbackSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3408,7 +4438,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3436,7 +4466,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3445,6 +4475,9 @@ func (m 
*DeploymentConfigSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3466,7 +4499,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3475,6 +4508,9 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3499,7 +4535,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3518,7 +4554,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int32(b) & 0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3538,7 +4574,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3558,7 +4594,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3578,7 +4614,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3587,54 +4623,20 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { 
- if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3644,41 +4646,86 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 
0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue } + m.Selector[mapkey] = mapvalue iNdEx = postIndex case 8: if wireType != 2 { @@ -3694,7 +4741,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ 
-3703,11 +4750,14 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.Template == nil { - m.Template = &k8s_io_api_core_v1.PodTemplateSpec{} + m.Template = &v1.PodTemplateSpec{} } if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3727,7 +4777,7 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift + m.MinReadySeconds |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3741,6 +4791,9 @@ func (m *DeploymentConfigSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3768,7 +4821,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -3796,7 +4849,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.LatestVersion |= (int64(b) & 0x7F) << shift + m.LatestVersion |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3815,7 +4868,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift + m.ObservedGeneration |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -3834,7 +4887,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift + m.Replicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3853,7 +4906,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + 
m.UpdatedReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3872,7 +4925,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift + m.AvailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3891,7 +4944,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + m.UnavailableReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3910,7 +4963,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3919,6 +4972,9 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3943,7 +4999,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -3952,6 +5008,9 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3974,7 +5033,7 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift + m.ReadyReplicas |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -3988,6 +5047,9 @@ func (m *DeploymentConfigStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4015,7 +5077,7 @@ func (m *DeploymentDetails) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4043,7 +5105,7 @@ func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4053,6 +5115,9 @@ func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4072,7 +5137,7 @@ func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4081,6 +5146,9 @@ func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4098,6 +5166,9 @@ func (m *DeploymentDetails) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4125,7 +5196,7 @@ func (m *DeploymentLog) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4148,6 +5219,9 @@ func (m *DeploymentLog) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4175,7 +5249,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4203,7 +5277,7 
@@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4213,6 +5287,9 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4232,7 +5309,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4252,7 +5329,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4272,7 +5349,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4292,7 +5369,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4301,11 +5378,14 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.SinceTime == nil { - m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + m.SinceTime = &v11.Time{} } if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4325,7 +5405,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4345,7 +5425,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ 
- v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4365,7 +5445,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4385,7 +5465,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4405,7 +5485,7 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -4420,6 +5500,9 @@ func (m *DeploymentLogOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4447,7 +5530,7 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4475,7 +5558,7 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4485,6 +5568,9 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4504,7 +5590,7 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4524,7 +5610,7 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4544,7 
+5630,7 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4554,6 +5640,9 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4568,6 +5657,9 @@ func (m *DeploymentRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4595,7 +5687,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4623,7 +5715,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -4633,6 +5725,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4652,7 +5747,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4661,6 +5756,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4685,7 +5783,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } @@ -4694,6 +5792,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4718,7 +5819,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4727,6 +5828,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4751,7 +5855,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4760,6 +5864,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4781,7 +5888,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4790,54 +5897,20 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Labels == nil { m.Labels = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4847,41 +5920,86 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Labels[mapkey] = mapvalue - } else { - var mapvalue string - m.Labels[mapkey] = mapvalue } + m.Labels[mapkey] = mapvalue iNdEx = postIndex case 7: if wireType != 2 { @@ -4897,7 +6015,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -4906,54 +6024,20 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey if m.Annotations == nil { m.Annotations = make(map[string]string) } - if iNdEx < postIndex { - var valuekey uint64 + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4963,41 +6047,86 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + 
intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Annotations[mapkey] = mapvalue - } else { - var mapvalue string - m.Annotations[mapkey] = mapvalue } + m.Annotations[mapkey] = mapvalue iNdEx = postIndex case 8: if wireType != 0 { @@ -5013,7 +6142,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= 
int64(b&0x7F) << shift if b < 0x80 { break } @@ -5028,6 +6157,9 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5055,7 +6187,7 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5083,7 +6215,7 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5103,7 +6235,7 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5113,6 +6245,9 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5132,7 +6267,7 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5141,6 +6276,9 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5162,7 +6300,7 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5172,6 +6310,9 @@ func (m 
*DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5186,6 +6327,9 @@ func (m *DeploymentTriggerImageChangeParams) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5213,7 +6357,7 @@ func (m *DeploymentTriggerPolicies) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5241,7 +6385,7 @@ func (m *DeploymentTriggerPolicies) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5250,6 +6394,9 @@ func (m *DeploymentTriggerPolicies) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5267,6 +6414,9 @@ func (m *DeploymentTriggerPolicies) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5294,7 +6444,7 @@ func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5322,7 +6472,7 @@ func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5332,6 +6482,9 @@ func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5351,7 +6504,7 @@ func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5360,6 +6513,9 @@ func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5379,6 +6535,9 @@ func (m *DeploymentTriggerPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5406,7 +6565,7 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5434,7 +6593,7 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5444,6 +6603,9 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5463,7 +6625,7 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5472,10 +6634,13 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return 
io.ErrUnexpectedEOF } - m.Env = append(m.Env, k8s_io_api_core_v1.EnvVar{}) + m.Env = append(m.Env, v1.EnvVar{}) if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -5494,7 +6659,7 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5504,6 +6669,9 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5523,7 +6691,7 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5533,6 +6701,9 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5547,6 +6718,9 @@ func (m *ExecNewPodHook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5574,7 +6748,7 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5602,7 +6776,7 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5612,6 +6786,9 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -5631,7 +6808,7 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5640,6 +6817,9 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5664,7 +6844,7 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5673,6 +6853,9 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5690,6 +6873,9 @@ func (m *LifecycleHook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5717,7 +6903,7 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5745,7 +6931,7 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5765,7 +6951,7 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5774,6 +6960,9 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5798,7 +6987,7 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5807,6 +6996,9 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5831,7 +7023,7 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5840,6 +7032,9 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5859,6 +7054,9 @@ func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5886,7 +7084,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -5914,7 +7112,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5934,7 +7132,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5954,7 +7152,7 @@ func (m 
*RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int64(b) & 0x7F) << shift + v |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -5974,7 +7172,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -5983,11 +7181,14 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxUnavailable = &intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6007,7 +7208,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6016,11 +7217,14 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + m.MaxSurge = &intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6040,7 +7244,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6049,6 +7253,9 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6073,7 +7280,7 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6082,6 +7289,9 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6101,6 +7311,9 @@ func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6128,7 +7341,7 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6156,7 +7369,7 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -6166,6 +7379,9 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6185,7 +7401,7 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -6194,6 +7410,9 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6210,6 +7429,9 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error 
{ if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6225,6 +7447,7 @@ func (m *TagImageHook) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -6256,10 +7479,8 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -6276,219 +7497,34 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - return iNdEx, nil + iNdEx += length case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + 
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") ) - -func init() { - proto.RegisterFile("github.com/openshift/api/apps/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2517 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xcb, 0x6f, 0x1c, 0x49, - 0x19, 0x77, 0x7b, 0x66, 0xec, 0x99, 0xcf, 0xaf, 0xb8, 0x9c, 0xc7, 0xac, 0x17, 0xd9, 0xd6, 0xac, - 0x36, 0x18, 0x58, 0x66, 0x12, 0x27, 0xac, 0xf2, 0xd0, 0x2e, 0x78, 0x1c, 0x67, 0xd7, 0xd1, 0x38, - 0x31, 0x65, 0x27, 0x21, 0x11, 0x82, 0x94, 0x7b, 0xca, 0xe3, 0x5a, 0x77, 0x77, 0x0d, 0xdd, 0x35, - 0x93, 0x0c, 0x42, 0x68, 0x2f, 0x20, 0x21, 0xed, 0x81, 0x23, 0x5c, 0x10, 0x07, 0xae, 0x20, 0x0e, - 0xdc, 0x11, 0x07, 0xa4, 0x1c, 0x40, 0x5a, 0x09, 0x09, 0x56, 0x08, 0x59, 0x1b, 0x73, 0xe3, 0x4f, - 0xc8, 0x09, 0xd5, 0xa3, 0x5f, 0xf3, 0x88, 0x3d, 0x4e, 0x6e, 0xd3, 0xdf, 0xe3, 0xf7, 0x55, 0x7d, - 0xaf, 0xfa, 0xaa, 0x06, 0x2e, 0x35, 0x98, 0xd8, 0x6f, 0xed, 0x96, 0x6d, 0xee, 0x56, 0x78, 0x93, - 0x7a, 0xc1, 0x3e, 0xdb, 0x13, 0x15, 0xd2, 0x64, 0x15, 0xd2, 0x6c, 0x06, 0x95, 0xf6, 0xe5, 0x4a, - 0x83, 0x7a, 0xd4, 0x27, 0x82, 0xd6, 0xcb, 0x4d, 0x9f, 0x0b, 0x8e, 0x96, 0x62, 0x8d, 0x72, 0xa4, - 0x51, 0x26, 0x4d, 0x56, 0x96, 0x1a, 0xe5, 0xf6, 0xe5, 0xf9, 0x6f, 0x26, 0x30, 0x1b, 0xbc, 0xc1, - 0x2b, 0x4a, 0x71, 0xb7, 0xb5, 0xa7, 0xbe, 0xd4, 0x87, 0xfa, 0xa5, 0x01, 0xe7, 0x4b, 0x07, 0xd7, - 0x82, 0x32, 0xe3, 0xca, 0xa8, 0xcd, 0x7d, 0xda, 0xc7, 0xe8, 0xfc, 0xd5, 0x58, 0xc6, 0x25, 0xf6, - 0x3e, 0xf3, 0xa8, 0xdf, 0xa9, 0x34, 0x0f, 0x1a, 0x92, 0x10, 0x54, 0x5c, 0x2a, 0x48, 0x3f, 0xad, - 0xca, 0x20, 0x2d, 0xbf, 0xe5, 0x09, 0xe6, 0xd2, 0x1e, 0x85, 0xf7, 0x8f, 0x53, 0x08, 0xec, 0x7d, - 0xea, 0x92, 0x1e, 0xbd, 0x2b, 0x83, 0xf4, 0x5a, 0x82, 0x39, 0x15, 0xe6, 0x89, 0x40, 0xf8, 0xdd, - 0x4a, 0xa5, 0x3f, 0x5b, 0xb0, 0xb0, 0xd6, 0x0a, 0x04, 0x77, 0x6f, 0xd1, 
0xa6, 0xc3, 0x3b, 0x2e, - 0xf5, 0xc4, 0xb6, 0x90, 0x12, 0x8d, 0xce, 0x16, 0xf1, 0x89, 0x1b, 0xa0, 0x77, 0x20, 0xc7, 0x5c, - 0xd2, 0xa0, 0x45, 0x6b, 0xc9, 0x5a, 0x2e, 0x54, 0xa7, 0x9e, 0x1f, 0x2e, 0x8e, 0x1c, 0x1d, 0x2e, - 0xe6, 0x36, 0x24, 0x11, 0x6b, 0x1e, 0xfa, 0x2e, 0x4c, 0x50, 0xaf, 0xcd, 0x7c, 0xee, 0x49, 0x84, - 0xe2, 0xe8, 0x52, 0x66, 0x79, 0x62, 0x65, 0xbe, 0xac, 0x97, 0xa4, 0x02, 0x23, 0xbd, 0x5a, 0x6e, - 0x5f, 0x2e, 0xaf, 0x7b, 0xed, 0x07, 0xc4, 0xaf, 0xce, 0x19, 0x98, 0x89, 0xf5, 0x58, 0x0d, 0x27, - 0x31, 0xd0, 0xbb, 0x30, 0x6e, 0x73, 0xd7, 0x25, 0x5e, 0xbd, 0x98, 0x59, 0xca, 0x2c, 0x17, 0xaa, - 0x13, 0x47, 0x87, 0x8b, 0xe3, 0x6b, 0x9a, 0x84, 0x43, 0x5e, 0xe9, 0x2f, 0x16, 0xcc, 0xc4, 0x6b, - 0x5f, 0x23, 0xad, 0x80, 0xa2, 0xeb, 0x90, 0x15, 0x9d, 0x66, 0xb8, 0xe2, 0x77, 0x8d, 0xa9, 0xec, - 0x4e, 0xa7, 0x49, 0x5f, 0x1e, 0x2e, 0x9e, 0x8b, 0xc5, 0x77, 0x7c, 0xd6, 0x68, 0x50, 0x5f, 0x32, - 0xb0, 0x52, 0x41, 0x01, 0x4c, 0xaa, 0x1d, 0x19, 0x4e, 0x71, 0x74, 0xc9, 0x5a, 0x9e, 0x58, 0xf9, - 0xa0, 0x7c, 0x5c, 0xc2, 0x95, 0xbb, 0xd6, 0xb0, 0x91, 0x00, 0xa9, 0x9e, 0x39, 0x3a, 0x5c, 0x9c, - 0x4c, 0x52, 0x70, 0xca, 0x48, 0xa9, 0x0e, 0x6f, 0xbf, 0x42, 0x1d, 0xad, 0x43, 0x76, 0xcf, 0xe7, - 0xae, 0xda, 0xce, 0xc4, 0xca, 0x3b, 0xfd, 0xbc, 0x7a, 0x6f, 0xf7, 0x13, 0x6a, 0x0b, 0x4c, 0xf7, - 0xa8, 0x4f, 0x3d, 0x9b, 0x56, 0x27, 0xc3, 0x3d, 0xdf, 0xf6, 0xb9, 0x8b, 0x95, 0x7a, 0xe9, 0x5f, - 0x19, 0x98, 0x4b, 0x98, 0xe1, 0x5e, 0x9d, 0x09, 0xc6, 0x3d, 0x74, 0x33, 0xe5, 0xad, 0xaf, 0x76, - 0x79, 0xeb, 0x42, 0x1f, 0x95, 0x84, 0xbf, 0x6a, 0x30, 0x16, 0x08, 0x22, 0x5a, 0x81, 0xf2, 0x54, - 0xa1, 0x7a, 0xd5, 0xa8, 0x8f, 0x6d, 0x2b, 0xea, 0xcb, 0xc3, 0xc5, 0x3e, 0xa5, 0x55, 0x8e, 0x90, - 0xb4, 0x14, 0x36, 0x18, 0xa8, 0x0d, 0xc8, 0x21, 0x81, 0xd8, 0xf1, 0x89, 0x17, 0x68, 0x4b, 0xcc, - 0xa5, 0xc5, 0x8c, 0xda, 0xf7, 0xd7, 0x13, 0xfb, 0x8e, 0x12, 0xbc, 0xdc, 0x3c, 0x68, 0x48, 0x42, - 0x50, 0x96, 0xf5, 0x27, 0x3d, 0x21, 0x35, 0xaa, 0xf3, 0x66, 0x15, 0xa8, 0xd6, 0x83, 0x86, 0xfb, - 0x58, 0x40, 
0x17, 0x61, 0xcc, 0xa7, 0x24, 0xe0, 0x5e, 0x31, 0xab, 0x76, 0x31, 0x1d, 0xee, 0x02, - 0x2b, 0x2a, 0x36, 0x5c, 0xf4, 0x35, 0x18, 0x77, 0x69, 0x10, 0xc8, 0x6a, 0xc8, 0x29, 0xc1, 0x19, - 0x23, 0x38, 0xbe, 0xa9, 0xc9, 0x38, 0xe4, 0xa3, 0x4f, 0x60, 0x5a, 0x1a, 0xba, 0xdf, 0xac, 0x13, - 0x41, 0xd5, 0x36, 0xc6, 0x86, 0xde, 0xc6, 0x79, 0x83, 0x3e, 0x5d, 0x4b, 0x21, 0xe1, 0x2e, 0xe4, - 0xd2, 0x1f, 0x47, 0xe1, 0x4c, 0x2a, 0x4c, 0x7b, 0xac, 0x81, 0x9e, 0x40, 0x5e, 0x82, 0xd5, 0x89, - 0x20, 0x26, 0x73, 0x2e, 0x9d, 0xcc, 0xb4, 0xce, 0xa5, 0x4d, 0x2a, 0x48, 0x15, 0x99, 0x05, 0x40, - 0x4c, 0xc3, 0x11, 0x2a, 0xfa, 0x1e, 0x64, 0x83, 0x26, 0xb5, 0x4d, 0x8d, 0xbc, 0x3f, 0x54, 0x8d, - 0xa8, 0x35, 0x6e, 0x37, 0xa9, 0x1d, 0xa7, 0xaa, 0xfc, 0xc2, 0x0a, 0x11, 0x3d, 0x89, 0xb2, 0x4a, - 0xc7, 0xfe, 0xda, 0x29, 0xb0, 0x95, 0x7e, 0x1c, 0xc9, 0x74, 0xa6, 0x95, 0xfe, 0x6e, 0xc1, 0xd9, - 0x6e, 0x95, 0x1a, 0x0b, 0x04, 0xfa, 0x7e, 0x8f, 0xdb, 0xca, 0x27, 0x73, 0x9b, 0xd4, 0x56, 0x4e, - 0x3b, 0x63, 0x4c, 0xe6, 0x43, 0x4a, 0xc2, 0x65, 0x0f, 0x21, 0xc7, 0x04, 0x75, 0x03, 0xd3, 0x21, - 0x57, 0x86, 0xdf, 0x57, 0xa2, 0x01, 0x4b, 0x20, 0xac, 0xf1, 0x4a, 0x3f, 0xcf, 0x40, 0xb1, 0x5b, - 0x14, 0x73, 0xc7, 0xd9, 0x25, 0xf6, 0x01, 0x5a, 0x82, 0xac, 0x47, 0xdc, 0xb0, 0xc2, 0x23, 0x87, - 0xdf, 0x25, 0x2e, 0xc5, 0x8a, 0x83, 0x7e, 0x63, 0x01, 0x6a, 0xa9, 0x84, 0xaa, 0xaf, 0x7a, 0x1e, - 0x17, 0x44, 0x96, 0x46, 0xb8, 0x4a, 0x3c, 0xfc, 0x2a, 0x43, 0xd3, 0xe5, 0xfb, 0x3d, 0xa0, 0xeb, - 0x9e, 0xf0, 0x3b, 0x71, 0x85, 0xf6, 0x0a, 0xe0, 0x3e, 0x2b, 0x41, 0x4f, 0x4c, 0xae, 0xe9, 0x7c, - 0xf8, 0xf0, 0xf4, 0x2b, 0x1a, 0x94, 0x73, 0xf3, 0xeb, 0x70, 0x61, 0xc0, 0x62, 0xd1, 0x19, 0xc8, - 0x1c, 0xd0, 0x8e, 0x76, 0x1f, 0x96, 0x3f, 0xd1, 0x59, 0xc8, 0xb5, 0x89, 0xd3, 0xa2, 0xba, 0xeb, - 0x61, 0xfd, 0x71, 0x63, 0xf4, 0x9a, 0x55, 0xfa, 0x53, 0x06, 0xbe, 0xf2, 0x2a, 0xdb, 0x6f, 0xa8, - 0x9b, 0xa3, 0xf7, 0x20, 0xef, 0xd3, 0x36, 0x0b, 0x18, 0xf7, 0xd4, 0x22, 0x32, 0x71, 0xde, 0x61, - 0x43, 0xc7, 0x91, 0x04, 0x5a, 0x85, 0x19, 0xe6, 
0xd9, 0x4e, 0xab, 0x1e, 0x1e, 0x2a, 0xba, 0xb2, - 0xf2, 0xd5, 0x0b, 0x46, 0x69, 0x66, 0x23, 0xcd, 0xc6, 0xdd, 0xf2, 0x49, 0x08, 0xea, 0x36, 0x1d, - 0x22, 0xa8, 0x6a, 0x96, 0x7d, 0x20, 0x0c, 0x1b, 0x77, 0xcb, 0xa3, 0x07, 0x70, 0xde, 0x90, 0x30, - 0x6d, 0x3a, 0xcc, 0x56, 0x3e, 0x96, 0x15, 0xa2, 0xba, 0x69, 0xbe, 0xba, 0x60, 0x90, 0xce, 0x6f, - 0xf4, 0x95, 0xc2, 0x03, 0xb4, 0x13, 0x4b, 0x0b, 0x67, 0x17, 0xd5, 0x6c, 0x7b, 0x97, 0x16, 0xb2, - 0x71, 0xb7, 0x7c, 0xe9, 0x7f, 0xb9, 0xde, 0x7e, 0xa0, 0xc2, 0xb5, 0x0b, 0xf9, 0x20, 0x04, 0xd5, - 0x21, 0xbb, 0x3a, 0x4c, 0xf2, 0x85, 0x06, 0xe2, 0xe8, 0x44, 0x6b, 0x88, 0x70, 0x11, 0x85, 0xbc, - 0x08, 0xc3, 0xa2, 0x9b, 0xe9, 0xcd, 0x61, 0x6c, 0x98, 0x10, 0x6d, 0x71, 0x87, 0xd9, 0x8c, 0x06, - 0xd5, 0x49, 0x69, 0x26, 0x0a, 0x64, 0x04, 0xad, 0x53, 0x46, 0x79, 0x4e, 0x47, 0x3f, 0x97, 0x4c, - 0x19, 0x4d, 0xc7, 0x91, 0x04, 0xaa, 0xc1, 0xd9, 0x30, 0x7d, 0x3e, 0x66, 0x81, 0xe0, 0x7e, 0xa7, - 0xc6, 0x5c, 0x26, 0x54, 0xd0, 0x73, 0xd5, 0xe2, 0xd1, 0xe1, 0xe2, 0x59, 0xdc, 0x87, 0x8f, 0xfb, - 0x6a, 0xc9, 0x16, 0x24, 0x68, 0x20, 0x4c, 0xa0, 0xa3, 0x84, 0xde, 0xa1, 0x81, 0xc0, 0x8a, 0x23, - 0xcf, 0xe0, 0xa6, 0x1c, 0x7d, 0xea, 0x26, 0x76, 0x51, 0xe7, 0xde, 0x52, 0x54, 0x6c, 0xb8, 0xc8, - 0x87, 0x7c, 0x40, 0x1d, 0x6a, 0x0b, 0xee, 0x17, 0xc7, 0x55, 0x7f, 0xba, 0x75, 0xba, 0x93, 0xa7, - 0xbc, 0x6d, 0x60, 0x74, 0x47, 0x8a, 0x03, 0x64, 0xc8, 0x38, 0xb2, 0x83, 0x36, 0x21, 0x2f, 0xc2, - 0xa4, 0xcf, 0x0f, 0xae, 0xdb, 0x2d, 0x5e, 0x0f, 0x73, 0x5d, 0xb7, 0x19, 0x15, 0x88, 0xb0, 0x1c, - 0x22, 0x08, 0x99, 0xaf, 0x2e, 0xf3, 0x30, 0x25, 0xf5, 0xce, 0x36, 0xb5, 0xb9, 0x57, 0x0f, 0x8a, - 0x05, 0xe5, 0xd5, 0x28, 0x5f, 0x37, 0xd3, 0x6c, 0xdc, 0x2d, 0x3f, 0x7f, 0x13, 0xa6, 0x52, 0xcb, - 0x1f, 0xaa, 0x47, 0xfd, 0x21, 0x07, 0xe7, 0xfb, 0x9f, 0x97, 0xe8, 0x26, 0x4c, 0xc9, 0x25, 0x06, - 0xe2, 0x01, 0xf5, 0x55, 0x6f, 0xb1, 0x54, 0x6f, 0x39, 0x67, 0x16, 0x36, 0x55, 0x4b, 0x32, 0x71, - 0x5a, 0x16, 0xdd, 0x01, 0xc4, 0x77, 0x03, 0xea, 0xb7, 0x69, 0xfd, 0x23, 0x7d, 0xd1, 
0x88, 0xbb, - 0x53, 0xd4, 0xf0, 0xef, 0xf5, 0x48, 0xe0, 0x3e, 0x5a, 0x43, 0x26, 0xeb, 0x2a, 0xcc, 0x98, 0x43, - 0x23, 0x64, 0x9a, 0x3c, 0x8d, 0x3c, 0x7a, 0x3f, 0xcd, 0xc6, 0xdd, 0xf2, 0xe8, 0x23, 0x98, 0x25, - 0x6d, 0xc2, 0x1c, 0xb2, 0xeb, 0xd0, 0x08, 0x24, 0xa7, 0x40, 0xde, 0x32, 0x20, 0xb3, 0xab, 0xdd, - 0x02, 0xb8, 0x57, 0x07, 0x6d, 0xc2, 0x5c, 0xcb, 0xeb, 0x85, 0x1a, 0x53, 0x50, 0x6f, 0x1b, 0xa8, - 0xb9, 0xfb, 0xbd, 0x22, 0xb8, 0x9f, 0x1e, 0x7a, 0x0c, 0xe3, 0x75, 0x2a, 0x08, 0x73, 0x82, 0xe2, - 0xb8, 0x4a, 0xbd, 0x2b, 0xc3, 0xa4, 0xfb, 0x2d, 0xad, 0xaa, 0x2f, 0x4f, 0xe6, 0x03, 0x87, 0x80, - 0x88, 0x01, 0xd8, 0xe1, 0x28, 0x1e, 0x14, 0xf3, 0xaa, 0x9a, 0xbe, 0x35, 0x64, 0x35, 0x69, 0xed, - 0x78, 0x54, 0x8c, 0x48, 0x01, 0x4e, 0x80, 0xcb, 0xc4, 0xf2, 0x65, 0x02, 0x47, 0xfe, 0xd0, 0x19, - 0x1f, 0x25, 0x16, 0x4e, 0x32, 0x71, 0x5a, 0xb6, 0xf4, 0x6b, 0x0b, 0x66, 0x7b, 0xf6, 0x94, 0x9c, - 0xc6, 0xad, 0x63, 0xa6, 0xf1, 0x47, 0x30, 0x66, 0xcb, 0xf6, 0x11, 0x8e, 0x34, 0x97, 0x87, 0xbe, - 0xd0, 0xc5, 0xfd, 0x48, 0x7d, 0x06, 0xd8, 0x00, 0x96, 0x66, 0x60, 0x2a, 0x16, 0xad, 0xf1, 0x46, - 0xe9, 0xb3, 0x6c, 0xf2, 0x28, 0xa9, 0xf1, 0xc6, 0xbd, 0xa6, 0x76, 0x41, 0x05, 0x0a, 0x36, 0xf7, - 0x04, 0x91, 0x03, 0xa4, 0x59, 0xf1, 0xac, 0x01, 0x2d, 0xac, 0x85, 0x0c, 0x1c, 0xcb, 0xc8, 0x96, - 0xb8, 0xc7, 0x1d, 0x87, 0x3f, 0x55, 0x35, 0x94, 0x68, 0x89, 0xb7, 0x15, 0x15, 0x1b, 0xae, 0xac, - 0x95, 0xa6, 0xec, 0xba, 0xbc, 0x15, 0x1e, 0xeb, 0x51, 0xad, 0x6c, 0x19, 0x3a, 0x8e, 0x24, 0xd0, - 0x55, 0x98, 0x0c, 0x98, 0x67, 0xd3, 0xb0, 0xf5, 0x64, 0xf5, 0xf4, 0x20, 0xef, 0xa8, 0xdb, 0x09, - 0x3a, 0x4e, 0x49, 0xa1, 0x87, 0x50, 0x50, 0xdf, 0xea, 0x2a, 0x93, 0x1b, 0xfa, 0x2a, 0x33, 0x25, - 0x37, 0xb9, 0x1d, 0x02, 0xe0, 0x18, 0x0b, 0xad, 0x00, 0x08, 0xe6, 0xd2, 0x40, 0x10, 0xb7, 0x19, - 0x98, 0xde, 0x1f, 0x25, 0xd3, 0x4e, 0xc4, 0xc1, 0x09, 0x29, 0xf4, 0x0d, 0x28, 0xc8, 0x14, 0xa8, - 0x31, 0x8f, 0xea, 0xaa, 0xc8, 0x68, 0x03, 0x3b, 0x21, 0x11, 0xc7, 0x7c, 0x54, 0x06, 0x70, 0xe4, - 0x19, 0x54, 0xed, 0x08, 
0x1a, 0xa8, 0xf6, 0x9d, 0xa9, 0x4e, 0x4b, 0xf0, 0x5a, 0x44, 0xc5, 0x09, - 0x09, 0xe9, 0x75, 0x8f, 0x3f, 0x25, 0x4c, 0xa8, 0x14, 0x4d, 0x78, 0xfd, 0x2e, 0x7f, 0x48, 0x98, - 0xc0, 0x86, 0x8b, 0xde, 0x85, 0xf1, 0xb6, 0x69, 0x92, 0xa0, 0x40, 0x55, 0x8d, 0x85, 0xad, 0x31, - 0xe4, 0x95, 0xfe, 0x9d, 0xca, 0x5d, 0x4c, 0x7f, 0xd4, 0x92, 0xa7, 0xdd, 0xf1, 0x23, 0xf9, 0x45, - 0x18, 0xd3, 0xdd, 0xb5, 0x3b, 0xf8, 0xba, 0x05, 0x63, 0xc3, 0x45, 0xef, 0x40, 0x6e, 0x8f, 0xfb, - 0x36, 0x35, 0x91, 0x8f, 0xae, 0x07, 0xb7, 0x25, 0x11, 0x6b, 0x1e, 0x7a, 0x00, 0x33, 0xf4, 0x59, - 0x7a, 0xfe, 0xcb, 0xaa, 0x47, 0x95, 0xf7, 0x64, 0x6f, 0x5c, 0x4f, 0xb3, 0x06, 0xbf, 0x91, 0x74, - 0x83, 0x94, 0xfe, 0x31, 0x0e, 0xa8, 0x77, 0xd8, 0x41, 0x37, 0x52, 0x4f, 0x0a, 0x17, 0xbb, 0x9e, - 0x14, 0xce, 0xf7, 0x6a, 0x24, 0x5e, 0x14, 0xda, 0x30, 0x69, 0xab, 0x17, 0x29, 0xfd, 0xfe, 0x64, - 0x06, 0xa2, 0xef, 0x1c, 0x5f, 0xb0, 0xaf, 0x7e, 0xc7, 0xd2, 0x09, 0xbe, 0x96, 0x40, 0xc6, 0x29, - 0x3b, 0xe8, 0xa7, 0x30, 0xed, 0x53, 0xdb, 0xa7, 0x44, 0x50, 0x63, 0x59, 0xdf, 0x35, 0xaa, 0xc7, - 0x5b, 0xc6, 0x46, 0x6f, 0xa0, 0x6d, 0x24, 0x2f, 0xf1, 0x38, 0x85, 0x8e, 0xbb, 0xac, 0xa1, 0x1f, - 0xc3, 0x94, 0xcf, 0x1d, 0x87, 0x79, 0x0d, 0x63, 0x3e, 0xab, 0xcc, 0xaf, 0x9e, 0xc0, 0xbc, 0x56, - 0x1b, 0x68, 0x7d, 0x56, 0xf5, 0xd7, 0x24, 0x36, 0x4e, 0x9b, 0x42, 0x8f, 0xa0, 0xe0, 0xd3, 0x80, - 0xb7, 0x7c, 0x9b, 0x06, 0xa6, 0xb8, 0x97, 0xfb, 0x0d, 0x38, 0xd8, 0x08, 0xc9, 0x2c, 0x66, 0x3e, - 0x95, 0xb6, 0x82, 0xb8, 0x87, 0x85, 0xdc, 0x00, 0xc7, 0x68, 0x68, 0x5f, 0xa6, 0xf1, 0x2e, 0x75, - 0x64, 0x69, 0x67, 0x4e, 0x16, 0xc8, 0xde, 0x8d, 0x94, 0x6b, 0x0a, 0x42, 0x0f, 0x6a, 0x89, 0x42, - 0x90, 0x44, 0x6c, 0xf0, 0xd1, 0x4f, 0x60, 0x82, 0x24, 0xee, 0xae, 0x7a, 0x36, 0x5c, 0x3f, 0x95, - 0xb9, 0x9e, 0xeb, 0x6a, 0xf4, 0x5c, 0x99, 0xbc, 0xa7, 0x26, 0xcd, 0xa1, 0x7b, 0x70, 0x8e, 0xd8, - 0x82, 0xb5, 0xe9, 0x2d, 0x4a, 0xea, 0x0e, 0xf3, 0xa2, 0xf6, 0xaa, 0x1b, 0xce, 0x5b, 0x47, 0x87, - 0x8b, 0xe7, 0x56, 0xfb, 0x09, 0xe0, 0xfe, 0x7a, 0xf3, 0xd7, 
0x61, 0x22, 0xb1, 0xeb, 0x61, 0xe6, - 0xbb, 0xf9, 0x0f, 0xe1, 0xcc, 0x6b, 0xdd, 0x61, 0x7f, 0x37, 0x0a, 0xa5, 0x9e, 0x06, 0xa0, 0x9e, - 0x24, 0xd7, 0xf6, 0x89, 0xd7, 0x08, 0x33, 0xb6, 0x02, 0x05, 0xd2, 0x12, 0xdc, 0x25, 0x82, 0xd9, - 0x0a, 0x38, 0x1f, 0xe7, 0xc2, 0x6a, 0xc8, 0xc0, 0xb1, 0x0c, 0xba, 0x01, 0xd3, 0xd1, 0xe1, 0x26, - 0x3b, 0x9d, 0x3e, 0x8d, 0x0b, 0xba, 0x3c, 0xd6, 0x52, 0x1c, 0xdc, 0x25, 0x19, 0x5d, 0x9b, 0x33, - 0xaf, 0x77, 0x6d, 0xbe, 0x13, 0xbe, 0x30, 0xaa, 0x3d, 0xd1, 0xba, 0xda, 0x95, 0x79, 0xf5, 0xeb, - 0x7a, 0x35, 0x4c, 0x4a, 0xe0, 0x3e, 0x5a, 0xa5, 0x9f, 0x59, 0xf0, 0xd6, 0xc0, 0x5b, 0x18, 0xfa, - 0x41, 0xf8, 0xd4, 0x63, 0xa9, 0x44, 0xbc, 0x7e, 0xda, 0x1b, 0x5d, 0xa7, 0xff, 0x8b, 0xcf, 0x8d, - 0xfc, 0xaf, 0x7e, 0xbb, 0x38, 0xf2, 0xe9, 0x7f, 0x96, 0x46, 0x4a, 0x5f, 0x5a, 0x70, 0x61, 0x80, - 0xee, 0xeb, 0x3c, 0x85, 0xff, 0xc2, 0x82, 0x59, 0xd6, 0x1d, 0x74, 0xd3, 0x8e, 0x6f, 0x9d, 0x62, - 0x37, 0x3d, 0x09, 0x54, 0x3d, 0x27, 0x67, 0xea, 0x1e, 0x32, 0xee, 0xb5, 0x5a, 0xfa, 0xa7, 0x05, - 0xd3, 0xeb, 0xcf, 0xa8, 0x7d, 0x97, 0x3e, 0xdd, 0xe2, 0xf5, 0x8f, 0x39, 0x3f, 0x48, 0xfe, 0x3f, - 0x60, 0x0d, 0xfe, 0x7f, 0x00, 0x5d, 0x87, 0x0c, 0xf5, 0xda, 0x27, 0xf8, 0x47, 0x62, 0xc2, 0xf8, - 0x26, 0xb3, 0xee, 0xb5, 0xb1, 0xd4, 0x91, 0x23, 0x6b, 0x2a, 0x09, 0x55, 0xee, 0x15, 0xe2, 0x91, - 0x35, 0x95, 0xb1, 0x38, 0x2d, 0xab, 0xa6, 0x03, 0xee, 0xb4, 0x64, 0x92, 0x67, 0xe3, 0xe5, 0x3d, - 0xd0, 0x24, 0x1c, 0xf2, 0x4a, 0xbf, 0x1f, 0x85, 0xa9, 0x1a, 0xdb, 0xa3, 0x76, 0xc7, 0x76, 0xa8, - 0xda, 0xd7, 0x23, 0x98, 0xda, 0x23, 0xcc, 0x69, 0xf9, 0x54, 0x87, 0xd0, 0x84, 0xee, 0x4a, 0x68, - 0xf5, 0x76, 0x92, 0xf9, 0xf2, 0x70, 0x71, 0x3e, 0xa5, 0x9e, 0xe2, 0xe2, 0x34, 0x12, 0x7a, 0x02, - 0x40, 0x23, 0x27, 0x9a, 0x48, 0x5e, 0x3a, 0x3e, 0x92, 0x69, 0xc7, 0xeb, 0xd9, 0x29, 0xa6, 0xe1, - 0x04, 0x26, 0xfa, 0xa1, 0x1c, 0xcc, 0x1a, 0x2a, 0xa4, 0x81, 0xfa, 0xdb, 0x66, 0x62, 0xa5, 0x7c, - 0xbc, 0x81, 0x1d, 0xa3, 0xa2, 0xe0, 0xa3, 0x16, 0x12, 0x52, 0xd5, 0x30, 0x67, 0x7e, 0x96, 0xfe, - 
0x3a, 0x0a, 0x4b, 0xc7, 0x1d, 0xb7, 0xb2, 0xcf, 0xc8, 0x61, 0x91, 0xb7, 0x44, 0xd8, 0x84, 0xf5, - 0x2d, 0x56, 0xf5, 0x99, 0x9d, 0x14, 0x07, 0x77, 0x49, 0xa2, 0x3b, 0x90, 0x69, 0xfa, 0xd4, 0x38, - 0xa7, 0x72, 0xfc, 0xda, 0x53, 0xde, 0xaf, 0x8e, 0xcb, 0x04, 0xda, 0xf2, 0x29, 0x96, 0x20, 0x12, - 0xcb, 0x65, 0x75, 0xd3, 0xb2, 0x4e, 0x87, 0xb5, 0xc9, 0xea, 0x58, 0x82, 0xa0, 0x4d, 0xc8, 0x36, - 0x79, 0x20, 0xcc, 0x54, 0x30, 0x34, 0x58, 0x5e, 0x56, 0xfd, 0x16, 0x0f, 0x04, 0x56, 0x30, 0xa5, - 0xbf, 0x65, 0x61, 0xf1, 0x98, 0xb9, 0x01, 0x6d, 0xc0, 0x9c, 0xbe, 0x24, 0x6f, 0x51, 0x9f, 0xf1, - 0x7a, 0xda, 0x97, 0x17, 0xd4, 0x25, 0xb6, 0x97, 0x8d, 0xfb, 0xe9, 0xa0, 0x0f, 0x60, 0x86, 0x79, - 0x82, 0xfa, 0x6d, 0xe2, 0x84, 0x30, 0xfa, 0x59, 0x60, 0x4e, 0xbf, 0xce, 0xa5, 0x58, 0xb8, 0x5b, - 0xb6, 0x4f, 0x40, 0x33, 0x27, 0x0e, 0xa8, 0x03, 0xd3, 0x2e, 0x79, 0x96, 0xb8, 0x6e, 0x1b, 0x17, - 0x0e, 0xfe, 0x37, 0xa4, 0x25, 0x98, 0x53, 0xd6, 0x7f, 0x98, 0x96, 0x37, 0x3c, 0x71, 0xcf, 0xdf, - 0x16, 0x3e, 0xf3, 0x1a, 0xda, 0xda, 0x66, 0x0a, 0x0b, 0x77, 0x61, 0xa3, 0xc7, 0x90, 0x77, 0xc9, - 0xb3, 0xed, 0x96, 0xdf, 0x08, 0x6f, 0x49, 0xc3, 0xdb, 0x51, 0xcf, 0x46, 0x9b, 0x06, 0x05, 0x47, - 0x78, 0x61, 0x6a, 0x8e, 0xbf, 0x89, 0xd4, 0x0c, 0xd3, 0x29, 0xff, 0x66, 0xd2, 0xe9, 0x33, 0x0b, - 0x26, 0x93, 0x55, 0xdc, 0xdb, 0x3b, 0xad, 0x21, 0x7a, 0xe7, 0xb7, 0x61, 0x54, 0x70, 0x53, 0x82, - 0x27, 0x3a, 0xe9, 0xc1, 0xc0, 0x8e, 0xee, 0x70, 0x3c, 0x2a, 0x78, 0x75, 0xf9, 0xf9, 0x8b, 0x85, - 0x91, 0xcf, 0x5f, 0x2c, 0x8c, 0x7c, 0xf1, 0x62, 0x61, 0xe4, 0xd3, 0xa3, 0x05, 0xeb, 0xf9, 0xd1, - 0x82, 0xf5, 0xf9, 0xd1, 0x82, 0xf5, 0xc5, 0xd1, 0x82, 0xf5, 0xe5, 0xd1, 0x82, 0xf5, 0xcb, 0xff, - 0x2e, 0x8c, 0x3c, 0x1e, 0x6d, 0x5f, 0xfe, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xb6, 0x32, - 0x5f, 0x7c, 0x20, 0x00, 0x00, -} diff --git a/vendor/github.com/openshift/api/apps/v1/generated.proto b/vendor/github.com/openshift/api/apps/v1/generated.proto index 1648f9461..d15f20c0d 100644 --- 
a/vendor/github.com/openshift/api/apps/v1/generated.proto +++ b/vendor/github.com/openshift/api/apps/v1/generated.proto @@ -7,7 +7,6 @@ package github.com.openshift.api.apps.v1; import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; @@ -16,7 +15,7 @@ option go_package = "v1"; // CustomDeploymentStrategyParams are the input to the Custom deployment strategy. message CustomDeploymentStrategyParams { - // Image specifies a Docker image which can carry out a deployment. + // Image specifies a container image which can carry out a deployment. optional string image = 1; // Environment holds the environment which will be given to the container for Image. @@ -68,25 +67,24 @@ message DeploymentCondition { // A single deployment configuration is usually analogous to a single micro-service. Can support many different // deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as // well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller. -// +// // A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed. // Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment // is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment // is triggered by any means. message DeploymentConfig { - // Standard object's metadata. optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec represents a desired deployment state and how to deploy to it. optional DeploymentConfigSpec spec = 2; // Status represents the current deployment state. 
+ // +optional optional DeploymentConfigStatus status = 3; } // DeploymentConfigList is a collection of deployment configs. message DeploymentConfigList { - // Standard object's metadata. optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is a list of deployment configs @@ -362,7 +360,7 @@ message ExecNewPodHook { repeated k8s.io.api.core.v1.EnvVar env = 2; // ContainerName is the name of a container in the deployment pod template - // whose Docker image will be used for the hook pod's container. + // whose container image will be used for the hook pod's container. optional string containerName = 3; // Volumes is a list of named volumes from the pod template which should be @@ -422,9 +420,9 @@ message RollingDeploymentStrategyParams { // during the update. Value can be an absolute number (ex: 5) or a // percentage of total pods at the start of update (ex: 10%). Absolute // number is calculated from percentage by rounding down. - // + // // This cannot be 0 if MaxSurge is 0. By default, 25% is used. - // + // // Example: when this is set to 30%, the old RC can be scaled down by 30% // immediately when the rolling update starts. Once new pods are ready, old // RC can be scaled down further, followed by scaling up the new RC, @@ -436,9 +434,9 @@ message RollingDeploymentStrategyParams { // original number of pods. Value can be an absolute number (ex: 5) or a // percentage of total pods at the start of the update (ex: 10%). Absolute // number is calculated from percentage by rounding up. - // + // // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used. - // + // // Example: when this is set to 30%, the new RC can be scaled up by 30% // immediately when the rolling update starts. 
Once old pods have been // killed, new RC can be scaled up further, ensuring that total number of diff --git a/vendor/github.com/openshift/api/apps/v1/legacy.go b/vendor/github.com/openshift/api/apps/v1/legacy.go new file mode 100644 index 000000000..c8fa0ed99 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/legacy.go @@ -0,0 +1,28 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &DeploymentConfig{}, + &DeploymentConfigList{}, + &DeploymentConfigRollback{}, + &DeploymentRequest{}, + &DeploymentLog{}, + &DeploymentLogOptions{}, + &extensionsv1beta1.Scale{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) 
+ return nil +} diff --git a/vendor/github.com/openshift/api/apps/v1/register.go b/vendor/github.com/openshift/api/apps/v1/register.go index 88e40b4d4..0c1e47e6d 100644 --- a/vendor/github.com/openshift/api/apps/v1/register.go +++ b/vendor/github.com/openshift/api/apps/v1/register.go @@ -1,50 +1,37 @@ package v1 import ( + corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -const ( - LegacyGroupName = "" - GroupName = "apps.openshift.io" -) - var ( - SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "v1"} + GroupName = "apps.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme - LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) - AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme - - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme ) +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to api.Scheme. 
-func addLegacyKnownTypes(scheme *runtime.Scheme) error { - types := []runtime.Object{ - &DeploymentConfig{}, - &DeploymentConfigList{}, - &DeploymentConfigRollback{}, - &DeploymentRequest{}, - &DeploymentLog{}, - &DeploymentLogOptions{}, - &extensionsv1beta1.Scale{}, - } - scheme.AddKnownTypes(LegacySchemeGroupVersion, types...) - return nil + return schema.GroupResource{Group: GroupName, Resource: resource} } // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, + scheme.AddKnownTypes(GroupVersion, &DeploymentConfig{}, &DeploymentConfigList{}, &DeploymentConfigRollback{}, @@ -53,6 +40,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &DeploymentLogOptions{}, &extensionsv1beta1.Scale{}, ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, GroupVersion) return nil } diff --git a/vendor/github.com/openshift/api/apps/v1/types.go b/vendor/github.com/openshift/api/apps/v1/types.go index 4f70b3d40..ed147807d 100644 --- a/vendor/github.com/openshift/api/apps/v1/types.go +++ b/vendor/github.com/openshift/api/apps/v1/types.go @@ -25,14 +25,14 @@ import ( // is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment // is triggered by any means. type DeploymentConfig struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. + metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec represents a desired deployment state and how to deploy to it. Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` // Status represents the current deployment state. + // +optional Status DeploymentConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -120,7 +120,7 @@ const ( // CustomDeploymentStrategyParams are the input to the Custom deployment strategy. 
type CustomDeploymentStrategyParams struct { - // Image specifies a Docker image which can carry out a deployment. + // Image specifies a container image which can carry out a deployment. Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` // Environment holds the environment which will be given to the container for Image. Environment []corev1.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"` @@ -225,7 +225,7 @@ type ExecNewPodHook struct { // Env is a set of environment variables to supply to the hook pod's container. Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` // ContainerName is the name of a container in the deployment pod template - // whose Docker image will be used for the hook pod's container. + // whose container image will be used for the hook pod's container. ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"` // Volumes is a list of named volumes from the pod template which should be // copied to the hook pod. Volumes names not found in pod spec are ignored. @@ -264,7 +264,7 @@ type DeploymentTriggerType string const ( // DeploymentTriggerOnImageChange will create new deployments in response to updated tags from - // a Docker image repository. + // a container image repository. DeploymentTriggerOnImageChange DeploymentTriggerType = "ImageChange" // DeploymentTriggerOnConfigChange will create new deployments in response to changes to // the ControllerTemplate of a DeploymentConfig. @@ -389,7 +389,6 @@ type DeploymentCondition struct { // DeploymentConfigList is a collection of deployment configs. type DeploymentConfigList struct { metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Items is a list of deployment configs diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go index 383c2f516..f6ab2fd48 100644 --- a/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go @@ -1,15 +1,13 @@ // +build !ignore_autogenerated -// This file was autogenerated by deepcopy-gen. Do not edit it manually! +// Code generated by deepcopy-gen. DO NOT EDIT. package v1 import ( - core_v1 "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - unsafe "unsafe" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -17,7 +15,7 @@ func (in *CustomDeploymentStrategyParams) DeepCopyInto(out *CustomDeploymentStra *out = *in if in.Environment != nil { in, out := &in.Environment, &out.Environment - *out = make([]core_v1.EnvVar, len(*in)) + *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -45,12 +43,8 @@ func (in *DeploymentCause) DeepCopyInto(out *DeploymentCause) { *out = *in if in.ImageTrigger != nil { in, out := &in.ImageTrigger, &out.ImageTrigger - if *in == nil { - *out = nil - } else { - *out = new(DeploymentCauseImageTrigger) - **out = **in - } + *out = new(DeploymentCauseImageTrigger) + **out = **in } return } @@ -100,26 +94,6 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DeploymentConditionType) DeepCopyInto(out *DeploymentConditionType) { - { - in := (*string)(unsafe.Pointer(in)) - out := (*string)(unsafe.Pointer(out)) - *out = *in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConditionType. -func (in *DeploymentConditionType) DeepCopy() *DeploymentConditionType { - if in == nil { - return nil - } - out := new(DeploymentConditionType) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DeploymentConfig) DeepCopyInto(out *DeploymentConfig) { *out = *in @@ -144,16 +118,15 @@ func (in *DeploymentConfig) DeepCopy() *DeploymentConfig { func (in *DeploymentConfig) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DeploymentConfigList) DeepCopyInto(out *DeploymentConfigList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DeploymentConfig, len(*in)) @@ -178,9 +151,8 @@ func (in *DeploymentConfigList) DeepCopy() *DeploymentConfigList { func (in *DeploymentConfigList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -212,9 +184,8 @@ func (in *DeploymentConfigRollback) DeepCopy() *DeploymentConfigRollback { func (in *DeploymentConfigRollback) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -247,12 +218,8 @@ func (in *DeploymentConfigSpec) DeepCopyInto(out *DeploymentConfigSpec) { } if in.RevisionHistoryLimit != nil { in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } + *out = new(int32) + **out = **in } if in.Selector != nil { in, out := &in.Selector, &out.Selector @@ -263,12 +230,8 @@ func (in *DeploymentConfigSpec) DeepCopyInto(out *DeploymentConfigSpec) { } if in.Template != nil { in, out := &in.Template, &out.Template - if *in == nil { - *out = nil - } else { - *out = new(core_v1.PodTemplateSpec) - (*in).DeepCopyInto(*out) - } + *out = new(corev1.PodTemplateSpec) + (*in).DeepCopyInto(*out) } return } @@ -288,12 +251,8 @@ func (in *DeploymentConfigStatus) DeepCopyInto(out *DeploymentConfigStatus) { *out = *in if in.Details != nil { in, out := &in.Details, &out.Details - if *in == nil { - *out = nil - } else { - *out = new(DeploymentDetails) - (*in).DeepCopyInto(*out) - } + *out = new(DeploymentDetails) + (*in).DeepCopyInto(*out) } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions @@ -359,9 +318,8 @@ func (in *DeploymentLog) DeepCopy() *DeploymentLog { func (in *DeploymentLog) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -370,48 +328,27 @@ func (in *DeploymentLogOptions) DeepCopyInto(out *DeploymentLogOptions) { out.TypeMeta = in.TypeMeta if in.SinceSeconds != nil { in, out := &in.SinceSeconds, &out.SinceSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.SinceTime != nil { in, out := &in.SinceTime, &out.SinceTime - if *in == nil { - *out = nil - } else { - *out = new(meta_v1.Time) - (*in).DeepCopyInto(*out) - } + *out = (*in).DeepCopy() } if in.TailLines != nil { in, out := &in.TailLines, &out.TailLines - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.LimitBytes != nil { in, out := &in.LimitBytes, &out.LimitBytes - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.Version != nil { in, out := &in.Version, &out.Version - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } return } @@ -430,9 +367,8 @@ func (in *DeploymentLogOptions) DeepCopy() *DeploymentLogOptions { func (in *DeploymentLogOptions) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -461,9 +397,8 @@ func (in *DeploymentRequest) DeepCopy() *DeploymentRequest { func (in *DeploymentRequest) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -471,30 +406,18 @@ func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { *out = *in if in.CustomParams != nil { in, out := &in.CustomParams, &out.CustomParams - if *in == nil { - *out = nil - } else { - *out = new(CustomDeploymentStrategyParams) - (*in).DeepCopyInto(*out) - } + *out = new(CustomDeploymentStrategyParams) + (*in).DeepCopyInto(*out) } if in.RecreateParams != nil { in, out := &in.RecreateParams, &out.RecreateParams - if *in == nil { - *out = nil - } else { - *out = new(RecreateDeploymentStrategyParams) - (*in).DeepCopyInto(*out) - } + *out = new(RecreateDeploymentStrategyParams) + (*in).DeepCopyInto(*out) } if in.RollingParams != nil { in, out := &in.RollingParams, &out.RollingParams - if *in == nil { - *out = nil - } else { - *out = new(RollingDeploymentStrategyParams) - (*in).DeepCopyInto(*out) - } + *out = new(RollingDeploymentStrategyParams) + (*in).DeepCopyInto(*out) } in.Resources.DeepCopyInto(&out.Resources) if in.Labels != nil { @@ -513,12 +436,8 @@ func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { } if in.ActiveDeadlineSeconds != nil { in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } return } @@ -533,26 +452,6 @@ func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentStrategyType) DeepCopyInto(out *DeploymentStrategyType) { - { - in := (*string)(unsafe.Pointer(in)) - out := (*string)(unsafe.Pointer(out)) - *out = *in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategyType. 
-func (in *DeploymentStrategyType) DeepCopy() *DeploymentStrategyType { - if in == nil { - return nil - } - out := new(DeploymentStrategyType) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DeploymentTriggerImageChangeParams) DeepCopyInto(out *DeploymentTriggerImageChangeParams) { *out = *in @@ -576,26 +475,25 @@ func (in *DeploymentTriggerImageChangeParams) DeepCopy() *DeploymentTriggerImage } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentTriggerPolicies) DeepCopyInto(out *DeploymentTriggerPolicies) { +func (in DeploymentTriggerPolicies) DeepCopyInto(out *DeploymentTriggerPolicies) { { - in := (*[]DeploymentTriggerPolicy)(unsafe.Pointer(in)) - out := (*[]DeploymentTriggerPolicy)(unsafe.Pointer(out)) - *out = make([]DeploymentTriggerPolicy, len(*in)) + in := &in + *out = make(DeploymentTriggerPolicies, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } + return } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicies. -func (in *DeploymentTriggerPolicies) DeepCopy() *DeploymentTriggerPolicies { +func (in DeploymentTriggerPolicies) DeepCopy() DeploymentTriggerPolicies { if in == nil { return nil } out := new(DeploymentTriggerPolicies) in.DeepCopyInto(out) - return out + return *out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -603,12 +501,8 @@ func (in *DeploymentTriggerPolicy) DeepCopyInto(out *DeploymentTriggerPolicy) { *out = *in if in.ImageChangeParams != nil { in, out := &in.ImageChangeParams, &out.ImageChangeParams - if *in == nil { - *out = nil - } else { - *out = new(DeploymentTriggerImageChangeParams) - (*in).DeepCopyInto(*out) - } + *out = new(DeploymentTriggerImageChangeParams) + (*in).DeepCopyInto(*out) } return } @@ -623,26 +517,6 @@ func (in *DeploymentTriggerPolicy) DeepCopy() *DeploymentTriggerPolicy { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentTriggerType) DeepCopyInto(out *DeploymentTriggerType) { - { - in := (*string)(unsafe.Pointer(in)) - out := (*string)(unsafe.Pointer(out)) - *out = *in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerType. -func (in *DeploymentTriggerType) DeepCopy() *DeploymentTriggerType { - if in == nil { - return nil - } - out := new(DeploymentTriggerType) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ExecNewPodHook) DeepCopyInto(out *ExecNewPodHook) { *out = *in @@ -653,7 +527,7 @@ func (in *ExecNewPodHook) DeepCopyInto(out *ExecNewPodHook) { } if in.Env != nil { in, out := &in.Env, &out.Env - *out = make([]core_v1.EnvVar, len(*in)) + *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -681,12 +555,8 @@ func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) { *out = *in if in.ExecNewPod != nil { in, out := &in.ExecNewPod, &out.ExecNewPod - if *in == nil { - *out = nil - } else { - *out = new(ExecNewPodHook) - (*in).DeepCopyInto(*out) - } + *out = new(ExecNewPodHook) + (*in).DeepCopyInto(*out) } if in.TagImages != nil { in, out := &in.TagImages, &out.TagImages @@ -706,64 +576,28 @@ func (in *LifecycleHook) DeepCopy() *LifecycleHook { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LifecycleHookFailurePolicy) DeepCopyInto(out *LifecycleHookFailurePolicy) { - { - in := (*string)(unsafe.Pointer(in)) - out := (*string)(unsafe.Pointer(out)) - *out = *in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHookFailurePolicy. -func (in *LifecycleHookFailurePolicy) DeepCopy() *LifecycleHookFailurePolicy { - if in == nil { - return nil - } - out := new(LifecycleHookFailurePolicy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RecreateDeploymentStrategyParams) DeepCopyInto(out *RecreateDeploymentStrategyParams) { *out = *in if in.TimeoutSeconds != nil { in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.Pre != nil { in, out := &in.Pre, &out.Pre - if *in == nil { - *out = nil - } else { - *out = new(LifecycleHook) - (*in).DeepCopyInto(*out) - } + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) } if in.Mid != nil { in, out := &in.Mid, &out.Mid - if *in == nil { - *out = nil - } else { - *out = new(LifecycleHook) - (*in).DeepCopyInto(*out) - } + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) } if in.Post != nil { in, out := &in.Post, &out.Post - if *in == nil { - *out = nil - } else { - *out = new(LifecycleHook) - (*in).DeepCopyInto(*out) - } + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) } return } @@ -783,66 +617,38 @@ func (in *RollingDeploymentStrategyParams) DeepCopyInto(out *RollingDeploymentSt *out = *in if in.UpdatePeriodSeconds != nil { in, out := &in.UpdatePeriodSeconds, &out.UpdatePeriodSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.IntervalSeconds != nil { in, out := &in.IntervalSeconds, &out.IntervalSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.TimeoutSeconds != nil { in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } + *out = new(int64) + **out = **in } if in.MaxUnavailable != nil { in, out := &in.MaxUnavailable, &out.MaxUnavailable - if *in == nil { - *out = nil - } else { - *out = new(intstr.IntOrString) - **out = **in - } + *out = new(intstr.IntOrString) + **out = **in } if in.MaxSurge != nil { in, out := &in.MaxSurge, &out.MaxSurge - if *in == nil { - *out = nil - } else { - 
*out = new(intstr.IntOrString) - **out = **in - } + *out = new(intstr.IntOrString) + **out = **in } if in.Pre != nil { in, out := &in.Pre, &out.Pre - if *in == nil { - *out = nil - } else { - *out = new(LifecycleHook) - (*in).DeepCopyInto(*out) - } + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) } if in.Post != nil { in, out := &in.Post, &out.Post - if *in == nil { - *out = nil - } else { - *out = new(LifecycleHook) - (*in).DeepCopyInto(*out) - } + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) } return } diff --git a/vendor/github.com/openshift/api/apps/v1/types_swagger_doc_generated.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go similarity index 92% rename from vendor/github.com/openshift/api/apps/v1/types_swagger_doc_generated.go rename to vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go index 78fe5ddc0..9e3a07e8f 100644 --- a/vendor/github.com/openshift/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go @@ -8,12 +8,12 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. 
// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE var map_CustomDeploymentStrategyParams = map[string]string{ "": "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.", - "image": "Image specifies a Docker image which can carry out a deployment.", + "image": "Image specifies a container image which can carry out a deployment.", "environment": "Environment holds the environment which will be given to the container for Image.", "command": "Command is optional and overrides CMD in the container Image.", } @@ -56,10 +56,9 @@ func (DeploymentCondition) SwaggerDoc() map[string]string { } var map_DeploymentConfig = map[string]string{ - "": "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.", - "metadata": "Standard object's metadata.", - "spec": "Spec represents a desired deployment state and how to deploy to it.", - "status": "Status represents the current deployment state.", + "": "Deployment Configs define the template for a pod and manages deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. 
Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.", + "spec": "Spec represents a desired deployment state and how to deploy to it.", + "status": "Status represents the current deployment state.", } func (DeploymentConfig) SwaggerDoc() map[string]string { @@ -67,9 +66,8 @@ func (DeploymentConfig) SwaggerDoc() map[string]string { } var map_DeploymentConfigList = map[string]string{ - "": "DeploymentConfigList is a collection of deployment configs.", - "metadata": "Standard object's metadata.", - "items": "Items is a list of deployment configs", + "": "DeploymentConfigList is a collection of deployment configs.", + "items": "Items is a list of deployment configs", } func (DeploymentConfigList) SwaggerDoc() map[string]string { @@ -225,7 +223,7 @@ var map_ExecNewPodHook = map[string]string{ "": "ExecNewPodHook is a hook implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.", "command": "Command is the action command and its arguments.", "env": "Env is a set of environment variables to supply to the hook pod's container.", - "containerName": "ContainerName is the name of a container in the deployment pod template whose Docker image will be used for the hook pod's container.", + "containerName": "ContainerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.", "volumes": 
"Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volumes names not found in pod spec are ignored. An empty list means no volumes will be copied.", } @@ -257,7 +255,7 @@ func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string { } var map_RollingDeploymentStrategyParams = map[string]string{ - "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.", + "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.", "updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", "intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", diff --git a/vendor/github.com/openshift/api/project/v1/generated.pb.go b/vendor/github.com/openshift/api/project/v1/generated.pb.go index 07836449a..35cbc2284 100644 --- a/vendor/github.com/openshift/api/project/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/project/v1/generated.pb.go @@ -1,32 +1,22 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: github.com/openshift/api/project/v1/generated.proto -// DO NOT EDIT! -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - github.com/openshift/api/project/v1/generated.proto - - It has these top-level messages: - Project - ProjectList - ProjectRequest - ProjectSpec - ProjectStatus -*/ package v1 -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" -import k8s_io_api_core_v1 "k8s.io/api/core/v1" + io "io" -import strings "strings" -import reflect "reflect" + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" -import io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -37,27 +27,147 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *Project) Reset() { *m = Project{} } -func (*Project) ProtoMessage() {} -func (*Project) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *Project) Reset() { *m = Project{} } +func (*Project) ProtoMessage() {} +func (*Project) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{0} +} +func (m *Project) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Project) XXX_Merge(src proto.Message) { + xxx_messageInfo_Project.Merge(m, src) +} +func (m *Project) XXX_Size() int { + return m.Size() +} +func (m *Project) XXX_DiscardUnknown() { + xxx_messageInfo_Project.DiscardUnknown(m) +} -func (m *ProjectList) 
Reset() { *m = ProjectList{} } -func (*ProjectList) ProtoMessage() {} -func (*ProjectList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +var xxx_messageInfo_Project proto.InternalMessageInfo -func (m *ProjectRequest) Reset() { *m = ProjectRequest{} } -func (*ProjectRequest) ProtoMessage() {} -func (*ProjectRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *ProjectList) Reset() { *m = ProjectList{} } +func (*ProjectList) ProtoMessage() {} +func (*ProjectList) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{1} +} +func (m *ProjectList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectList.Merge(m, src) +} +func (m *ProjectList) XXX_Size() int { + return m.Size() +} +func (m *ProjectList) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectList.DiscardUnknown(m) +} -func (m *ProjectSpec) Reset() { *m = ProjectSpec{} } -func (*ProjectSpec) ProtoMessage() {} -func (*ProjectSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +var xxx_messageInfo_ProjectList proto.InternalMessageInfo -func (m *ProjectStatus) Reset() { *m = ProjectStatus{} } -func (*ProjectStatus) ProtoMessage() {} -func (*ProjectStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (m *ProjectRequest) Reset() { *m = ProjectRequest{} } +func (*ProjectRequest) ProtoMessage() {} +func (*ProjectRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{2} +} +func (m *ProjectRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectRequest.Merge(m, src) +} +func (m *ProjectRequest) XXX_Size() int { + return m.Size() +} +func (m *ProjectRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectRequest proto.InternalMessageInfo + +func (m *ProjectSpec) Reset() { *m = ProjectSpec{} } +func (*ProjectSpec) ProtoMessage() {} +func (*ProjectSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{3} +} +func (m *ProjectSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectSpec.Merge(m, src) +} +func (m *ProjectSpec) XXX_Size() int { + return m.Size() +} +func (m *ProjectSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectSpec proto.InternalMessageInfo + +func (m *ProjectStatus) Reset() { *m = ProjectStatus{} } +func (*ProjectStatus) ProtoMessage() {} +func (*ProjectStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{4} +} +func (m *ProjectStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectStatus.Merge(m, src) +} +func (m *ProjectStatus) XXX_Size() int { + return m.Size() +} +func (m *ProjectStatus) XXX_DiscardUnknown() { + 
xxx_messageInfo_ProjectStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectStatus proto.InternalMessageInfo func init() { proto.RegisterType((*Project)(nil), "github.com.openshift.api.project.v1.Project") @@ -66,10 +176,55 @@ func init() { proto.RegisterType((*ProjectSpec)(nil), "github.com.openshift.api.project.v1.ProjectSpec") proto.RegisterType((*ProjectStatus)(nil), "github.com.openshift.api.project.v1.ProjectStatus") } + +func init() { + proto.RegisterFile("github.com/openshift/api/project/v1/generated.proto", fileDescriptor_fbf46eaac05029bf) +} + +var fileDescriptor_fbf46eaac05029bf = []byte{ + // 570 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x93, 0x3d, 0x8f, 0xd3, 0x30, + 0x18, 0xc7, 0x9b, 0xf6, 0x7a, 0x5c, 0x5d, 0xee, 0x84, 0xc2, 0x52, 0x75, 0x48, 0x4b, 0x90, 0x50, + 0x07, 0x70, 0x68, 0x79, 0x11, 0x73, 0x40, 0x08, 0x24, 0x5e, 0x0e, 0xb3, 0x55, 0x0c, 0xb8, 0xa9, + 0x9b, 0x9a, 0x5e, 0x62, 0x13, 0xbb, 0x95, 0x8e, 0x89, 0x8f, 0xc0, 0xce, 0xe7, 0x60, 0x65, 0xee, + 0x78, 0xe3, 0x4d, 0xd5, 0x35, 0x7c, 0x8b, 0x9b, 0x90, 0x1d, 0x37, 0x09, 0x5c, 0x91, 0xee, 0x16, + 0xb6, 0xfa, 0xc9, 0xff, 0xf7, 0xb3, 0xfd, 0x3c, 0x2e, 0x78, 0x10, 0x52, 0x39, 0x9d, 0x8f, 0x60, + 0xc0, 0x22, 0x8f, 0x71, 0x12, 0x8b, 0x29, 0x9d, 0x48, 0x0f, 0x73, 0xea, 0xf1, 0x84, 0x7d, 0x22, + 0x81, 0xf4, 0x16, 0x7d, 0x2f, 0x24, 0x31, 0x49, 0xb0, 0x24, 0x63, 0xc8, 0x13, 0x26, 0x99, 0x7d, + 0xbb, 0x80, 0x60, 0x0e, 0x41, 0xcc, 0x29, 0x34, 0x10, 0x5c, 0xf4, 0xdb, 0xf7, 0x4a, 0xe6, 0x90, + 0x85, 0xcc, 0xd3, 0xec, 0x68, 0x3e, 0xd1, 0x2b, 0xbd, 0xd0, 0xbf, 0x32, 0x67, 0xdb, 0x9d, 0x3d, + 0x11, 0x90, 0x32, 0xbd, 0x75, 0xc0, 0x12, 0xb2, 0x65, 0xdf, 0xf6, 0xc3, 0x22, 0x13, 0xe1, 0x60, + 0x4a, 0x63, 0x92, 0x1c, 0x7b, 0x7c, 0x16, 0xaa, 0x82, 0xf0, 0x22, 0x22, 0xf1, 0x36, 0xea, 0xf1, + 0xbf, 0xa8, 0x64, 0x1e, 0x4b, 0x1a, 0x11, 0x4f, 0x04, 0x53, 0x12, 0xe1, 0xbf, 0x39, 0xf7, 0x7b, + 0x15, 0x5c, 0x3b, 0xcc, 0xee, 0x63, 0x7f, 0x04, 0x7b, 0x4a, 
0x3f, 0xc6, 0x12, 0xb7, 0xac, 0xae, + 0xd5, 0x6b, 0x0e, 0xee, 0xc3, 0x4c, 0x0b, 0xcb, 0x5a, 0xc8, 0x67, 0xa1, 0x2a, 0x08, 0xa8, 0xd2, + 0x70, 0xd1, 0x87, 0x6f, 0x47, 0x8a, 0x7f, 0x4d, 0x24, 0xf6, 0xed, 0xe5, 0xaa, 0x53, 0x49, 0x57, + 0x1d, 0x50, 0xd4, 0x50, 0x6e, 0xb5, 0x11, 0xd8, 0x11, 0x9c, 0x04, 0xad, 0xaa, 0xb1, 0x5f, 0xa2, + 0xc5, 0xd0, 0x9c, 0xee, 0x3d, 0x27, 0x81, 0x7f, 0xdd, 0xd8, 0x77, 0xd4, 0x0a, 0x69, 0x97, 0x3d, + 0x04, 0xbb, 0x42, 0x62, 0x39, 0x17, 0xad, 0x9a, 0xb6, 0x0e, 0xae, 0x64, 0xd5, 0xa4, 0x7f, 0x60, + 0xbc, 0xbb, 0xd9, 0x1a, 0x19, 0xa3, 0xfb, 0xd3, 0x02, 0x4d, 0x93, 0x7c, 0x45, 0x85, 0xb4, 0x3f, + 0x5c, 0xe8, 0x10, 0xbc, 0x5c, 0x87, 0x14, 0xad, 0xfb, 0x73, 0xc3, 0xec, 0xb4, 0xb7, 0xa9, 0x94, + 0xba, 0xf3, 0x0e, 0xd4, 0xa9, 0x24, 0x91, 0x68, 0x55, 0xbb, 0xb5, 0x5e, 0x73, 0x70, 0xf7, 0x2a, + 0x17, 0xf1, 0xf7, 0x8d, 0xb8, 0xfe, 0x52, 0x29, 0x50, 0x66, 0x72, 0xcf, 0x2c, 0x70, 0x60, 0x12, + 0x88, 0x7c, 0x9e, 0x13, 0xf1, 0x3f, 0xa6, 0xfc, 0x08, 0x34, 0xc7, 0x54, 0xf0, 0x23, 0x7c, 0xfc, + 0x06, 0x47, 0x44, 0x0f, 0xbb, 0xe1, 0xdf, 0x34, 0x48, 0xf3, 0x59, 0xf1, 0x09, 0x95, 0x73, 0x1a, + 0x23, 0x22, 0x48, 0x28, 0x97, 0x94, 0xc5, 0x7a, 0x9a, 0x65, 0xac, 0xf8, 0x84, 0xca, 0x39, 0x17, + 0xe7, 0x23, 0x52, 0x8f, 0xc2, 0x46, 0x00, 0x4c, 0x68, 0x8c, 0x8f, 0xe8, 0x17, 0x92, 0x88, 0x96, + 0xd5, 0xad, 0xf5, 0x1a, 0xfe, 0x40, 0x1d, 0xf5, 0x79, 0x5e, 0x3d, 0x5f, 0x75, 0xba, 0x17, 0xff, + 0x88, 0x30, 0x0f, 0xe8, 0xa3, 0x95, 0x2c, 0xee, 0x0f, 0x0b, 0xec, 0xff, 0xf1, 0x60, 0xec, 0x17, + 0xa0, 0xce, 0xa7, 0x58, 0x10, 0xdd, 0xc1, 0x86, 0x3f, 0xd8, 0x34, 0xff, 0x50, 0x15, 0xcf, 0x57, + 0x9d, 0x5b, 0x5b, 0xfc, 0x4a, 0x2b, 0x38, 0x0e, 0x88, 0x0e, 0xa1, 0x4c, 0x60, 0x0f, 0x01, 0x08, + 0x58, 0x3c, 0xa6, 0xea, 0x2e, 0x9b, 0xc9, 0xdf, 0x29, 0x0d, 0x04, 0x2a, 0x1c, 0x96, 0xf1, 0xa7, + 0x9b, 0x78, 0x31, 0x86, 0xbc, 0x24, 0x50, 0xc9, 0xe6, 0xf7, 0x96, 0x6b, 0xa7, 0x72, 0xb2, 0x76, + 0x2a, 0xa7, 0x6b, 0xa7, 0xf2, 0x35, 0x75, 0xac, 0x65, 0xea, 0x58, 0x27, 0xa9, 0x63, 0x9d, 0xa6, + 
0x8e, 0x75, 0x96, 0x3a, 0xd6, 0xb7, 0x5f, 0x4e, 0x65, 0x58, 0x5d, 0xf4, 0x7f, 0x07, 0x00, 0x00, + 0xff, 0xff, 0x0a, 0xd0, 0xf2, 0xe0, 0x22, 0x05, 0x00, 0x00, +} + func (m *Project) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -77,41 +232,52 @@ func (m *Project) Marshal() (dAtA []byte, err error) { } func (m *Project) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 + i-- dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n3 - return i, nil + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ProjectList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { 
return nil, err } @@ -119,37 +285,46 @@ func (m *ProjectList) Marshal() (dAtA []byte, err error) { } func (m *ProjectList) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i += n + i-- + dAtA[i] = 0x12 } } - return i, nil + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ProjectRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -157,33 +332,42 @@ func (m *ProjectRequest) Marshal() (dAtA []byte, err error) { } func (m *ProjectRequest) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) - i += copy(dAtA[i:], m.DisplayName) - dAtA[i] = 0x1a - i++ + i -= len(m.Description) + copy(dAtA[i:], m.Description) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - return i, nil + i-- + dAtA[i] = 0x1a + i -= len(m.DisplayName) + copy(dAtA[i:], m.DisplayName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } func (m *ProjectSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -191,32 +375,31 @@ func (m *ProjectSpec) Marshal() (dAtA []byte, err error) { } func (m *ProjectSpec) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) } } - return i, nil + return len(dAtA) - i, nil } func (m *ProjectStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -224,45 +407,52 @@ func (m *ProjectStatus) Marshal() (dAtA []byte, 
err error) { } func (m *ProjectStatus) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - dAtA[i] = 0xa - i++ + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - return i, nil + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Project) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -275,6 +465,9 @@ func (m *Project) Size() (n int) { } func (m *ProjectList) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = m.ListMeta.Size() @@ -289,6 +482,9 @@ func (m *ProjectList) Size() (n int) { } func (m *ProjectRequest) Size() (n int) { + if m == nil { 
+ return 0 + } var l int _ = l l = m.ObjectMeta.Size() @@ -301,6 +497,9 @@ func (m *ProjectRequest) Size() (n int) { } func (m *ProjectSpec) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Finalizers) > 0 { @@ -313,22 +512,24 @@ func (m *ProjectSpec) Size() (n int) { } func (m *ProjectStatus) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Phase) n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -338,7 +539,7 @@ func (this *Project) String() string { return "nil" } s := strings.Join([]string{`&Project{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ProjectSpec", "ProjectSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ProjectStatus", "ProjectStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -349,9 +550,14 @@ func (this *ProjectList) String() string { if this == nil { return "nil" } + repeatedStringForItems := "[]Project{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Project", "Project", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" s := strings.Join([]string{`&ProjectList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + 
`,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Project", "Project", 1), `&`, ``, 1) + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, `}`, }, "") return s @@ -361,7 +567,7 @@ func (this *ProjectRequest) String() string { return "nil" } s := strings.Join([]string{`&ProjectRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, `}`, @@ -382,8 +588,14 @@ func (this *ProjectStatus) String() string { if this == nil { return "nil" } + repeatedStringForConditions := "[]NamespaceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" s := strings.Join([]string{`&ProjectStatus{`, `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, `}`, }, "") return s @@ -411,7 +623,7 @@ func (m *Project) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -439,7 +651,7 @@ func (m *Project) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -448,6 +660,9 @@ func (m *Project) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -469,7 +684,7 @@ func (m *Project) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -478,6 +693,9 @@ func (m *Project) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -499,7 +717,7 @@ func (m *Project) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -508,6 +726,9 @@ func (m *Project) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -524,6 +745,9 @@ func (m *Project) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -551,7 +775,7 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -579,7 +803,7 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -588,6 +812,9 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -609,7 +836,7 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -618,6 +845,9 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex 
:= iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -635,6 +865,9 @@ func (m *ProjectList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -662,7 +895,7 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -690,7 +923,7 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } @@ -699,6 +932,9 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -720,7 +956,7 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -730,6 +966,9 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -749,7 +988,7 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -759,6 +998,9 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -773,6 +1015,9 @@ func (m *ProjectRequest) Unmarshal(dAtA []byte) 
error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -800,7 +1045,7 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -828,7 +1073,7 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -838,6 +1083,9 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } @@ -852,6 +1100,9 @@ func (m *ProjectSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -879,7 +1130,7 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= (uint64(b) & 0x7F) << shift + wire |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -907,7 +1158,7 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } @@ -917,11 +1168,48 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } if postIndex > l { return io.ErrUnexpectedEOF } m.Phase = k8s_io_api_core_v1.NamespacePhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v11.NamespaceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -931,6 +1219,9 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -946,6 +1237,7 @@ func (m *ProjectStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -977,10 +1269,8 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -997,96 +1287,34 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } - iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - return iNdEx, nil + iNdEx += length case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != 
nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") ) - -func init() { - proto.RegisterFile("github.com/openshift/api/project/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 558 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x93, 0x3f, 0x6f, 0xd3, 0x4e, - 0x18, 0xc7, 0xe3, 0xa4, 0xe9, 0xaf, 0xb9, 0xfc, 0x5a, 0x21, 0xb3, 0x44, 0x19, 0x9c, 0x60, 0x96, - 0x0c, 0x70, 0x26, 0x29, 0x20, 0x66, 0x0b, 0x21, 0x90, 0xf8, 0x53, 0xcc, 0x44, 0xc5, 0xc0, 0xc5, - 0x79, 0xe2, 0x1c, 0x89, 0xed, 0xc3, 0x77, 0x8e, 0x54, 0x26, 0x5e, 0x02, 0x3b, 0xef, 0x85, 0x39, - 0x63, 0xc7, 0x4e, 0x51, 0x63, 0xde, 0x45, 0x27, 0x74, 0xe7, 0x6b, 0x6c, 0x48, 0x2a, 0xda, 0x85, - 0x2d, 0xcf, 0xe3, 0xef, 0xe7, 0x73, 0xe7, 0xe7, 0x71, 0xd0, 0x61, 0x40, 0xc5, 0x24, 0x1d, 0x62, - 0x3f, 0x0e, 0x9d, 0x98, 0x41, 0xc4, 0x27, 0x74, 0x2c, 0x1c, 0xc2, 0xa8, 0xc3, 0x92, 0xf8, 0x13, - 0xf8, 0xc2, 0x99, 0xf7, 0x9d, 0x00, 0x22, 0x48, 0x88, 0x80, 0x11, 0x66, 0x49, 0x2c, 0x62, 0xf3, - 0x6e, 0x01, 0xe1, 0x35, 0x84, 0x09, 0xa3, 0x58, 0x43, 0x78, 0xde, 0x6f, 0xdf, 0x2f, 0x99, 0x83, - 0x38, 0x88, 0x1d, 0xc5, 0x0e, 0xd3, 
0xb1, 0xaa, 0x54, 0xa1, 0x7e, 0xe5, 0xce, 0xb6, 0x3d, 0x7d, - 0xc2, 0x31, 0x8d, 0xd5, 0xd1, 0x7e, 0x9c, 0xc0, 0x96, 0x73, 0xdb, 0x0f, 0x8b, 0x4c, 0x48, 0xfc, - 0x09, 0x8d, 0x20, 0x39, 0x71, 0xd8, 0x34, 0x90, 0x0d, 0xee, 0x84, 0x20, 0xc8, 0x36, 0xca, 0xb9, - 0x8a, 0x4a, 0xd2, 0x48, 0xd0, 0x10, 0x36, 0x80, 0xc7, 0x7f, 0x03, 0xb8, 0x3f, 0x81, 0x90, 0x6c, - 0x70, 0x87, 0x57, 0x71, 0xa9, 0xa0, 0x33, 0x87, 0x46, 0x82, 0x8b, 0xe4, 0x4f, 0xc8, 0xfe, 0x5e, - 0x45, 0xff, 0x1d, 0xe5, 0x53, 0x33, 0x3f, 0xa2, 0x3d, 0xf9, 0x12, 0x23, 0x22, 0x48, 0xcb, 0xe8, - 0x1a, 0xbd, 0xe6, 0xe0, 0x01, 0xce, 0x9d, 0xb8, 0xec, 0xc4, 0x6c, 0x1a, 0xc8, 0x06, 0xc7, 0x32, - 0x8d, 0xe7, 0x7d, 0xfc, 0x66, 0x28, 0xf9, 0x57, 0x20, 0x88, 0x6b, 0x2e, 0x96, 0x9d, 0x4a, 0xb6, - 0xec, 0xa0, 0xa2, 0xe7, 0xad, 0xad, 0xa6, 0x87, 0x76, 0x38, 0x03, 0xbf, 0x55, 0xd5, 0xf6, 0x6b, - 0x2c, 0x12, 0xeb, 0xdb, 0xbd, 0x63, 0xe0, 0xbb, 0xff, 0x6b, 0xfb, 0x8e, 0xac, 0x3c, 0xe5, 0x32, - 0x8f, 0xd1, 0x2e, 0x17, 0x44, 0xa4, 0xbc, 0x55, 0x53, 0xd6, 0xc1, 0x8d, 0xac, 0x8a, 0x74, 0x0f, - 0xb4, 0x77, 0x37, 0xaf, 0x3d, 0x6d, 0xb4, 0x7f, 0x18, 0xa8, 0xa9, 0x93, 0x2f, 0x29, 0x17, 0xe6, - 0x87, 0x8d, 0x09, 0xe1, 0xeb, 0x4d, 0x48, 0xd2, 0x6a, 0x3e, 0xb7, 0xf4, 0x49, 0x7b, 0x97, 0x9d, - 0xd2, 0x74, 0xde, 0xa2, 0x3a, 0x15, 0x10, 0xf2, 0x56, 0xb5, 0x5b, 0xeb, 0x35, 0x07, 0xf7, 0x6e, - 0xf2, 0x22, 0xee, 0xbe, 0x16, 0xd7, 0x5f, 0x48, 0x85, 0x97, 0x9b, 0xec, 0x73, 0x03, 0x1d, 0xe8, - 0x84, 0x07, 0x9f, 0x53, 0xe0, 0xff, 0x62, 0xcb, 0x8f, 0x50, 0x73, 0x44, 0x39, 0x9b, 0x91, 0x93, - 0xd7, 0x24, 0x04, 0xb5, 0xec, 0x86, 0x7b, 0x5b, 0x23, 0xcd, 0xa7, 0xc5, 0x23, 0xaf, 0x9c, 0x53, - 0x18, 0x70, 0x3f, 0xa1, 0x4c, 0xd0, 0x38, 0x52, 0xdb, 0x2c, 0x63, 0xc5, 0x23, 0xaf, 0x9c, 0xb3, - 0xc9, 0x7a, 0x45, 0xf2, 0xa3, 0x30, 0x3d, 0x84, 0xc6, 0x34, 0x22, 0x33, 0xfa, 0x05, 0x12, 0xde, - 0x32, 0xba, 0xb5, 0x5e, 0xc3, 0x1d, 0xc8, 0xab, 0x3e, 0x5b, 0x77, 0x2f, 0x96, 0x9d, 0xee, 0xe6, - 0xdf, 0x1d, 0xaf, 0x03, 0xea, 0x6a, 0x25, 0x8b, 0xfd, 0x1e, 0xed, 0xff, 
0xf6, 0xbd, 0x98, 0xcf, - 0x51, 0x9d, 0x4d, 0x08, 0x07, 0x35, 0xc0, 0x86, 0x3b, 0xb8, 0x9c, 0xfd, 0x91, 0x6c, 0x5e, 0x2c, - 0x3b, 0x77, 0xb6, 0xe8, 0xa5, 0x95, 0x33, 0xe2, 0x83, 0x0a, 0x79, 0xb9, 0xc0, 0xed, 0x2d, 0x56, - 0x56, 0xe5, 0x74, 0x65, 0x55, 0xce, 0x56, 0x56, 0xe5, 0x6b, 0x66, 0x19, 0x8b, 0xcc, 0x32, 0x4e, - 0x33, 0xcb, 0x38, 0xcb, 0x2c, 0xe3, 0x3c, 0xb3, 0x8c, 0x6f, 0x3f, 0xad, 0xca, 0x71, 0x75, 0xde, - 0xff, 0x15, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x82, 0x78, 0x13, 0x2b, 0x05, 0x00, 0x00, -} diff --git a/vendor/github.com/openshift/api/project/v1/generated.proto b/vendor/github.com/openshift/api/project/v1/generated.proto index f42956be3..2baaf6a03 100644 --- a/vendor/github.com/openshift/api/project/v1/generated.proto +++ b/vendor/github.com/openshift/api/project/v1/generated.proto @@ -7,9 +7,7 @@ package github.com.openshift.api.project.v1; import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; @@ -20,26 +18,25 @@ option go_package = "v1"; // membership, editors can create and manage the resources, and viewers can see but not access running // containers. In a normal cluster project administrators are not able to alter their quotas - that is // restricted to cluster administrators. -// +// // Listing or watching projects will return only projects the user has the reader role on. -// +// // An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed // as editable to end users while namespaces are not. Direct creation of a project is typically restricted // to administrators, while end users should use the requestproject resource. message Project { - // Standard object's metadata. 
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. optional ProjectSpec spec = 2; // Status describes the current status of a Namespace + // +optional optional ProjectStatus status = 3; } // ProjectList is a list of Project objects. message ProjectList { - // Standard object's metadata. optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // Items is the list of projects @@ -48,7 +45,6 @@ message ProjectList { // ProjecRequest is the set of options necessary to fully qualify a project request message ProjectRequest { - // Standard object's metadata. optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DisplayName is the display name to apply to a project @@ -67,6 +63,13 @@ message ProjectSpec { // ProjectStatus is information about the current status of a Project message ProjectStatus { // Phase is the current lifecycle phase of the project + // +optional optional string phase = 1; + + // Represents the latest available observations of the project current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated k8s.io.api.core.v1.NamespaceCondition conditions = 2; } diff --git a/vendor/github.com/openshift/api/project/v1/legacy.go b/vendor/github.com/openshift/api/project/v1/legacy.go new file mode 100644 index 000000000..186f905f3 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/legacy.go @@ -0,0 +1,23 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Project{}, + &ProjectList{}, + &ProjectRequest{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/project/v1/register.go b/vendor/github.com/openshift/api/project/v1/register.go index 07f866897..e471716ce 100644 --- a/vendor/github.com/openshift/api/project/v1/register.go +++ b/vendor/github.com/openshift/api/project/v1/register.go @@ -1,49 +1,40 @@ package v1 import ( + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -const ( - GroupName = "project.openshift.io" - LegacyGroupName = "" -) - -// SchemeGroupVersion is group version used to register these objects var ( - SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "v1"} + GroupName = "project.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this 
version to a scheme + Install = schemeBuilder.AddToScheme - LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) - AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme - - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme ) +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() + return schema.GroupResource{Group: GroupName, Resource: resource} } // Adds the list of known types to api.Scheme. func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, + scheme.AddKnownTypes(GroupVersion, &Project{}, &ProjectList{}, &ProjectRequest{}, ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} - -func addLegacyKnownTypes(scheme *runtime.Scheme) error { - types := []runtime.Object{ - &Project{}, - &ProjectList{}, - &ProjectRequest{}, - } - scheme.AddKnownTypes(LegacySchemeGroupVersion, types...) + metav1.AddToGroupVersion(scheme, GroupVersion) return nil } diff --git a/vendor/github.com/openshift/api/project/v1/types.go b/vendor/github.com/openshift/api/project/v1/types.go index c4ed0555f..dea150f12 100644 --- a/vendor/github.com/openshift/api/project/v1/types.go +++ b/vendor/github.com/openshift/api/project/v1/types.go @@ -10,8 +10,8 @@ import ( // ProjectList is a list of Project objects. type ProjectList struct { metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Items is the list of projects Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -19,6 +19,13 @@ type ProjectList struct { const ( // These are internal finalizer values to Origin FinalizerOrigin corev1.FinalizerName = "openshift.io/origin" + // ProjectNodeSelector is an annotation that holds the node selector; + // the node selector annotation determines which nodes will have pods from this project scheduled to them + ProjectNodeSelector = "openshift.io/node-selector" + + // ProjectRequesterAnnotation is the username that requested a given project. Its not guaranteed to be present, + // but it is set by the default project template. + ProjectRequesterAnnotation = "openshift.io/requester" ) // ProjectSpec describes the attributes on a Project @@ -30,7 +37,14 @@ type ProjectSpec struct { // ProjectStatus is information about the current status of a Project type ProjectStatus struct { // Phase is the current lifecycle phase of the project + // +optional Phase corev1.NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=k8s.io/api/core/v1.NamespacePhase"` + + // Represents the latest available observations of the project current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []corev1.NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } // +genclient @@ -50,14 +64,14 @@ type ProjectStatus struct { // as editable to end users while namespaces are not. Direct creation of a project is typically restricted // to administrators, while end users should use the requestproject resource. type Project struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
+ metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of the Namespace. Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status describes the current status of a Namespace + // +optional Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -69,9 +83,9 @@ type Project struct { // ProjecRequest is the set of options necessary to fully qualify a project request type ProjectRequest struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. + metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // DisplayName is the display name to apply to a project DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` // Description is the description to apply to a project diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go index 9c2ef00ba..763383030 100644 --- a/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go @@ -1,11 +1,11 @@ // +build !ignore_autogenerated -// This file was autogenerated by deepcopy-gen. Do not edit it manually! +// Code generated by deepcopy-gen. DO NOT EDIT. 
package v1 import ( - core_v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -15,7 +15,7 @@ func (in *Project) DeepCopyInto(out *Project) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -33,16 +33,15 @@ func (in *Project) DeepCopy() *Project { func (in *Project) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProjectList) DeepCopyInto(out *ProjectList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Project, len(*in)) @@ -67,9 +66,8 @@ func (in *ProjectList) DeepCopy() *ProjectList { func (in *ProjectList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -94,9 +92,8 @@ func (in *ProjectRequest) DeepCopy() *ProjectRequest { func (in *ProjectRequest) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c - } else { - return nil } + return nil } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -104,7 +101,7 @@ func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { *out = *in if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers - *out = make([]core_v1.FinalizerName, len(*in)) + *out = make([]corev1.FinalizerName, len(*in)) copy(*out, *in) } return @@ -123,6 +120,13 @@ func (in *ProjectSpec) DeepCopy() *ProjectSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]corev1.NamespaceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/github.com/openshift/api/project/v1/types_swagger_doc_generated.go b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go similarity index 51% rename from vendor/github.com/openshift/api/project/v1/types_swagger_doc_generated.go rename to vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go index 16c9a3e9f..080f2677a 100644 --- a/vendor/github.com/openshift/api/project/v1/types_swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go @@ -8,14 +8,13 @@ package v1 // they are on one line! For multiple line or blocks that you want to ignore use ---. // Any context after a --- is ignored. // -// Those methods can be generated by using hack/update-generated-swagger-docs.sh +// Those methods can be generated by using hack/update-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE var map_Project = map[string]string{ - "": "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. 
Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed as editable to end users while namespaces are not. Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.", - "metadata": "Standard object's metadata.", - "spec": "Spec defines the behavior of the Namespace.", - "status": "Status describes the current status of a Namespace", + "": "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed as editable to end users while namespaces are not. 
Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.", + "spec": "Spec defines the behavior of the Namespace.", + "status": "Status describes the current status of a Namespace", } func (Project) SwaggerDoc() map[string]string { @@ -23,9 +22,8 @@ func (Project) SwaggerDoc() map[string]string { } var map_ProjectList = map[string]string{ - "": "ProjectList is a list of Project objects.", - "metadata": "Standard object's metadata.", - "items": "Items is the list of projects", + "": "ProjectList is a list of Project objects.", + "items": "Items is the list of projects", } func (ProjectList) SwaggerDoc() map[string]string { @@ -34,7 +32,6 @@ func (ProjectList) SwaggerDoc() map[string]string { var map_ProjectRequest = map[string]string{ "": "ProjecRequest is the set of options necessary to fully qualify a project request", - "metadata": "Standard object's metadata.", "displayName": "DisplayName is the display name to apply to a project", "description": "Description is the description to apply to a project", } @@ -53,8 +50,9 @@ func (ProjectSpec) SwaggerDoc() map[string]string { } var map_ProjectStatus = map[string]string{ - "": "ProjectStatus is information about the current status of a Project", - "phase": "Phase is the current lifecycle phase of the project", + "": "ProjectStatus is information about the current status of a Project", + "phase": "Phase is the current lifecycle phase of the project", + "conditions": "Represents the latest available observations of the project current state.", } func (ProjectStatus) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/russross/blackfriday/.gitignore b/vendor/github.com/russross/blackfriday/.gitignore deleted file mode 100644 index 75623dccc..000000000 --- a/vendor/github.com/russross/blackfriday/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*.out -*.swp -*.8 -*.6 -_obj -_test* -markdown -tags diff --git 
a/vendor/github.com/russross/blackfriday/.travis.yml b/vendor/github.com/russross/blackfriday/.travis.yml deleted file mode 100644 index 2f3351d7a..000000000 --- a/vendor/github.com/russross/blackfriday/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -sudo: false -language: go -go: - - "1.9.x" - - "1.10.x" - - tip -matrix: - fast_finish: true - allow_failures: - - go: tip -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d -s .) - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt deleted file mode 100644 index 2885af360..000000000 --- a/vendor/github.com/russross/blackfriday/LICENSE.txt +++ /dev/null @@ -1,29 +0,0 @@ -Blackfriday is distributed under the Simplified BSD License: - -> Copyright © 2011 Russ Ross -> All rights reserved. -> -> Redistribution and use in source and binary forms, with or without -> modification, are permitted provided that the following conditions -> are met: -> -> 1. Redistributions of source code must retain the above copyright -> notice, this list of conditions and the following disclaimer. -> -> 2. Redistributions in binary form must reproduce the above -> copyright notice, this list of conditions and the following -> disclaimer in the documentation and/or other materials provided with -> the distribution. -> -> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md deleted file mode 100644 index 3c62e1375..000000000 --- a/vendor/github.com/russross/blackfriday/README.md +++ /dev/null @@ -1,369 +0,0 @@ -Blackfriday -[![Build Status][BuildSVG]][BuildURL] -[![Godoc][GodocV2SVG]][GodocV2URL] -=========== - -Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It -is paranoid about its input (so you can safely feed it user-supplied -data), it is fast, it supports common extensions (tables, smart -punctuation substitutions, etc.), and it is safe for all utf-8 -(unicode) input. - -HTML output is currently supported, along with Smartypants -extensions. - -It started as a translation from C of [Sundown][3]. - - -Installation ------------- - -Blackfriday is compatible with any modern Go release. With Go and git installed: - - go get -u gopkg.in/russross/blackfriday.v2 - -will download, compile, and install the package into your `$GOPATH` directory -hierarchy. - - -Versions --------- - -Currently maintained and recommended version of Blackfriday is `v2`. It's being -developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the -documentation is available at -https://godoc.org/gopkg.in/russross/blackfriday.v2. 
- -It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`, -but we highly recommend using package management tool like [dep][7] or -[Glide][8] and make use of semantic versioning. With package management you -should import `github.com/russross/blackfriday` and specify that you're using -version 2.0.0. - -Version 2 offers a number of improvements over v1: - -* Cleaned up API -* A separate call to [`Parse`][4], which produces an abstract syntax tree for - the document -* Latest bug fixes -* Flexibility to easily add your own rendering extensions - -Potential drawbacks: - -* Our benchmarks show v2 to be slightly slower than v1. Currently in the - ballpark of around 15%. -* API breakage. If you can't afford modifying your code to adhere to the new API - and don't care too much about the new features, v2 is probably not for you. -* Several bug fixes are trailing behind and still need to be forward-ported to - v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for - tracking. - -If you are still interested in the legacy `v1`, you can import it from -`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found -here: https://godoc.org/github.com/russross/blackfriday - -### Known issue with `dep` - -There is a known problem with using Blackfriday v1 _transitively_ and `dep`. -Currently `dep` prioritizes semver versions over anything else, and picks the -latest one, plus it does not apply a `[[constraint]]` specifier to transitively -pulled in packages. So if you're using something that uses Blackfriday v1, but -that something does not use `dep` yet, you will get Blackfriday v2 pulled in and -your first dependency will fail to build. 
- -There are couple of fixes for it, documented here: -https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version - -Meanwhile, `dep` team is working on a more general solution to the constraints -on transitive dependencies problem: https://github.com/golang/dep/issues/1124. - - -Usage ------ - -### v1 - -For basic usage, it is as simple as getting your input into a byte -slice and calling: - - output := blackfriday.MarkdownBasic(input) - -This renders it with no extensions enabled. To get a more useful -feature set, use this instead: - - output := blackfriday.MarkdownCommon(input) - -### v2 - -For the most sensible markdown processing, it is as simple as getting your input -into a byte slice and calling: - -```go -output := blackfriday.Run(input) -``` - -Your input will be parsed and the output rendered with a set of most popular -extensions enabled. If you want the most basic feature set, corresponding with -the bare Markdown specification, use: - -```go -output := blackfriday.Run(input, blackfriday.WithNoExtensions()) -``` - -### Sanitize untrusted content - -Blackfriday itself does nothing to protect against malicious content. If you are -dealing with user-supplied markdown, we recommend running Blackfriday's output -through HTML sanitizer such as [Bluemonday][5]. - -Here's an example of simple usage of Blackfriday together with Bluemonday: - -```go -import ( - "github.com/microcosm-cc/bluemonday" - "gopkg.in/russross/blackfriday.v2" -) - -// ... -unsafe := blackfriday.Run(input) -html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) -``` - -### Custom options, v1 - -If you want to customize the set of options, first get a renderer -(currently only the HTML output engine), then use it to -call the more general `Markdown` function. For examples, see the -implementations of `MarkdownBasic` and `MarkdownCommon` in -`markdown.go`. 
- -### Custom options, v2 - -If you want to customize the set of options, use `blackfriday.WithExtensions`, -`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. - -### `blackfriday-tool` - -You can also check out `blackfriday-tool` for a more complete example -of how to use it. Download and install it using: - - go get github.com/russross/blackfriday-tool - -This is a simple command-line tool that allows you to process a -markdown file using a standalone program. You can also browse the -source directly on github if you are just looking for some example -code: - -* - -Note that if you have not already done so, installing -`blackfriday-tool` will be sufficient to download and install -blackfriday in addition to the tool itself. The tool binary will be -installed in `$GOPATH/bin`. This is a statically-linked binary that -can be copied to wherever you need it without worrying about -dependencies and library versions. - -### Sanitized anchor names - -Blackfriday includes an algorithm for creating sanitized anchor names -corresponding to a given input text. This algorithm is used to create -anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The -algorithm has a specification, so that other packages can create -compatible anchor names and links to those anchors. - -The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names. - -[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to -create compatible links to the anchor names generated by blackfriday. -This algorithm is also implemented in a small standalone package at -[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients -that want a small package and don't need full functionality of blackfriday. 
- - -Features --------- - -All features of Sundown are supported, including: - -* **Compatibility**. The Markdown v1.0.3 test suite passes with - the `--tidy` option. Without `--tidy`, the differences are - mostly in whitespace and entity escaping, where blackfriday is - more consistent and cleaner. - -* **Common extensions**, including table support, fenced code - blocks, autolinks, strikethroughs, non-strict emphasis, etc. - -* **Safety**. Blackfriday is paranoid when parsing, making it safe - to feed untrusted user input without fear of bad things - happening. The test suite stress tests this and there are no - known inputs that make it crash. If you find one, please let me - know and send me the input that does it. - - NOTE: "safety" in this context means *runtime safety only*. In order to - protect yourself against JavaScript injection in untrusted content, see - [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). - -* **Fast processing**. It is fast enough to render on-demand in - most web applications without having to cache the output. - -* **Thread safety**. You can run multiple parsers in different - goroutines without ill effect. There is no dependence on global - shared state. - -* **Minimal dependencies**. Blackfriday only depends on standard - library packages in Go. The source code is pretty - self-contained, so it is easy to add to any project, including - Google App Engine projects. - -* **Standards compliant**. Output successfully validates using the - W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. - - -Extensions ----------- - -In addition to the standard markdown syntax, this package -implements the following extensions: - -* **Intra-word emphasis supression**. The `_` character is - commonly used inside words when discussing code, so having - markdown interpret it as an emphasis command is usually the - wrong thing. 
Blackfriday lets you treat all emphasis markers as - normal characters when they occur inside a word. - -* **Tables**. Tables can be created by drawing them in the input - using a simple syntax: - - ``` - Name | Age - --------|------ - Bob | 27 - Alice | 23 - ``` - -* **Fenced code blocks**. In addition to the normal 4-space - indentation to mark code blocks, you can explicitly mark them - and supply a language (to make syntax highlighting simple). Just - mark it like this: - - ``` go - func getTrue() bool { - return true - } - ``` - - You can use 3 or more backticks to mark the beginning of the - block, and the same number to mark the end of the block. - - To preserve classes of fenced code blocks while using the bluemonday - HTML sanitizer, use the following policy: - - ``` go - p := bluemonday.UGCPolicy() - p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") - html := p.SanitizeBytes(unsafe) - ``` - -* **Definition lists**. A simple definition list is made of a single-line - term followed by a colon and the definition for that term. - - Cat - : Fluffy animal everyone likes - - Internet - : Vector of transmission for pictures of cats - - Terms must be separated from the previous definition by a blank line. - -* **Footnotes**. A marker in the text that will become a superscript number; - a footnote definition that will be placed in a list of footnotes at the - end of the document. A footnote looks like this: - - This is a footnote.[^1] - - [^1]: the footnote text. - -* **Autolinking**. Blackfriday can find URLs that have not been - explicitly marked as links and turn them into links. - -* **Strikethrough**. Use two tildes (`~~`) to mark text that - should be crossed out. - -* **Hard line breaks**. With this extension enabled (it is off by - default in the `MarkdownBasic` and `MarkdownCommon` convenience - functions), newlines in the input translate into line breaks in - the output. - -* **Smart quotes**. 
Smartypants-style punctuation substitution is - supported, turning normal double- and single-quote marks into - curly quotes, etc. - -* **LaTeX-style dash parsing** is an additional option, where `--` - is translated into `–`, and `---` is translated into - `—`. This differs from most smartypants processors, which - turn a single hyphen into an ndash and a double hyphen into an - mdash. - -* **Smart fractions**, where anything that looks like a fraction - is translated into suitable HTML (instead of just a few special - cases like most smartypant processors). For example, `4/5` - becomes `45`, which renders as - 45. - - -Other renderers ---------------- - -Blackfriday is structured to allow alternative rendering engines. Here -are a few of note: - -* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): - provides a GitHub Flavored Markdown renderer with fenced code block - highlighting, clickable heading anchor links. - - It's not customizable, and its goal is to produce HTML output - equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), - except the rendering is performed locally. - -* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, - but for markdown. - -* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex): - renders output as LaTeX. - -* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience - integration with the [Chroma](https://github.com/alecthomas/chroma) code - highlighting library. bfchroma is only compatible with v2 of Blackfriday and - provides a drop-in renderer ready to use with Blackfriday, as well as - options and means for further customization. - - -TODO ----- - -* More unit testing -* Improve Unicode support. 
It does not understand all Unicode - rules (about what constitutes a letter, a punctuation symbol, - etc.), so it may fail to detect word boundaries correctly in - some instances. It is safe on all UTF-8 input. - - -License -------- - -[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) - - - [1]: https://daringfireball.net/projects/markdown/ "Markdown" - [2]: https://golang.org/ "Go Language" - [3]: https://github.com/vmg/sundown "Sundown" - [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" - [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" - [6]: https://labix.org/gopkg.in "gopkg.in" - [7]: https://github.com/golang/dep/ "dep" - [8]: https://github.com/Masterminds/glide "Glide" - - [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master - [BuildURL]: https://travis-ci.org/russross/blackfriday - [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg - [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2 diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go deleted file mode 100644 index 45c21a6c2..000000000 --- a/vendor/github.com/russross/blackfriday/block.go +++ /dev/null @@ -1,1474 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// Functions to parse block-level elements. -// - -package blackfriday - -import ( - "bytes" - "strings" - "unicode" -) - -// Parse block-level data. -// Note: this function and many that it calls assume that -// the input buffer ends with a newline. 
-func (p *parser) block(out *bytes.Buffer, data []byte) { - if len(data) == 0 || data[len(data)-1] != '\n' { - panic("block input is missing terminating newline") - } - - // this is called recursively: enforce a maximum depth - if p.nesting >= p.maxNesting { - return - } - p.nesting++ - - // parse out one block-level construct at a time - for len(data) > 0 { - // prefixed header: - // - // # Header 1 - // ## Header 2 - // ... - // ###### Header 6 - if p.isPrefixHeader(data) { - data = data[p.prefixHeader(out, data):] - continue - } - - // block of preformatted HTML: - // - //
- // ... - //
- if data[0] == '<' { - if i := p.html(out, data, true); i > 0 { - data = data[i:] - continue - } - } - - // title block - // - // % stuff - // % more stuff - // % even more stuff - if p.flags&EXTENSION_TITLEBLOCK != 0 { - if data[0] == '%' { - if i := p.titleBlock(out, data, true); i > 0 { - data = data[i:] - continue - } - } - } - - // blank lines. note: returns the # of bytes to skip - if i := p.isEmpty(data); i > 0 { - data = data[i:] - continue - } - - // indented code block: - // - // func max(a, b int) int { - // if a > b { - // return a - // } - // return b - // } - if p.codePrefix(data) > 0 { - data = data[p.code(out, data):] - continue - } - - // fenced code block: - // - // ``` go info string here - // func fact(n int) int { - // if n <= 1 { - // return n - // } - // return n * fact(n-1) - // } - // ``` - if p.flags&EXTENSION_FENCED_CODE != 0 { - if i := p.fencedCodeBlock(out, data, true); i > 0 { - data = data[i:] - continue - } - } - - // horizontal rule: - // - // ------ - // or - // ****** - // or - // ______ - if p.isHRule(data) { - p.r.HRule(out) - var i int - for i = 0; data[i] != '\n'; i++ { - } - data = data[i:] - continue - } - - // block quote: - // - // > A big quote I found somewhere - // > on the web - if p.quotePrefix(data) > 0 { - data = data[p.quote(out, data):] - continue - } - - // table: - // - // Name | Age | Phone - // ------|-----|--------- - // Bob | 31 | 555-1234 - // Alice | 27 | 555-4321 - if p.flags&EXTENSION_TABLES != 0 { - if i := p.table(out, data); i > 0 { - data = data[i:] - continue - } - } - - // an itemized/unordered list: - // - // * Item 1 - // * Item 2 - // - // also works with + or - - if p.uliPrefix(data) > 0 { - data = data[p.list(out, data, 0):] - continue - } - - // a numbered/ordered list: - // - // 1. Item 1 - // 2. 
Item 2 - if p.oliPrefix(data) > 0 { - data = data[p.list(out, data, LIST_TYPE_ORDERED):] - continue - } - - // definition lists: - // - // Term 1 - // : Definition a - // : Definition b - // - // Term 2 - // : Definition c - if p.flags&EXTENSION_DEFINITION_LISTS != 0 { - if p.dliPrefix(data) > 0 { - data = data[p.list(out, data, LIST_TYPE_DEFINITION):] - continue - } - } - - // anything else must look like a normal paragraph - // note: this finds underlined headers, too - data = data[p.paragraph(out, data):] - } - - p.nesting-- -} - -func (p *parser) isPrefixHeader(data []byte) bool { - if data[0] != '#' { - return false - } - - if p.flags&EXTENSION_SPACE_HEADERS != 0 { - level := 0 - for level < 6 && data[level] == '#' { - level++ - } - if data[level] != ' ' { - return false - } - } - return true -} - -func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { - level := 0 - for level < 6 && data[level] == '#' { - level++ - } - i := skipChar(data, level, ' ') - end := skipUntilChar(data, i, '\n') - skip := end - id := "" - if p.flags&EXTENSION_HEADER_IDS != 0 { - j, k := 0, 0 - // find start/end of header id - for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { - } - for k = j + 1; k < end && data[k] != '}'; k++ { - } - // extract header id iff found - if j < end && k < end { - id = string(data[j+2 : k]) - end = j - skip = k + 1 - for end > 0 && data[end-1] == ' ' { - end-- - } - } - } - for end > 0 && data[end-1] == '#' { - if isBackslashEscaped(data, end-1) { - break - } - end-- - } - for end > 0 && data[end-1] == ' ' { - end-- - } - if end > i { - if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { - id = SanitizedAnchorName(string(data[i:end])) - } - work := func() bool { - p.inline(out, data[i:end]) - return true - } - p.r.Header(out, work, level, id) - } - return skip -} - -func (p *parser) isUnderlinedHeader(data []byte) int { - // test of level 1 header - if data[0] == '=' { - i := skipChar(data, 1, '=') - i = skipChar(data, i, 
' ') - if data[i] == '\n' { - return 1 - } else { - return 0 - } - } - - // test of level 2 header - if data[0] == '-' { - i := skipChar(data, 1, '-') - i = skipChar(data, i, ' ') - if data[i] == '\n' { - return 2 - } else { - return 0 - } - } - - return 0 -} - -func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { - if data[0] != '%' { - return 0 - } - splitData := bytes.Split(data, []byte("\n")) - var i int - for idx, b := range splitData { - if !bytes.HasPrefix(b, []byte("%")) { - i = idx // - 1 - break - } - } - - data = bytes.Join(splitData[0:i], []byte("\n")) - p.r.TitleBlock(out, data) - - return len(data) -} - -func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { - var i, j int - - // identify the opening tag - if data[0] != '<' { - return 0 - } - curtag, tagfound := p.htmlFindTag(data[1:]) - - // handle special cases - if !tagfound { - // check for an HTML comment - if size := p.htmlComment(out, data, doRender); size > 0 { - return size - } - - // check for an
tag - if size := p.htmlHr(out, data, doRender); size > 0 { - return size - } - - // check for HTML CDATA - if size := p.htmlCDATA(out, data, doRender); size > 0 { - return size - } - - // no special case recognized - return 0 - } - - // look for an unindented matching closing tag - // followed by a blank line - found := false - /* - closetag := []byte("\n") - j = len(curtag) + 1 - for !found { - // scan for a closing tag at the beginning of a line - if skip := bytes.Index(data[j:], closetag); skip >= 0 { - j += skip + len(closetag) - } else { - break - } - - // see if it is the only thing on the line - if skip := p.isEmpty(data[j:]); skip > 0 { - // see if it is followed by a blank line/eof - j += skip - if j >= len(data) { - found = true - i = j - } else { - if skip := p.isEmpty(data[j:]); skip > 0 { - j += skip - found = true - i = j - } - } - } - } - */ - - // if not found, try a second pass looking for indented match - // but not if tag is "ins" or "del" (following original Markdown.pl) - if !found && curtag != "ins" && curtag != "del" { - i = 1 - for i < len(data) { - i++ - for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { - i++ - } - - if i+2+len(curtag) >= len(data) { - break - } - - j = p.htmlFindEnd(curtag, data[i-1:]) - - if j > 0 { - i += j - 1 - found = true - break - } - } - } - - if !found { - return 0 - } - - // the end of the block has been found - if doRender { - // trim newlines - end := i - for end > 0 && data[end-1] == '\n' { - end-- - } - p.r.BlockHtml(out, data[:end]) - } - - return i -} - -func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int { - // html block needs to end with a blank line - if i := p.isEmpty(data[start:]); i > 0 { - size := start + i - if doRender { - // trim trailing newlines - end := size - for end > 0 && data[end-1] == '\n' { - end-- - } - p.r.BlockHtml(out, data[:end]) - } - return size - } - return 0 -} - -// HTML comment, lax form -func (p *parser) htmlComment(out 
*bytes.Buffer, data []byte, doRender bool) int { - i := p.inlineHTMLComment(out, data) - return p.renderHTMLBlock(out, data, i, doRender) -} - -// HTML CDATA section -func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int { - const cdataTag = "') { - i++ - } - i++ - // no end-of-comment marker - if i >= len(data) { - return 0 - } - return p.renderHTMLBlock(out, data, i, doRender) -} - -// HR, which is the only self-closing block tag considered -func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int { - if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { - return 0 - } - if data[3] != ' ' && data[3] != '/' && data[3] != '>' { - // not an
tag after all; at least not a valid one - return 0 - } - - i := 3 - for data[i] != '>' && data[i] != '\n' { - i++ - } - - if data[i] == '>' { - return p.renderHTMLBlock(out, data, i+1, doRender) - } - - return 0 -} - -func (p *parser) htmlFindTag(data []byte) (string, bool) { - i := 0 - for isalnum(data[i]) { - i++ - } - key := string(data[:i]) - if _, ok := blockTags[key]; ok { - return key, true - } - return "", false -} - -func (p *parser) htmlFindEnd(tag string, data []byte) int { - // assume data[0] == '<' && data[1] == '/' already tested - - // check if tag is a match - closetag := []byte("") - if !bytes.HasPrefix(data, closetag) { - return 0 - } - i := len(closetag) - - // check that the rest of the line is blank - skip := 0 - if skip = p.isEmpty(data[i:]); skip == 0 { - return 0 - } - i += skip - skip = 0 - - if i >= len(data) { - return i - } - - if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { - return i - } - if skip = p.isEmpty(data[i:]); skip == 0 { - // following line must be blank - return 0 - } - - return i + skip -} - -func (*parser) isEmpty(data []byte) int { - // it is okay to call isEmpty on an empty buffer - if len(data) == 0 { - return 0 - } - - var i int - for i = 0; i < len(data) && data[i] != '\n'; i++ { - if data[i] != ' ' && data[i] != '\t' { - return 0 - } - } - return i + 1 -} - -func (*parser) isHRule(data []byte) bool { - i := 0 - - // skip up to three spaces - for i < 3 && data[i] == ' ' { - i++ - } - - // look at the hrule char - if data[i] != '*' && data[i] != '-' && data[i] != '_' { - return false - } - c := data[i] - - // the whole line must be the char or whitespace - n := 0 - for data[i] != '\n' { - switch { - case data[i] == c: - n++ - case data[i] != ' ': - return false - } - i++ - } - - return n >= 3 -} - -// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, -// and returns the end index if so, or 0 otherwise. It also returns the marker found. 
-// If syntax is not nil, it gets set to the syntax specified in the fence line. -// A final newline is mandatory to recognize the fence line, unless newlineOptional is true. -func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bool) (end int, marker string) { - i, size := 0, 0 - - // skip up to three spaces - for i < len(data) && i < 3 && data[i] == ' ' { - i++ - } - - // check for the marker characters: ~ or ` - if i >= len(data) { - return 0, "" - } - if data[i] != '~' && data[i] != '`' { - return 0, "" - } - - c := data[i] - - // the whole line must be the same char or whitespace - for i < len(data) && data[i] == c { - size++ - i++ - } - - // the marker char must occur at least 3 times - if size < 3 { - return 0, "" - } - marker = string(data[i-size : i]) - - // if this is the end marker, it must match the beginning marker - if oldmarker != "" && marker != oldmarker { - return 0, "" - } - - // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here - // into one, always get the info string, and discard it if the caller doesn't care. 
- if info != nil { - infoLength := 0 - i = skipChar(data, i, ' ') - - if i >= len(data) { - if newlineOptional && i == len(data) { - return i, marker - } - return 0, "" - } - - infoStart := i - - if data[i] == '{' { - i++ - infoStart++ - - for i < len(data) && data[i] != '}' && data[i] != '\n' { - infoLength++ - i++ - } - - if i >= len(data) || data[i] != '}' { - return 0, "" - } - - // strip all whitespace at the beginning and the end - // of the {} block - for infoLength > 0 && isspace(data[infoStart]) { - infoStart++ - infoLength-- - } - - for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { - infoLength-- - } - - i++ - } else { - for i < len(data) && !isverticalspace(data[i]) { - infoLength++ - i++ - } - } - - *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) - } - - i = skipChar(data, i, ' ') - if i >= len(data) || data[i] != '\n' { - if newlineOptional && i == len(data) { - return i, marker - } - return 0, "" - } - - return i + 1, marker // Take newline into account. -} - -// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, -// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. -// If doRender is true, a final newline is mandatory to recognize the fenced code block. -func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int { - var infoString string - beg, marker := isFenceLine(data, &infoString, "", false) - if beg == 0 || beg >= len(data) { - return 0 - } - - var work bytes.Buffer - - for { - // safe to assume beg < len(data) - - // check for the end of the code block - newlineOptional := !doRender - fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional) - if fenceEnd != 0 { - beg += fenceEnd - break - } - - // copy the current line - end := skipUntilChar(data, beg, '\n') + 1 - - // did we reach the end of the buffer without a closing marker? 
- if end >= len(data) { - return 0 - } - - // verbatim copy to the working buffer - if doRender { - work.Write(data[beg:end]) - } - beg = end - } - - if doRender { - p.r.BlockCode(out, work.Bytes(), infoString) - } - - return beg -} - -func (p *parser) table(out *bytes.Buffer, data []byte) int { - var header bytes.Buffer - i, columns := p.tableHeader(&header, data) - if i == 0 { - return 0 - } - - var body bytes.Buffer - - for i < len(data) { - pipes, rowStart := 0, i - for ; data[i] != '\n'; i++ { - if data[i] == '|' { - pipes++ - } - } - - if pipes == 0 { - i = rowStart - break - } - - // include the newline in data sent to tableRow - i++ - p.tableRow(&body, data[rowStart:i], columns, false) - } - - p.r.Table(out, header.Bytes(), body.Bytes(), columns) - - return i -} - -// check if the specified position is preceded by an odd number of backslashes -func isBackslashEscaped(data []byte, i int) bool { - backslashes := 0 - for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { - backslashes++ - } - return backslashes&1 == 1 -} - -func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { - i := 0 - colCount := 1 - for i = 0; data[i] != '\n'; i++ { - if data[i] == '|' && !isBackslashEscaped(data, i) { - colCount++ - } - } - - // doesn't look like a table header - if colCount == 1 { - return - } - - // include the newline in the data sent to tableRow - header := data[:i+1] - - // column count ignores pipes at beginning or end of line - if data[0] == '|' { - colCount-- - } - if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { - colCount-- - } - - columns = make([]int, colCount) - - // move on to the header underline - i++ - if i >= len(data) { - return - } - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - i = skipChar(data, i, ' ') - - // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 - // and trailing | optional on last column - col := 0 - for data[i] != '\n' { - dashes := 0 - - if data[i] == ':' { - i++ - columns[col] |= TABLE_ALIGNMENT_LEFT - dashes++ - } - for data[i] == '-' { - i++ - dashes++ - } - if data[i] == ':' { - i++ - columns[col] |= TABLE_ALIGNMENT_RIGHT - dashes++ - } - for data[i] == ' ' { - i++ - } - - // end of column test is messy - switch { - case dashes < 3: - // not a valid column - return - - case data[i] == '|' && !isBackslashEscaped(data, i): - // marker found, now skip past trailing whitespace - col++ - i++ - for data[i] == ' ' { - i++ - } - - // trailing junk found after last column - if col >= colCount && data[i] != '\n' { - return - } - - case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: - // something else found where marker was required - return - - case data[i] == '\n': - // marker is optional for the last column - col++ - - default: - // trailing junk found after last column - return - } - } - if col != colCount { - return - } - - p.tableRow(out, header, columns, true) - size = i + 1 - return -} - -func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { - i, col := 0, 0 - var rowWork bytes.Buffer - - if data[i] == '|' && !isBackslashEscaped(data, i) { - i++ - } - - for col = 0; col < len(columns) && i < len(data); col++ { - for data[i] == ' ' { - i++ - } - - cellStart := i - - for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { - i++ - } - - cellEnd := i - - // skip the end-of-cell marker, possibly taking us past end of buffer - i++ - - for cellEnd > cellStart && data[cellEnd-1] == ' ' { - cellEnd-- - } - - var cellWork bytes.Buffer - p.inline(&cellWork, data[cellStart:cellEnd]) - - if header { - p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) - } else { - p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) - } - } - - // pad it out with empty columns to get the right number - for ; col 
< len(columns); col++ { - if header { - p.r.TableHeaderCell(&rowWork, nil, columns[col]) - } else { - p.r.TableCell(&rowWork, nil, columns[col]) - } - } - - // silently ignore rows with too many cells - - p.r.TableRow(out, rowWork.Bytes()) -} - -// returns blockquote prefix length -func (p *parser) quotePrefix(data []byte) int { - i := 0 - for i < 3 && data[i] == ' ' { - i++ - } - if data[i] == '>' { - if data[i+1] == ' ' { - return i + 2 - } - return i + 1 - } - return 0 -} - -// blockquote ends with at least one blank line -// followed by something without a blockquote prefix -func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { - if p.isEmpty(data[beg:]) <= 0 { - return false - } - if end >= len(data) { - return true - } - return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 -} - -// parse a blockquote fragment -func (p *parser) quote(out *bytes.Buffer, data []byte) int { - var raw bytes.Buffer - beg, end := 0, 0 - for beg < len(data) { - end = beg - // Step over whole lines, collecting them. 
While doing that, check for - // fenced code and if one's found, incorporate it altogether, - // irregardless of any contents inside it - for data[end] != '\n' { - if p.flags&EXTENSION_FENCED_CODE != 0 { - if i := p.fencedCodeBlock(out, data[end:], false); i > 0 { - // -1 to compensate for the extra end++ after the loop: - end += i - 1 - break - } - } - end++ - } - end++ - - if pre := p.quotePrefix(data[beg:]); pre > 0 { - // skip the prefix - beg += pre - } else if p.terminateBlockquote(data, beg, end) { - break - } - - // this line is part of the blockquote - raw.Write(data[beg:end]) - beg = end - } - - var cooked bytes.Buffer - p.block(&cooked, raw.Bytes()) - p.r.BlockQuote(out, cooked.Bytes()) - return end -} - -// returns prefix length for block code -func (p *parser) codePrefix(data []byte) int { - if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { - return 4 - } - return 0 -} - -func (p *parser) code(out *bytes.Buffer, data []byte) int { - var work bytes.Buffer - - i := 0 - for i < len(data) { - beg := i - for data[i] != '\n' { - i++ - } - i++ - - blankline := p.isEmpty(data[beg:i]) > 0 - if pre := p.codePrefix(data[beg:i]); pre > 0 { - beg += pre - } else if !blankline { - // non-empty, non-prefixed line breaks the pre - i = beg - break - } - - // verbatim copy to the working buffeu - if blankline { - work.WriteByte('\n') - } else { - work.Write(data[beg:i]) - } - } - - // trim all the \n off the end of work - workbytes := work.Bytes() - eol := len(workbytes) - for eol > 0 && workbytes[eol-1] == '\n' { - eol-- - } - if eol != len(workbytes) { - work.Truncate(eol) - } - - work.WriteByte('\n') - - p.r.BlockCode(out, work.Bytes(), "") - - return i -} - -// returns unordered list item prefix -func (p *parser) uliPrefix(data []byte) int { - i := 0 - - // start with up to 3 spaces - for i < 3 && data[i] == ' ' { - i++ - } - - // need a *, +, or - followed by a space - if (data[i] != '*' && data[i] != '+' && data[i] != '-') || - data[i+1] != 
' ' { - return 0 - } - return i + 2 -} - -// returns ordered list item prefix -func (p *parser) oliPrefix(data []byte) int { - i := 0 - - // start with up to 3 spaces - for i < 3 && data[i] == ' ' { - i++ - } - - // count the digits - start := i - for data[i] >= '0' && data[i] <= '9' { - i++ - } - - // we need >= 1 digits followed by a dot and a space - if start == i || data[i] != '.' || data[i+1] != ' ' { - return 0 - } - return i + 2 -} - -// returns definition list item prefix -func (p *parser) dliPrefix(data []byte) int { - i := 0 - - // need a : followed by a spaces - if data[i] != ':' || data[i+1] != ' ' { - return 0 - } - for data[i] == ' ' { - i++ - } - return i + 2 -} - -// parse ordered or unordered list block -func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int { - i := 0 - flags |= LIST_ITEM_BEGINNING_OF_LIST - work := func() bool { - for i < len(data) { - skip := p.listItem(out, data[i:], &flags) - i += skip - - if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 { - break - } - flags &= ^LIST_ITEM_BEGINNING_OF_LIST - } - return true - } - - p.r.List(out, work, flags) - return i -} - -// Parse a single list item. -// Assumes initial prefix is already removed if this is a sublist. 
-func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { - // keep track of the indentation of the first line - itemIndent := 0 - for itemIndent < 3 && data[itemIndent] == ' ' { - itemIndent++ - } - - i := p.uliPrefix(data) - if i == 0 { - i = p.oliPrefix(data) - } - if i == 0 { - i = p.dliPrefix(data) - // reset definition term flag - if i > 0 { - *flags &= ^LIST_TYPE_TERM - } - } - if i == 0 { - // if in defnition list, set term flag and continue - if *flags&LIST_TYPE_DEFINITION != 0 { - *flags |= LIST_TYPE_TERM - } else { - return 0 - } - } - - // skip leading whitespace on first line - for data[i] == ' ' { - i++ - } - - // find the end of the line - line := i - for i > 0 && data[i-1] != '\n' { - i++ - } - - // get working buffer - var raw bytes.Buffer - - // put the first line into the working buffer - raw.Write(data[line:i]) - line = i - - // process the following lines - containsBlankLine := false - sublist := 0 - codeBlockMarker := "" - -gatherlines: - for line < len(data) { - i++ - - // find the end of this line - for data[i-1] != '\n' { - i++ - } - - // if it is an empty line, guess that it is part of this item - // and move on to the next line - if p.isEmpty(data[line:i]) > 0 { - containsBlankLine = true - raw.Write(data[line:i]) - line = i - continue - } - - // calculate the indentation - indent := 0 - for indent < 4 && line+indent < i && data[line+indent] == ' ' { - indent++ - } - - chunk := data[line+indent : i] - - if p.flags&EXTENSION_FENCED_CODE != 0 { - // determine if in or out of codeblock - // if in codeblock, ignore normal list processing - _, marker := isFenceLine(chunk, nil, codeBlockMarker, false) - if marker != "" { - if codeBlockMarker == "" { - // start of codeblock - codeBlockMarker = marker - } else { - // end of codeblock. 
- *flags |= LIST_ITEM_CONTAINS_BLOCK - codeBlockMarker = "" - } - } - // we are in a codeblock, write line, and continue - if codeBlockMarker != "" || marker != "" { - raw.Write(data[line+indent : i]) - line = i - continue gatherlines - } - } - - // evaluate how this line fits in - switch { - // is this a nested list item? - case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || - p.oliPrefix(chunk) > 0 || - p.dliPrefix(chunk) > 0: - - if containsBlankLine { - // end the list if the type changed after a blank line - if indent <= itemIndent && - ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) || - (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) { - - *flags |= LIST_ITEM_END_OF_LIST - break gatherlines - } - *flags |= LIST_ITEM_CONTAINS_BLOCK - } - - // to be a nested list, it must be indented more - // if not, it is the next item in the same list - if indent <= itemIndent { - break gatherlines - } - - // is this the first item in the nested list? - if sublist == 0 { - sublist = raw.Len() - } - - // is this a nested prefix header? - case p.isPrefixHeader(chunk): - // if the header is not indented, it is not nested in the list - // and thus ends the list - if containsBlankLine && indent < 4 { - *flags |= LIST_ITEM_END_OF_LIST - break gatherlines - } - *flags |= LIST_ITEM_CONTAINS_BLOCK - - // anything following an empty line is only part - // of this item if it is indented 4 spaces - // (regardless of the indentation of the beginning of the item) - case containsBlankLine && indent < 4: - if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { - // is the next item still a part of this list? 
- next := i - for data[next] != '\n' { - next++ - } - for next < len(data)-1 && data[next] == '\n' { - next++ - } - if i < len(data)-1 && data[i] != ':' && data[next] != ':' { - *flags |= LIST_ITEM_END_OF_LIST - } - } else { - *flags |= LIST_ITEM_END_OF_LIST - } - break gatherlines - - // a blank line means this should be parsed as a block - case containsBlankLine: - *flags |= LIST_ITEM_CONTAINS_BLOCK - } - - containsBlankLine = false - - // add the line into the working buffer without prefix - raw.Write(data[line+indent : i]) - - line = i - } - - // If reached end of data, the Renderer.ListItem call we're going to make below - // is definitely the last in the list. - if line >= len(data) { - *flags |= LIST_ITEM_END_OF_LIST - } - - rawBytes := raw.Bytes() - - // render the contents of the list item - var cooked bytes.Buffer - if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 { - // intermediate render of block item, except for definition term - if sublist > 0 { - p.block(&cooked, rawBytes[:sublist]) - p.block(&cooked, rawBytes[sublist:]) - } else { - p.block(&cooked, rawBytes) - } - } else { - // intermediate render of inline item - if sublist > 0 { - p.inline(&cooked, rawBytes[:sublist]) - p.block(&cooked, rawBytes[sublist:]) - } else { - p.inline(&cooked, rawBytes) - } - } - - // render the actual list item - cookedBytes := cooked.Bytes() - parsedEnd := len(cookedBytes) - - // strip trailing newlines - for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' { - parsedEnd-- - } - p.r.ListItem(out, cookedBytes[:parsedEnd], *flags) - - return line -} - -// render a single paragraph that has already been parsed out -func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) { - if len(data) == 0 { - return - } - - // trim leading spaces - beg := 0 - for data[beg] == ' ' { - beg++ - } - - // trim trailing newline - end := len(data) - 1 - - // trim trailing spaces - for end > beg && data[end-1] == ' ' { - end-- - } - - work := func() bool { - 
p.inline(out, data[beg:end]) - return true - } - p.r.Paragraph(out, work) -} - -func (p *parser) paragraph(out *bytes.Buffer, data []byte) int { - // prev: index of 1st char of previous line - // line: index of 1st char of current line - // i: index of cursor/end of current line - var prev, line, i int - - // keep going until we find something to mark the end of the paragraph - for i < len(data) { - // mark the beginning of the current line - prev = line - current := data[i:] - line = i - - // did we find a blank line marking the end of the paragraph? - if n := p.isEmpty(current); n > 0 { - // did this blank line followed by a definition list item? - if p.flags&EXTENSION_DEFINITION_LISTS != 0 { - if i < len(data)-1 && data[i+1] == ':' { - return p.list(out, data[prev:], LIST_TYPE_DEFINITION) - } - } - - p.renderParagraph(out, data[:i]) - return i + n - } - - // an underline under some text marks a header, so our paragraph ended on prev line - if i > 0 { - if level := p.isUnderlinedHeader(current); level > 0 { - // render the paragraph - p.renderParagraph(out, data[:prev]) - - // ignore leading and trailing whitespace - eol := i - 1 - for prev < eol && data[prev] == ' ' { - prev++ - } - for eol > prev && data[eol-1] == ' ' { - eol-- - } - - // render the header - // this ugly double closure avoids forcing variables onto the heap - work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { - return func() bool { - pp.inline(o, d) - return true - } - }(out, p, data[prev:eol]) - - id := "" - if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { - id = SanitizedAnchorName(string(data[prev:eol])) - } - - p.r.Header(out, work, level, id) - - // find the end of the underline - for data[i] != '\n' { - i++ - } - return i - } - } - - // if the next line starts a block of HTML, then the paragraph ends here - if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { - if data[i] == '<' && p.html(out, current, false) > 0 { - // rewind to before the HTML block - p.renderParagraph(out, data[:i]) - 
return i - } - } - - // if there's a prefixed header or a horizontal rule after this, paragraph is over - if p.isPrefixHeader(current) || p.isHRule(current) { - p.renderParagraph(out, data[:i]) - return i - } - - // if there's a fenced code block, paragraph is over - if p.flags&EXTENSION_FENCED_CODE != 0 { - if p.fencedCodeBlock(out, current, false) > 0 { - p.renderParagraph(out, data[:i]) - return i - } - } - - // if there's a definition list item, prev line is a definition term - if p.flags&EXTENSION_DEFINITION_LISTS != 0 { - if p.dliPrefix(current) != 0 { - return p.list(out, data[prev:], LIST_TYPE_DEFINITION) - } - } - - // if there's a list after this, paragraph is over - if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { - if p.uliPrefix(current) != 0 || - p.oliPrefix(current) != 0 || - p.quotePrefix(current) != 0 || - p.codePrefix(current) != 0 { - p.renderParagraph(out, data[:i]) - return i - } - } - - // otherwise, scan to the beginning of the next line - for data[i] != '\n' { - i++ - } - i++ - } - - p.renderParagraph(out, data[:i]) - return i -} - -// SanitizedAnchorName returns a sanitized anchor name for the given text. -// -// It implements the algorithm specified in the package comment. -func SanitizedAnchorName(text string) string { - var anchorName []rune - futureDash := false - for _, r := range text { - switch { - case unicode.IsLetter(r) || unicode.IsNumber(r): - if futureDash && len(anchorName) > 0 { - anchorName = append(anchorName, '-') - } - futureDash = false - anchorName = append(anchorName, unicode.ToLower(r)) - default: - futureDash = true - } - } - return string(anchorName) -} diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go deleted file mode 100644 index 9656c42a1..000000000 --- a/vendor/github.com/russross/blackfriday/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Package blackfriday is a Markdown processor. 
-// -// It translates plain text with simple formatting rules into HTML or LaTeX. -// -// Sanitized Anchor Names -// -// Blackfriday includes an algorithm for creating sanitized anchor names -// corresponding to a given input text. This algorithm is used to create -// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The -// algorithm is specified below, so that other packages can create -// compatible anchor names and links to those anchors. -// -// The algorithm iterates over the input text, interpreted as UTF-8, -// one Unicode code point (rune) at a time. All runes that are letters (category L) -// or numbers (category N) are considered valid characters. They are mapped to -// lower case, and included in the output. All other runes are considered -// invalid characters. Invalid characters that preceed the first valid character, -// as well as invalid character that follow the last valid character -// are dropped completely. All other sequences of invalid characters -// between two valid characters are replaced with a single dash character '-'. -// -// SanitizedAnchorName exposes this functionality, and can be used to -// create compatible links to the anchor names generated by blackfriday. -// This algorithm is also implemented in a small standalone package at -// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients -// that want a small package and don't need full functionality of blackfriday. -package blackfriday - -// NOTE: Keep Sanitized Anchor Name algorithm in sync with package -// github.com/shurcooL/sanitized_anchor_name. -// Otherwise, users of sanitized_anchor_name will get anchor names -// that are incompatible with those generated by blackfriday. 
diff --git a/vendor/github.com/russross/blackfriday/go.mod b/vendor/github.com/russross/blackfriday/go.mod deleted file mode 100644 index b05561a06..000000000 --- a/vendor/github.com/russross/blackfriday/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/russross/blackfriday diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go deleted file mode 100644 index e0a6c69c9..000000000 --- a/vendor/github.com/russross/blackfriday/html.go +++ /dev/null @@ -1,938 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// -// HTML rendering backend -// -// - -package blackfriday - -import ( - "bytes" - "fmt" - "regexp" - "strconv" - "strings" -) - -// Html renderer configuration options. -const ( - HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks - HTML_SKIP_STYLE // skip embedded