use traditional controller tool to generate code

This commit is contained in:
runzexia
2019-08-07 21:05:12 +08:00
parent bd5f916557
commit e5d59b75a8
86 changed files with 9764 additions and 116 deletions

View File

@@ -69,14 +69,11 @@ vet: generate-apis
manifests: manifests:
go run vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go all go run vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go all
crds: generate-apis
$(CONTROLLER_GEN) crd:trivialVersions=true paths="./pkg/apis/devops/..." output:crd:artifacts:config=config/crds
deploy: manifests deploy: manifests
kubectl apply -f config/crds kubectl apply -f config/crds
kustomize build config/default | kubectl apply -f - kustomize build config/default | kubectl apply -f -
generate: crds generate:
go generate ./pkg/... ./cmd/... go generate ./pkg/... ./cmd/...
# Generate code # Generate code
generate-apis: controller-gen generate-apis: controller-gen
@@ -89,7 +86,7 @@ docker-build: all
# Run tests # Run tests
test: test:
export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT=1m; go test ./pkg/... ./cmd/... -coverprofile cover.out -p 1 export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT=1m; go test ./pkg/... ./cmd/... -coverprofile cover.out
.PHONY: clean .PHONY: clean
clean: clean:

View File

@@ -1,9 +1,9 @@
---
apiVersion: apiextensions.k8s.io/v1beta1 apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition kind: CustomResourceDefinition
metadata: metadata:
creationTimestamp: null creationTimestamp: null
labels:
controller-tools.k8s.io: "1.0"
name: s2ibinaries.devops.kubesphere.io name: s2ibinaries.devops.kubesphere.io
spec: spec:
additionalPrinterColumns: additionalPrinterColumns:
@@ -23,11 +23,9 @@ spec:
names: names:
kind: S2iBinary kind: S2iBinary
plural: s2ibinaries plural: s2ibinaries
scope: "" scope: Namespaced
subresources: {}
validation: validation:
openAPIV3Schema: openAPIV3Schema:
description: S2iBinary is the Schema for the s2ibinaries API
properties: properties:
apiVersion: apiVersion:
description: 'APIVersion defines the versioned schema of this representation description: 'APIVersion defines the versioned schema of this representation
@@ -42,7 +40,6 @@ spec:
metadata: metadata:
type: object type: object
spec: spec:
description: S2iBinarySpec defines the desired state of S2iBinary
properties: properties:
downloadURL: downloadURL:
description: DownloadURL in KubeSphere description: DownloadURL in KubeSphere
@@ -62,17 +59,12 @@ spec:
type: string type: string
type: object type: object
status: status:
description: S2iBinaryStatus defines the observed state of S2iBinary
properties: properties:
phase: phase:
description: Phase is status of S2iBinary . Possible value is "Ready","UnableToDownload" description: Phase is status of S2iBinary . Possible value is "Ready","UnableToDownload"
type: string type: string
type: object type: object
type: object version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
status: status:
acceptedNames: acceptedNames:
kind: "" kind: ""

View File

@@ -83,6 +83,16 @@ spec:
http: http:
description: HTTP connection pool settings. description: HTTP connection pool settings.
properties: properties:
http1MaxPendingRequests:
description: Maximum number of pending HTTP
requests to a destination. Default 1024.
format: int32
type: integer
http2MaxRequests:
description: Maximum number of requests to
a backend. Default 1024.
format: int32
type: integer
maxRequestsPerConnection: maxRequestsPerConnection:
description: Maximum number of requests per description: Maximum number of requests per
connection to a backend. Setting this parameter connection to a backend. Setting this parameter
@@ -211,6 +221,17 @@ spec:
http: http:
description: HTTP connection pool settings. description: HTTP connection pool settings.
properties: properties:
http1MaxPendingRequests:
description: Maximum number of pending
HTTP requests to a destination. Default
1024.
format: int32
type: integer
http2MaxRequests:
description: Maximum number of requests
to a backend. Default 1024.
format: int32
type: integer
maxRequestsPerConnection: maxRequestsPerConnection:
description: Maximum number of requests description: Maximum number of requests
per connection to a backend. Setting per connection to a backend. Setting
@@ -464,6 +485,16 @@ spec:
http: http:
description: HTTP connection pool settings. description: HTTP connection pool settings.
properties: properties:
http1MaxPendingRequests:
description: Maximum number of pending HTTP requests
to a destination. Default 1024.
format: int32
type: integer
http2MaxRequests:
description: Maximum number of requests to a backend.
Default 1024.
format: int32
type: integer
maxRequestsPerConnection: maxRequestsPerConnection:
description: Maximum number of requests per connection description: Maximum number of requests per connection
to a backend. Setting this parameter to 1 disables to a backend. Setting this parameter to 1 disables
@@ -586,6 +617,16 @@ spec:
http: http:
description: HTTP connection pool settings. description: HTTP connection pool settings.
properties: properties:
http1MaxPendingRequests:
description: Maximum number of pending HTTP
requests to a destination. Default 1024.
format: int32
type: integer
http2MaxRequests:
description: Maximum number of requests to
a backend. Default 1024.
format: int32
type: integer
maxRequestsPerConnection: maxRequestsPerConnection:
description: Maximum number of requests per description: Maximum number of requests per
connection to a backend. Setting this parameter connection to a backend. Setting this parameter

View File

@@ -4,100 +4,140 @@ metadata:
creationTimestamp: null creationTimestamp: null
name: manager-role name: manager-role
rules: rules:
- apiGroups: - apiGroups:
- "" - core.kubesphere.io
resources: resources:
- namespaces - namespaces
verbs: verbs:
- get - get
- list - list
- watch - watch
- create - create
- update - update
- patch - patch
- delete - delete
- apiGroups: - apiGroups:
- "" - core.kubesphere.io
resources: resources:
- namespaces/status - namespaces/status
verbs: verbs:
- get - get
- update - update
- patch - patch
- apiGroups: - apiGroups:
- "" - core.kubesphere.io
resources: resources:
- namespaces - namespaces
verbs: verbs:
- get - get
- list - list
- watch - watch
- create - create
- update - update
- patch - patch
- delete - delete
- apiGroups: - apiGroups:
- "" - core.kubesphere.io
resources: resources:
- namespaces/status - namespaces/status
verbs: verbs:
- get - get
- update - update
- patch - patch
- apiGroups: - apiGroups:
- tenant.kubesphere.io - apps
resources: resources:
- workspaces - deployments
verbs: verbs:
- get - get
- list - list
- watch - watch
- create - create
- update - update
- patch - patch
- delete - delete
- apiGroups: - apiGroups:
- tenant.kubesphere.io - apps
resources: resources:
- workspaces/status - deployments/status
verbs: verbs:
- get - get
- update - update
- patch - patch
- apiGroups: - apiGroups:
- admissionregistration.k8s.io - devops.kubesphere.io
resources: resources:
- mutatingwebhookconfigurations - s2ibinaries
- validatingwebhookconfigurations verbs:
verbs: - get
- get - list
- list - watch
- watch - create
- create - update
- update - patch
- patch - delete
- delete - apiGroups:
- apiGroups: - devops.kubesphere.io
- "" resources:
resources: - s2ibinaries/status
- secrets verbs:
verbs: - get
- get - update
- list - patch
- watch - apiGroups:
- create - tenant.kubesphere.io
- update resources:
- patch - workspaces
- delete verbs:
- apiGroups: - get
- "" - list
resources: - watch
- services - create
verbs: - update
- get - patch
- list - delete
- watch - apiGroups:
- create - tenant.kubesphere.io
- update resources:
- patch - workspaces/status
- delete verbs:
- get
- update
- patch
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- create
- update
- patch
- delete

2
go.mod
View File

@@ -41,6 +41,7 @@ require (
github.com/go-openapi/spec v0.19.2 github.com/go-openapi/spec v0.19.2
github.com/go-redis/redis v6.15.2+incompatible github.com/go-redis/redis v6.15.2+incompatible
github.com/go-sql-driver/mysql v1.4.1 github.com/go-sql-driver/mysql v1.4.1
github.com/gobuffalo/flect v0.1.5 // indirect
github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6 github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6
github.com/golang/example v0.0.0-20170904185048-46695d81d1fa github.com/golang/example v0.0.0-20170904185048-46695d81d1fa
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
@@ -129,6 +130,7 @@ require (
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 // indirect k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 // indirect
sigs.k8s.io/application v0.0.0-20190404151855-67ae7f915d4e sigs.k8s.io/application v0.0.0-20190404151855-67ae7f915d4e
sigs.k8s.io/controller-runtime v0.1.10 sigs.k8s.io/controller-runtime v0.1.10
sigs.k8s.io/controller-tools v0.1.12
sigs.k8s.io/testing_frameworks v0.1.1 // indirect sigs.k8s.io/testing_frameworks v0.1.1 // indirect
sigs.k8s.io/yaml v1.1.0 // indirect sigs.k8s.io/yaml v1.1.0 // indirect
) )

5
go.sum
View File

@@ -121,6 +121,8 @@ github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo=
github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6 h1:kumyNm8Vr8cbVm/aLQYTbDE3SKCbbn5HEVoDp/Dyyfc= github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6 h1:kumyNm8Vr8cbVm/aLQYTbDE3SKCbbn5HEVoDp/Dyyfc=
github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6/go.mod h1:K/9g3pPouf13kP5K7pdriQEJAy272R9yXuWuDIEWJTM= github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6/go.mod h1:K/9g3pPouf13kP5K7pdriQEJAy272R9yXuWuDIEWJTM=
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
@@ -301,6 +303,7 @@ github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009 h1:3wBL/e/qjpSYaXac
github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009/go.mod h1:dVvZuWJd174umvm5g8CmZD6S2GWwHKtpK/0ZPHswuNo= github.com/sony/sonyflake v0.0.0-20181109022403-6d5bd6181009/go.mod h1:dVvZuWJd174umvm5g8CmZD6S2GWwHKtpK/0ZPHswuNo=
github.com/speps/go-hashids v2.0.0+incompatible h1:kSfxGfESueJKTx0mpER9Y/1XHl+FVQjtCqRyYcviFbw= github.com/speps/go-hashids v2.0.0+incompatible h1:kSfxGfESueJKTx0mpER9Y/1XHl+FVQjtCqRyYcviFbw=
github.com/speps/go-hashids v2.0.0+incompatible/go.mod h1:P7hqPzMdnZOfyIk+xrlG1QaSMw+gCBdHKsBDnhpaZvc= github.com/speps/go-hashids v2.0.0+incompatible/go.mod h1:P7hqPzMdnZOfyIk+xrlG1QaSMw+gCBdHKsBDnhpaZvc=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
@@ -462,6 +465,8 @@ sigs.k8s.io/application v0.0.0-20190404151855-67ae7f915d4e h1:/TWUhUxC+Q5uMFUizx
sigs.k8s.io/application v0.0.0-20190404151855-67ae7f915d4e/go.mod h1:9C86g0wiFn8jtZjgJepSx188uJeWLGWTbcCycu5p8mU= sigs.k8s.io/application v0.0.0-20190404151855-67ae7f915d4e/go.mod h1:9C86g0wiFn8jtZjgJepSx188uJeWLGWTbcCycu5p8mU=
sigs.k8s.io/controller-runtime v0.1.10 h1:amLOmcekVdnsD1uIpmgRqfTbQWJ2qxvQkcdeFhcotn4= sigs.k8s.io/controller-runtime v0.1.10 h1:amLOmcekVdnsD1uIpmgRqfTbQWJ2qxvQkcdeFhcotn4=
sigs.k8s.io/controller-runtime v0.1.10/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8= sigs.k8s.io/controller-runtime v0.1.10/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8=
sigs.k8s.io/controller-tools v0.1.12 h1:LW8Tfywz+epjYiySSOYWFQl1O1y0os+ZWf22XJmsFww=
sigs.k8s.io/controller-tools v0.1.12/go.mod h1:6g08p9m9G/So3sBc1AOQifHfhxH/mb6Sc4z0LMI8XMw=
sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs=
sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=

View File

@@ -34,7 +34,7 @@ type S2iBinarySpec struct {
//DownloadURL in KubeSphere //DownloadURL in KubeSphere
DownloadURL string `json:"downloadURL,omitempty"` DownloadURL string `json:"downloadURL,omitempty"`
// UploadTime is last upload time // UploadTime is last upload time
UploadTimeStamp metav1.Time `json:"uploadTimeStamp,omitempty"` UploadTimeStamp *metav1.Time `json:"uploadTimeStamp,omitempty"`
} }
// S2iBinaryStatus defines the observed state of S2iBinary // S2iBinaryStatus defines the observed state of S2iBinary

View File

@@ -8,4 +8,5 @@ import (
_ "k8s.io/code-generator/cmd/informer-gen" _ "k8s.io/code-generator/cmd/informer-gen"
_ "k8s.io/code-generator/cmd/lister-gen" _ "k8s.io/code-generator/cmd/lister-gen"
_ "k8s.io/code-generator/cmd/deepcopy-gen" _ "k8s.io/code-generator/cmd/deepcopy-gen"
_ "sigs.k8s.io/controller-tools/cmd/controller-gen"
) )

29
vendor/github.com/gobuffalo/flect/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,29 @@
*.log
.DS_Store
doc
tmp
pkg
*.gem
*.pid
coverage
coverage.data
build/*
*.pbxuser
*.mode1v3
.svn
profile
.console_history
.sass-cache/*
.rake_tasks~
*.log.lck
solr/
.jhw-cache/
jhw.*
*.sublime*
node_modules/
dist/
generated/
.vendor/
bin/*
gin-bin
.idea/

3
vendor/github.com/gobuffalo/flect/.gometalinter.json generated vendored Normal file
View File

@@ -0,0 +1,3 @@
{
"Enable": ["vet", "golint", "goimports", "deadcode", "gotype", "ineffassign", "misspell", "nakedret", "unconvert", "megacheck", "varcheck"]
}

36
vendor/github.com/gobuffalo/flect/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,36 @@
language: go
sudo: false
matrix:
include:
- os: linux
go: "1.9.x"
- os: windows
go: "1.9.x"
- os: linux
go: "1.10.x"
- os: windows
go: "1.10.x"
- os: linux
go: "1.11.x"
env:
- GO111MODULE=off
- os: windows
go: "1.11.x"
env:
- GO111MODULE=off
- os: linux
go: "1.11.x"
env:
- GO111MODULE=on
- os: windows
go: "1.11.x"
env:
- GO111MODULE=on
install: false
script:
- go get -v -t ./...
- go test -v -timeout=5s -race ./...

21
vendor/github.com/gobuffalo/flect/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2019 Mark Bates
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

61
vendor/github.com/gobuffalo/flect/Makefile generated vendored Normal file
View File

@@ -0,0 +1,61 @@
TAGS ?= ""
GO_BIN ?= "go"
install:
$(GO_BIN) install -tags ${TAGS} -v .
make tidy
tidy:
ifeq ($(GO111MODULE),on)
$(GO_BIN) mod tidy
else
echo skipping go mod tidy
endif
deps:
$(GO_BIN) get -tags ${TAGS} -t ./...
make tidy
build:
$(GO_BIN) build -v .
make tidy
test:
$(GO_BIN) test -cover -tags ${TAGS} ./...
make tidy
ci-deps:
$(GO_BIN) get -tags ${TAGS} -t ./...
ci-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
lint:
go get github.com/golangci/golangci-lint/cmd/golangci-lint
golangci-lint run --enable-all
make tidy
update:
ifeq ($(GO111MODULE),on)
rm go.*
$(GO_BIN) mod init
$(GO_BIN) mod tidy
else
$(GO_BIN) get -u -tags ${TAGS}
endif
make test
make install
make tidy
release-test:
$(GO_BIN) test -tags ${TAGS} -race ./...
make tidy
release:
$(GO_BIN) get github.com/gobuffalo/release
make tidy
release -y -f version.go --skip-packr
make tidy

36
vendor/github.com/gobuffalo/flect/README.md generated vendored Normal file
View File

@@ -0,0 +1,36 @@
# Flect
<p align="center">
<a href="https://godoc.org/github.com/gobuffalo/flect"><img src="https://godoc.org/github.com/gobuffalo/flect?status.svg" alt="GoDoc" /></a>
<a href="https://dev.azure.com/markbates/buffalo/_build/latest?definitionId=51&branchName=master"><img src="https://dev.azure.com/markbates/buffalo/_apis/build/status/gobuffalo.flect?branchName=master" alt="CI" /></a>
<a href="https://goreportcard.com/report/github.com/gobuffalo/flect"><img src="https://goreportcard.com/badge/github.com/gobuffalo/flect" alt="Go Report Card" /></a>
</p>
This is a new inflection engine to replace [https://github.com/markbates/inflect](https://github.com/markbates/inflect) designed to be more modular, more readable, and easier to fix issues on than the original.
## Installation
```bash
$ go get -u -v github.com/gobuffalo/flect
```
## `github.com/gobuffalo/flect`
<a href="https://godoc.org/github.com/gobuffalo/flect"><img src="https://godoc.org/github.com/gobuffalo/flect?status.svg" alt="GoDoc" /></a>
The `github.com/gobuffalo/flect` package contains "basic" inflection tools, like pluralization, singularization, etc...
### The `Ident` Type
In addition to helpful methods that take in a `string` and return a `string`, there is an `Ident` type that can be used to create new, custom, inflection rules.
The `Ident` type contains two fields.
* `Original` - This is the original `string` that was used to create the `Ident`
* `Parts` - This is a `[]string` that represents all of the "parts" of the string, that have been split apart, making the segments easier to work with
Examples of creating new inflection rules using `Ident` can be found in the `github.com/gobuffalo/flect/name` package.
## `github.com/gobuffalo/flect/name`
<a href="https://godoc.org/github.com/gobuffalo/flect/name"><img src="https://godoc.org/github.com/gobuffalo/flect/name?status.svg" alt="GoDoc" /></a>
The `github.com/gobuffalo/flect/name` package contains more "business" inflection rules like creating proper names, table names, etc...

10
vendor/github.com/gobuffalo/flect/SHOULDERS.md generated vendored Normal file
View File

@@ -0,0 +1,10 @@
# github.com/gobuffalo/flect Stands on the Shoulders of Giants
github.com/gobuffalo/flect does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants, this project would not be possible. Please make sure to check them out and thank them for all of their hard work.
Thank you to the following **GIANTS**:
* [github.com/davecgh/go-spew](https://godoc.org/github.com/davecgh/go-spew)
* [github.com/stretchr/testify](https://godoc.org/github.com/stretchr/testify)

152
vendor/github.com/gobuffalo/flect/acronyms.go generated vendored Normal file
View File

@@ -0,0 +1,152 @@
package flect
import "sync"
var acronymsMoot = &sync.RWMutex{}
var baseAcronyms = map[string]bool{
"OK": true,
"UTF8": true,
"HTML": true,
"JSON": true,
"JWT": true,
"ID": true,
"UUID": true,
"SQL": true,
"ACK": true,
"ACL": true,
"ADSL": true,
"AES": true,
"ANSI": true,
"API": true,
"ARP": true,
"ATM": true,
"BGP": true,
"BSS": true,
"CCITT": true,
"CHAP": true,
"CIDR": true,
"CIR": true,
"CLI": true,
"CPE": true,
"CPU": true,
"CRC": true,
"CRT": true,
"CSMA": true,
"CMOS": true,
"DCE": true,
"DEC": true,
"DES": true,
"DHCP": true,
"DNS": true,
"DRAM": true,
"DSL": true,
"DSLAM": true,
"DTE": true,
"DMI": true,
"EHA": true,
"EIA": true,
"EIGRP": true,
"EOF": true,
"ESS": true,
"FCC": true,
"FCS": true,
"FDDI": true,
"FTP": true,
"GBIC": true,
"gbps": true,
"GEPOF": true,
"HDLC": true,
"HTTP": true,
"HTTPS": true,
"IANA": true,
"ICMP": true,
"IDF": true,
"IDS": true,
"IEEE": true,
"IETF": true,
"IMAP": true,
"IP": true,
"IPS": true,
"ISDN": true,
"ISP": true,
"kbps": true,
"LACP": true,
"LAN": true,
"LAPB": true,
"LAPF": true,
"LLC": true,
"MAC": true,
"Mbps": true,
"MC": true,
"MDF": true,
"MIB": true,
"MoCA": true,
"MPLS": true,
"MTU": true,
"NAC": true,
"NAT": true,
"NBMA": true,
"NIC": true,
"NRZ": true,
"NRZI": true,
"NVRAM": true,
"OSI": true,
"OSPF": true,
"OUI": true,
"PAP": true,
"PAT": true,
"PC": true,
"PIM": true,
"PCM": true,
"PDU": true,
"POP3": true,
"POTS": true,
"PPP": true,
"PPTP": true,
"PTT": true,
"PVST": true,
"RAM": true,
"RARP": true,
"RFC": true,
"RIP": true,
"RLL": true,
"ROM": true,
"RSTP": true,
"RTP": true,
"RCP": true,
"SDLC": true,
"SFD": true,
"SFP": true,
"SLARP": true,
"SLIP": true,
"SMTP": true,
"SNA": true,
"SNAP": true,
"SNMP": true,
"SOF": true,
"SRAM": true,
"SSH": true,
"SSID": true,
"STP": true,
"SYN": true,
"TDM": true,
"TFTP": true,
"TIA": true,
"TOFU": true,
"UDP": true,
"URL": true,
"URI": true,
"USB": true,
"UTP": true,
"VC": true,
"VLAN": true,
"VLSM": true,
"VPN": true,
"W3C": true,
"WAN": true,
"WEP": true,
"WiFi": true,
"WPA": true,
"WWW": true,
}

71
vendor/github.com/gobuffalo/flect/azure-pipelines.yml generated vendored Normal file
View File

@@ -0,0 +1,71 @@
variables:
GOBIN: "$(GOPATH)/bin" # Go binaries path
GOPATH: "$(system.defaultWorkingDirectory)/gopath" # Go workspace path
modulePath: "$(GOPATH)/src/github.com/$(build.repository.name)" # Path to the module"s code
jobs:
- job: Windows
pool:
vmImage: "vs2017-win2016"
strategy:
matrix:
go 1.10:
go_version: "1.10"
go 1.11 (on):
go_version: "1.11.5"
GO111MODULE: "on"
go 1.11 (off):
go_version: "1.11.5"
GO111MODULE: "off"
go 1.12 (on):
go_version: "1.12"
GO111MODULE: "on"
go 1.12 (off):
go_version: "1.12"
GO111MODULE: "off"
steps:
- template: azure-tests.yml
- job: macOS
pool:
vmImage: "macOS-10.13"
strategy:
matrix:
go 1.10:
go_version: "1.10"
go 1.11 (on):
go_version: "1.11.5"
GO111MODULE: "on"
go 1.11 (off):
go_version: "1.11.5"
GO111MODULE: "off"
go 1.12 (on):
go_version: "1.12"
GO111MODULE: "on"
go 1.12 (off):
go_version: "1.12"
GO111MODULE: "off"
steps:
- template: azure-tests.yml
- job: Linux
pool:
vmImage: "ubuntu-16.04"
strategy:
matrix:
go 1.10:
go_version: "1.10"
go 1.11 (on):
go_version: "1.11.5"
GO111MODULE: "on"
go 1.11 (off):
go_version: "1.11.5"
GO111MODULE: "off"
go 1.12 (on):
go_version: "1.12"
GO111MODULE: "on"
go 1.12 (off):
go_version: "1.12"
GO111MODULE: "off"
steps:
- template: azure-tests.yml

19
vendor/github.com/gobuffalo/flect/azure-tests.yml generated vendored Normal file
View File

@@ -0,0 +1,19 @@
steps:
- task: GoTool@0
inputs:
version: $(go_version)
- task: Bash@3
inputs:
targetType: inline
script: |
mkdir -p "$(GOBIN)"
mkdir -p "$(GOPATH)/pkg"
mkdir -p "$(modulePath)"
shopt -s extglob
mv !(gopath) "$(modulePath)"
displayName: "Setup Go Workspace"
- script: |
go get -t -v ./...
go test -race ./...
workingDirectory: "$(modulePath)"
displayName: "Tests"

48
vendor/github.com/gobuffalo/flect/camelize.go generated vendored Normal file
View File

@@ -0,0 +1,48 @@
package flect
import (
"strings"
"unicode"
)
// Camelize returns a camelize version of a string
// bob dylan = bobDylan
// widget_id = widgetID
// WidgetID = widgetID
func Camelize(s string) string {
return New(s).Camelize().String()
}
// Camelize returns a camelize version of a string
// bob dylan = bobDylan
// widget_id = widgetID
// WidgetID = widgetID
func (i Ident) Camelize() Ident {
var out []string
for i, part := range i.Parts {
var x string
var capped bool
if strings.ToLower(part) == "id" {
out = append(out, "ID")
continue
}
for _, c := range part {
if unicode.IsLetter(c) || unicode.IsDigit(c) {
if i == 0 {
x += string(unicode.ToLower(c))
continue
}
if !capped {
capped = true
x += string(unicode.ToUpper(c))
continue
}
x += string(c)
}
}
if x != "" {
out = append(out, x)
}
}
return New(strings.Join(out, ""))
}

27
vendor/github.com/gobuffalo/flect/capitalize.go generated vendored Normal file
View File

@@ -0,0 +1,27 @@
package flect
import "unicode"
// Capitalize will cap the first letter of string
// user = User
// bob dylan = Bob dylan
// widget_id = Widget_id
func Capitalize(s string) string {
return New(s).Capitalize().String()
}
// Capitalize will cap the first letter of string
// user = User
// bob dylan = Bob dylan
// widget_id = Widget_id
func (i Ident) Capitalize() Ident {
var x string
if len(i.Parts) == 0 {
return New("")
}
x = string(unicode.ToTitle(rune(i.Original[0])))
if len(i.Original) > 1 {
x += i.Original[1:]
}
return New(x)
}

83
vendor/github.com/gobuffalo/flect/custom_data.go generated vendored Normal file
View File

@@ -0,0 +1,83 @@
package flect
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
)
func init() {
loadCustomData("inflections.json", "INFLECT_PATH", "could not read inflection file", LoadInflections)
loadCustomData("acronyms.json", "ACRONYMS_PATH", "could not read acronyms file", LoadAcronyms)
}
//CustomDataParser are functions that parse data like acronyms or
//plurals in the shape of a io.Reader it receives.
type CustomDataParser func(io.Reader) error
func loadCustomData(defaultFile, env, readErrorMessage string, parser CustomDataParser) {
pwd, _ := os.Getwd()
path, found := os.LookupEnv(env)
if !found {
path = filepath.Join(pwd, defaultFile)
}
if _, err := os.Stat(path); err != nil {
return
}
b, err := ioutil.ReadFile(path)
if err != nil {
fmt.Printf("%s %s (%s)\n", readErrorMessage, path, err)
return
}
if err = parser(bytes.NewReader(b)); err != nil {
fmt.Println(err)
}
}
//LoadAcronyms loads rules from io.Reader param
func LoadAcronyms(r io.Reader) error {
m := []string{}
err := json.NewDecoder(r).Decode(&m)
if err != nil {
return fmt.Errorf("could not decode acronyms JSON from reader: %s", err)
}
acronymsMoot.Lock()
defer acronymsMoot.Unlock()
for _, acronym := range m {
baseAcronyms[acronym] = true
}
return nil
}
//LoadInflections loads rules from io.Reader param
func LoadInflections(r io.Reader) error {
m := map[string]string{}
err := json.NewDecoder(r).Decode(&m)
if err != nil {
return fmt.Errorf("could not decode inflection JSON from reader: %s", err)
}
pluralMoot.Lock()
defer pluralMoot.Unlock()
singularMoot.Lock()
defer singularMoot.Unlock()
for s, p := range m {
singleToPlural[s] = p
pluralToSingle[p] = s
}
return nil
}

34
vendor/github.com/gobuffalo/flect/dasherize.go generated vendored Normal file
View File

@@ -0,0 +1,34 @@
package flect
import (
"strings"
"unicode"
)
// Dasherize returns an alphanumeric, lowercased, dashed string
// Donald E. Knuth = donald-e-knuth
// Test with + sign = test-with-sign
// admin/WidgetID = admin-widget-id
func Dasherize(s string) string {
return New(s).Dasherize().String()
}
// Dasherize returns an alphanumeric, lowercased, dashed string
// Donald E. Knuth = donald-e-knuth
// Test with + sign = test-with-sign
// admin/WidgetID = admin-widget-id
func (i Ident) Dasherize() Ident {
var parts []string
for _, part := range i.Parts {
var x string
for _, c := range part {
if unicode.IsLetter(c) || unicode.IsDigit(c) {
x += string(c)
}
}
parts = xappend(parts, x)
}
return New(strings.ToLower(strings.Join(parts, "-")))
}

43
vendor/github.com/gobuffalo/flect/flect.go generated vendored Normal file
View File

@@ -0,0 +1,43 @@
/*
Package flect is a new inflection engine to replace [https://github.com/markbates/inflect](https://github.com/markbates/inflect) designed to be more modular, more readable, and easier to fix issues on than the original.
*/
package flect
import (
"strings"
"unicode"
)
var spaces = []rune{'_', ' ', ':', '-', '/'}
func isSpace(c rune) bool {
for _, r := range spaces {
if r == c {
return true
}
}
return unicode.IsSpace(c)
}
func xappend(a []string, ss ...string) []string {
for _, s := range ss {
s = strings.TrimSpace(s)
for _, x := range spaces {
s = strings.Trim(s, string(x))
}
if _, ok := baseAcronyms[strings.ToUpper(s)]; ok {
s = strings.ToUpper(s)
}
if s != "" {
a = append(a, s)
}
}
return a
}
func abs(x int) int {
if x < 0 {
return -x
}
return x
}

8
vendor/github.com/gobuffalo/flect/go.mod generated vendored Normal file
View File

@@ -0,0 +1,8 @@
module github.com/gobuffalo/flect
go 1.12
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/stretchr/testify v1.3.0
)

9
vendor/github.com/gobuffalo/flect/go.sum generated vendored Normal file
View File

@@ -0,0 +1,9 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=

31
vendor/github.com/gobuffalo/flect/humanize.go generated vendored Normal file
View File

@@ -0,0 +1,31 @@
package flect
import (
"strings"
)
// Humanize returns first letter of sentence capitalized
// employee_salary = Employee salary
// employee_id = employee ID
// employee_mobile_number = Employee mobile number
func Humanize(s string) string {
return New(s).Humanize().String()
}
// Humanize First letter of sentence capitalized
func (i Ident) Humanize() Ident {
if len(i.Original) == 0 {
return New("")
}
var parts []string
for index, part := range i.Parts {
if index == 0 {
part = strings.Title(i.Parts[0])
}
parts = xappend(parts, part)
}
return New(strings.Join(parts, " "))
}

106
vendor/github.com/gobuffalo/flect/ident.go generated vendored Normal file
View File

@@ -0,0 +1,106 @@
package flect
import (
"encoding"
"strings"
"unicode"
"unicode/utf8"
)
// Ident represents the string and it's parts
type Ident struct {
Original string
Parts []string
}
// String implements fmt.Stringer and returns the original string
func (i Ident) String() string {
return i.Original
}
// New creates a new Ident from the string
func New(s string) Ident {
i := Ident{
Original: s,
Parts: toParts(s),
}
return i
}
// toParts splits s into its component segments. Part boundaries are
// whitespace, lower-to-upper case transitions, transitions out of a
// known acronym, and any rune that is not a letter, digit, punctuation
// or backtick (such runes are dropped).
func toParts(s string) []string {
	parts := []string{}
	s = strings.TrimSpace(s)
	if len(s) == 0 {
		return parts
	}
	// A string that is itself a known acronym becomes a single
	// upper-cased part.
	if _, ok := baseAcronyms[strings.ToUpper(s)]; ok {
		return []string{strings.ToUpper(s)}
	}
	var prev rune
	var x string
	for _, c := range s {
		cs := string(c)
		if !utf8.ValidRune(c) {
			continue
		}
		if isSpace(c) {
			parts = xappend(parts, x)
			x = cs
			prev = c
			continue
		}
		// A lower->upper transition starts a new part ("widgetID").
		if unicode.IsUpper(c) && !unicode.IsUpper(prev) {
			parts = xappend(parts, x)
			x = cs
			prev = c
			continue
		}
		// An upper-case rune following a completed acronym also starts
		// a new part.
		if unicode.IsUpper(c) && baseAcronyms[strings.ToUpper(x)] {
			parts = xappend(parts, x)
			x = cs
			prev = c
			continue
		}
		// Letters, digits, punctuation and backticks accumulate into
		// the current part.
		if unicode.IsLetter(c) || unicode.IsDigit(c) || unicode.IsPunct(c) || c == '`' {
			prev = c
			x += cs
			continue
		}
		// Any other rune ends the current part and is itself discarded.
		parts = xappend(parts, x)
		x = ""
		prev = c
	}
	parts = xappend(parts, x)
	return parts
}
// Compile-time checks that Ident implements the encoding text
// (un)marshaling interfaces.
var _ encoding.TextUnmarshaler = &Ident{}
var _ encoding.TextMarshaler = &Ident{}

// UnmarshalText re-parses the Ident in place from the given bytes.
func (i *Ident) UnmarshalText(data []byte) error {
	(*i) = New(string(data))
	return nil
}
// MarshalText encodes the Ident as the bytes of its original string.
func (i Ident) MarshalText() ([]byte, error) {
	return []byte(i.Original), nil
}

13
vendor/github.com/gobuffalo/flect/lower_upper.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
package flect
import "strings"
// ToUpper is a convenience wrapper for strings.ToUpper.
func (i Ident) ToUpper() Ident {
	return New(strings.ToUpper(i.Original))
}
// ToLower is a convenience wrapper for strings.ToLower.
func (i Ident) ToLower() Ident {
	return New(strings.ToLower(i.Original))
}

43
vendor/github.com/gobuffalo/flect/ordinalize.go generated vendored Normal file
View File

@@ -0,0 +1,43 @@
package flect
import (
"fmt"
"strconv"
)
// Ordinalize converts a numeric string to its ordinal form:
//	42 = 42nd
//	45 = 45th
//	1 = 1st
func Ordinalize(s string) string {
	return New(s).Ordinalize().String()
}
// Ordinalize converts a numeric Ident to its ordinal form:
//	42 = 42nd
//	45 = 45th
//	1 = 1st
// Non-numeric input is returned unchanged; the teens (11-13) always
// take the "th" suffix.
func (i Ident) Ordinalize() Ident {
	n, err := strconv.Atoi(i.Original)
	if err != nil {
		// Not a number: leave the identifier untouched.
		return i
	}
	suffix := "th"
	if m := abs(n) % 100; m < 11 || m > 13 {
		switch abs(n) % 10 {
		case 1:
			suffix = "st"
		case 2:
			suffix = "nd"
		case 3:
			suffix = "rd"
		}
	}
	return New(fmt.Sprintf("%d%s", n, suffix))
}

25
vendor/github.com/gobuffalo/flect/pascalize.go generated vendored Normal file
View File

@@ -0,0 +1,25 @@
package flect
import (
"unicode"
)
// Pascalize returns a string with each segment capitalized:
//	user = User
//	bob dylan = BobDylan
//	widget_id = WidgetID
func Pascalize(s string) string {
	return New(s).Pascalize().String()
}
// Pascalize camelizes the identifier and upper-cases its first letter:
//	user = User
//	bob dylan = BobDylan
//	widget_id = WidgetID
func (i Ident) Pascalize() Ident {
	c := i.Camelize()
	if len(c.String()) == 0 {
		return c
	}
	// NOTE(review): Original[0] is a byte index, so a string whose first
	// rune is multi-byte would be mangled here — looks like upstream
	// behavior; confirm before changing.
	return New(string(unicode.ToUpper(rune(c.Original[0]))) + c.Original[1:])
}

240
vendor/github.com/gobuffalo/flect/plural_rules.go generated vendored Normal file
View File

@@ -0,0 +1,240 @@
package flect
// pluralRules holds suffix-replacement rules applied, in order, by
// Pluralize when no irregular-form lookup matches.
var pluralRules = []rule{}

// AddPlural adds a rule that will replace the given suffix with the
// replacement suffix. The replacement itself is also registered as a
// no-op rule so already-plural words pass through unchanged.
func AddPlural(suffix string, repl string) {
	pluralMoot.Lock()
	defer pluralMoot.Unlock()
	pluralRules = append(pluralRules, rule{
		suffix: suffix,
		fn: func(s string) string {
			s = s[:len(s)-len(suffix)]
			return s + repl
		},
	})
	pluralRules = append(pluralRules, rule{
		suffix: repl,
		fn:     noop,
	})
}
// singleToPlural maps irregular singular forms to their plurals. It is
// the primary lookup consulted by Pluralize before any suffix rules run,
// and pluralToSingle is derived from it at init time.
// NOTE(review): the key "vertix" looks like a misspelling of "vertex",
// but it is runtime data inherited from upstream — confirm before changing.
var singleToPlural = map[string]string{
	"human":       "humans",
	"matrix":      "matrices",
	"vertix":      "vertices",
	"index":       "indices",
	"mouse":       "mice",
	"louse":       "lice",
	"ress":        "resses",
	"ox":          "oxen",
	"quiz":        "quizzes",
	"series":      "series",
	"octopus":     "octopi",
	"equipment":   "equipment",
	"information": "information",
	"rice":        "rice",
	"money":       "money",
	"species":     "species",
	"fish":        "fish",
	"sheep":       "sheep",
	"jeans":       "jeans",
	"police":      "police",
	"dear":        "dear",
	"goose":       "geese",
	"tooth":       "teeth",
	"foot":        "feet",
	"bus":         "buses",
	"fez":         "fezzes",
	"piano":       "pianos",
	"halo":        "halos",
	"photo":       "photos",
	"aircraft":    "aircraft",
	"alumna":      "alumnae",
	"alumnus":     "alumni",
	"analysis":    "analyses",
	"antenna":     "antennas",
	"antithesis":  "antitheses",
	"apex":        "apexes",
	"appendix":    "appendices",
	"axis":        "axes",
	"bacillus":    "bacilli",
	"bacterium":   "bacteria",
	"basis":       "bases",
	"beau":        "beaus",
	"bison":       "bison",
	"bureau":      "bureaus",
	"campus":      "campuses",
	"château":     "châteaux",
	"codex":       "codices",
	"concerto":    "concertos",
	"corpus":      "corpora",
	"crisis":      "crises",
	"curriculum":  "curriculums",
	"deer":        "deer",
	"diagnosis":   "diagnoses",
	"die":         "dice",
	"dwarf":       "dwarves",
	"ellipsis":    "ellipses",
	"erratum":     "errata",
	"faux pas":    "faux pas",
	"focus":       "foci",
	"formula":     "formulas",
	"fungus":      "fungi",
	"genus":       "genera",
	"graffito":    "graffiti",
	"grouse":      "grouse",
	"half":        "halves",
	"hoof":        "hooves",
	"hypothesis":  "hypotheses",
	"larva":       "larvae",
	"libretto":    "librettos",
	"loaf":        "loaves",
	"locus":       "loci",
	"minutia":     "minutiae",
	"moose":       "moose",
	"nebula":      "nebulae",
	"nucleus":     "nuclei",
	"oasis":       "oases",
	"offspring":   "offspring",
	"opus":        "opera",
	"parenthesis": "parentheses",
	"phenomenon":  "phenomena",
	"phylum":      "phyla",
	"prognosis":   "prognoses",
	"radius":      "radiuses",
	"referendum":  "referendums",
	"salmon":      "salmon",
	"shrimp":      "shrimp",
	"stimulus":    "stimuli",
	"stratum":     "strata",
	"swine":       "swine",
	"syllabus":    "syllabi",
	"symposium":   "symposiums",
	"synopsis":    "synopses",
	"tableau":     "tableaus",
	"thesis":      "theses",
	"thief":       "thieves",
	"trout":       "trout",
	"tuna":        "tuna",
	"vertebra":    "vertebrae",
	"vita":        "vitae",
	"vortex":      "vortices",
	"wharf":       "wharves",
	"wife":        "wives",
	"wolf":        "wolves",
	"datum":       "data",
	"testis":      "testes",
	"alias":       "aliases",
	"house":       "houses",
	"shoe":        "shoes",
	"news":        "news",
	"ovum":        "ova",
	"foo":         "foos",
}
// pluralToSingle is the inverse irregular-form lookup, derived from
// singleToPlural at package init time.
var pluralToSingle = map[string]string{}

func init() {
	for k, v := range singleToPlural {
		pluralToSingle[v] = k
	}
}
// Registers the default suffix-based pluralization rules. Order matters:
// Pluralize applies the first rule whose suffix matches, so more
// specific suffixes are registered before more general ones.
func init() {
	AddPlural("campus", "campuses")
	AddPlural("man", "men")
	AddPlural("tz", "tzes")
	AddPlural("alias", "aliases")
	AddPlural("oasis", "oasis")
	AddPlural("wife", "wives")
	AddPlural("basis", "basis")
	AddPlural("atum", "ata")
	AddPlural("adium", "adia")
	AddPlural("actus", "acti")
	AddPlural("irus", "iri")
	AddPlural("iterion", "iteria")
	AddPlural("dium", "diums")
	AddPlural("ovum", "ova")
	AddPlural("ize", "izes")
	AddPlural("dge", "dges")
	AddPlural("focus", "foci")
	AddPlural("child", "children")
	AddPlural("oaf", "oaves")
	AddPlural("randum", "randa")
	AddPlural("base", "bases")
	AddPlural("atus", "atuses")
	AddPlural("ode", "odes")
	AddPlural("person", "people")
	AddPlural("va", "vae")
	AddPlural("leus", "li")
	AddPlural("oot", "eet")
	AddPlural("oose", "eese")
	AddPlural("box", "boxes")
	AddPlural("ium", "ia")
	AddPlural("sis", "ses")
	AddPlural("nna", "nnas")
	AddPlural("eses", "esis")
	AddPlural("stis", "stes")
	AddPlural("ex", "ices")
	AddPlural("ula", "ulae")
	AddPlural("isis", "ises")
	AddPlural("ouses", "ouse")
	AddPlural("olves", "olf")
	AddPlural("lf", "lves")
	AddPlural("rf", "rves")
	// "?fe" -> "?ves" for every consonant-like prefix (knife -> knives).
	AddPlural("afe", "aves")
	AddPlural("bfe", "bves")
	AddPlural("cfe", "cves")
	AddPlural("dfe", "dves")
	AddPlural("efe", "eves")
	AddPlural("gfe", "gves")
	AddPlural("hfe", "hves")
	AddPlural("ife", "ives")
	AddPlural("jfe", "jves")
	AddPlural("kfe", "kves")
	AddPlural("lfe", "lves")
	AddPlural("mfe", "mves")
	AddPlural("nfe", "nves")
	AddPlural("ofe", "oves")
	AddPlural("pfe", "pves")
	AddPlural("qfe", "qves")
	AddPlural("rfe", "rves")
	AddPlural("sfe", "sves")
	AddPlural("tfe", "tves")
	AddPlural("ufe", "uves")
	AddPlural("vfe", "vves")
	AddPlural("wfe", "wves")
	AddPlural("xfe", "xves")
	AddPlural("yfe", "yves")
	AddPlural("zfe", "zves")
	AddPlural("hive", "hives")
	AddPlural("quy", "quies")
	// consonant + "y" -> "ies" (city -> cities).
	AddPlural("by", "bies")
	AddPlural("cy", "cies")
	AddPlural("dy", "dies")
	AddPlural("fy", "fies")
	AddPlural("gy", "gies")
	AddPlural("hy", "hies")
	AddPlural("jy", "jies")
	AddPlural("ky", "kies")
	AddPlural("ly", "lies")
	AddPlural("my", "mies")
	AddPlural("ny", "nies")
	AddPlural("py", "pies")
	AddPlural("qy", "qies")
	AddPlural("ry", "ries")
	AddPlural("sy", "sies")
	AddPlural("ty", "ties")
	AddPlural("vy", "vies")
	AddPlural("wy", "wies")
	AddPlural("xy", "xies")
	AddPlural("zy", "zies")
	// sibilant endings take "es".
	AddPlural("x", "xes")
	AddPlural("ch", "ches")
	AddPlural("ss", "sses")
	AddPlural("sh", "shes")
	AddPlural("oe", "oes")
	AddPlural("io", "ios")
	AddPlural("o", "oes")
}

49
vendor/github.com/gobuffalo/flect/pluralize.go generated vendored Normal file
View File

@@ -0,0 +1,49 @@
package flect
import (
"strings"
"sync"
)
// pluralMoot guards pluralRules against concurrent AddPlural writes
// and Pluralize reads.
var pluralMoot = &sync.RWMutex{}

// Pluralize returns a plural version of the string:
//	user = users
//	person = people
//	datum = data
func Pluralize(s string) string {
	return New(s).Pluralize().String()
}
// Pluralize returns a plural version of the string:
//	user = users
//	person = people
//	datum = data
// Resolution order: irregular-form tables, then suffix rules, then a
// trailing-"s" check, then a default appended "s".
func (i Ident) Pluralize() Ident {
	s := i.Original
	if len(s) == 0 {
		return New("")
	}
	pluralMoot.RLock()
	defer pluralMoot.RUnlock()
	ls := strings.ToLower(s)
	// Already a known irregular plural: return unchanged.
	if _, ok := pluralToSingle[ls]; ok {
		return i
	}
	// Known irregular singular: return its mapped plural.
	if p, ok := singleToPlural[ls]; ok {
		return New(p)
	}
	// First matching suffix rule wins.
	for _, r := range pluralRules {
		if strings.HasSuffix(ls, r.suffix) {
			return New(r.fn(s))
		}
	}
	// Words already ending in "s" are assumed to be plural.
	if strings.HasSuffix(ls, "s") {
		return i
	}
	return New(i.String() + "s")
}

10
vendor/github.com/gobuffalo/flect/rule.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
package flect
// ruleFn transforms a word according to an inflection rule.
type ruleFn func(string) string

// rule pairs a suffix to match with the transformation to apply.
type rule struct {
	suffix string
	fn     ruleFn
}

// noop returns its input unchanged; used to register suffixes that
// should pass through as-is.
func noop(s string) string { return s }

122
vendor/github.com/gobuffalo/flect/singular_rules.go generated vendored Normal file
View File

@@ -0,0 +1,122 @@
package flect
// singularRules holds suffix-replacement rules applied, in order, by
// Singularize when no irregular-form lookup matches.
var singularRules = []rule{}

// AddSingular adds a rule that will replace the given suffix with the
// replacement suffix. The replacement itself is also registered as a
// no-op rule so already-singular words pass through unchanged.
func AddSingular(ext string, repl string) {
	singularMoot.Lock()
	defer singularMoot.Unlock()
	singularRules = append(singularRules, rule{
		suffix: ext,
		fn: func(s string) string {
			s = s[:len(s)-len(ext)]
			return s + repl
		},
	})
	// Use the shared noop helper for the identity rule, consistent with
	// AddPlural, instead of an equivalent inline closure.
	singularRules = append(singularRules, rule{
		suffix: repl,
		fn:     noop,
	})
}
// Registers the default suffix-based singularization rules. Order
// matters: Singularize applies the first rule whose suffix matches.
// NOTE(review): AddSingular("genus", "genera") looks inverted (it would
// rewrite the singular "genus" to the plural "genera") — upstream data;
// confirm before changing.
func init() {
	AddSingular("ria", "rion")
	AddSingular("news", "news")
	AddSingular("halves", "half")
	AddSingular("appendix", "appendix")
	AddSingular("zzes", "zz")
	AddSingular("ulas", "ula")
	AddSingular("psis", "pse")
	AddSingular("genus", "genera")
	AddSingular("phyla", "phylum")
	AddSingular("odice", "odex")
	AddSingular("oxen", "ox")
	AddSingular("ianos", "iano")
	AddSingular("ulus", "uli")
	AddSingular("mice", "mouse")
	AddSingular("ouses", "ouse")
	AddSingular("mni", "mnus")
	AddSingular("ocus", "oci")
	AddSingular("shoes", "shoe")
	AddSingular("oasis", "oasis")
	AddSingular("lice", "louse")
	AddSingular("men", "man")
	AddSingular("ta", "tum")
	AddSingular("ia", "ium")
	AddSingular("tives", "tive")
	AddSingular("ldren", "ld")
	AddSingular("people", "person")
	// "?ves" -> "?fe" for every consonant-like prefix (knives -> knife).
	AddSingular("aves", "afe")
	AddSingular("uses", "us")
	AddSingular("bves", "bfe")
	AddSingular("cves", "cfe")
	AddSingular("dves", "dfe")
	AddSingular("eves", "efe")
	AddSingular("gves", "gfe")
	AddSingular("hves", "hfe")
	AddSingular("chives", "chive")
	AddSingular("ives", "ife")
	AddSingular("movies", "movie")
	AddSingular("jeans", "jeans")
	AddSingular("cesses", "cess")
	AddSingular("cess", "cess")
	AddSingular("acti", "actus")
	AddSingular("itzes", "itz")
	AddSingular("usses", "uss")
	AddSingular("uss", "uss")
	AddSingular("jves", "jfe")
	AddSingular("kves", "kfe")
	AddSingular("mves", "mfe")
	AddSingular("nves", "nfe")
	AddSingular("moves", "move")
	AddSingular("oves", "ofe")
	AddSingular("pves", "pfe")
	AddSingular("qves", "qfe")
	AddSingular("sves", "sfe")
	AddSingular("tves", "tfe")
	AddSingular("uves", "ufe")
	AddSingular("vves", "vfe")
	AddSingular("wves", "wfe")
	AddSingular("xves", "xfe")
	AddSingular("yves", "yfe")
	AddSingular("zves", "zfe")
	AddSingular("hives", "hive")
	AddSingular("lves", "lf")
	AddSingular("rves", "rf")
	AddSingular("quies", "quy")
	// "ies" -> consonant + "y" (cities -> city).
	AddSingular("bies", "by")
	AddSingular("cies", "cy")
	AddSingular("dies", "dy")
	AddSingular("fies", "fy")
	AddSingular("gies", "gy")
	AddSingular("hies", "hy")
	AddSingular("jies", "jy")
	AddSingular("kies", "ky")
	AddSingular("lies", "ly")
	AddSingular("mies", "my")
	AddSingular("nies", "ny")
	AddSingular("pies", "py")
	AddSingular("qies", "qy")
	AddSingular("ries", "ry")
	AddSingular("sies", "sy")
	AddSingular("ties", "ty")
	AddSingular("vies", "vy")
	AddSingular("wies", "wy")
	AddSingular("xies", "xy")
	AddSingular("zies", "zy")
	// sibilant "es" endings.
	AddSingular("xes", "x")
	AddSingular("ches", "ch")
	AddSingular("sses", "ss")
	AddSingular("shes", "sh")
	AddSingular("oes", "o")
	AddSingular("ress", "ress")
	AddSingular("iri", "irus")
	AddSingular("irus", "irus")
	AddSingular("tuses", "tus")
	AddSingular("tus", "tus")
	AddSingular("s", "")
	AddSingular("ss", "ss")
}

44
vendor/github.com/gobuffalo/flect/singularize.go generated vendored Normal file
View File

@@ -0,0 +1,44 @@
package flect
import (
"strings"
"sync"
)
// singularMoot guards singularRules against concurrent AddSingular
// writes and Singularize reads.
var singularMoot = &sync.RWMutex{}

// Singularize returns a singular version of the string:
//	users = user
//	data = datum
//	people = person
func Singularize(s string) string {
	return New(s).Singularize().String()
}
// Singularize returns a singular version of the string:
//	users = user
//	data = datum
//	people = person
// Resolution order: irregular-form tables, then suffix rules; otherwise
// the string is returned unchanged.
func (i Ident) Singularize() Ident {
	s := i.Original
	if len(s) == 0 {
		return i
	}
	singularMoot.RLock()
	defer singularMoot.RUnlock()
	ls := strings.ToLower(s)
	// Known irregular plural: return its mapped singular.
	if p, ok := pluralToSingle[ls]; ok {
		return New(p)
	}
	// Already a known irregular singular: return unchanged.
	if _, ok := singleToPlural[ls]; ok {
		return i
	}
	// First matching suffix rule wins.
	for _, r := range singularRules {
		if strings.HasSuffix(ls, r.suffix) {
			return New(r.fn(s))
		}
	}
	return i
}

30
vendor/github.com/gobuffalo/flect/titleize.go generated vendored Normal file
View File

@@ -0,0 +1,30 @@
package flect
import (
"strings"
"unicode"
)
// Titleize will capitalize the start of each part:
//	"Nice to see you!" = "Nice To See You!"
//	"i've read a book! have you?" = "I've Read A Book! Have You?"
//	"This is `code` ok" = "This Is `code` OK"
func Titleize(s string) string {
	return New(s).Titleize().String()
}
// Titleize will capitalize the start of each part:
//	"Nice to see you!" = "Nice To See You!"
//	"i've read a book! have you?" = "I've Read A Book! Have You?"
//	"This is `code` ok" = "This Is `code` OK"
func (i Ident) Titleize() Ident {
	var parts []string
	for _, part := range i.Parts {
		// NOTE(review): part[0] is a byte index, so a part starting with
		// a multi-byte rune would be mangled; also assumes Parts never
		// contains an empty string (this would panic) — presumably
		// guaranteed by toParts/xappend, confirm.
		x := string(unicode.ToTitle(rune(part[0])))
		if len(part) > 1 {
			x += part[1:]
		}
		parts = append(parts, x)
	}
	return New(strings.Join(parts, " "))
}

34
vendor/github.com/gobuffalo/flect/underscore.go generated vendored Normal file
View File

@@ -0,0 +1,34 @@
package flect
import (
"strings"
"unicode"
)
// Underscore joins the lower-cased parts of a string with underscores:
//	bob dylan = bob_dylan
//	Nice to see you! = nice_to_see_you
//	widgetID = widget_id
func Underscore(s string) string {
	return New(s).Underscore().String()
}
// Underscore joins the lower-cased parts of the identifier with
// underscores, keeping only letters and digits within each part:
//	bob dylan = bob_dylan
//	Nice to see you! = nice_to_see_you
//	widgetID = widget_id
func (i Ident) Underscore() Ident {
	out := make([]string, 0, len(i.Parts))
	for _, part := range i.Parts {
		var b strings.Builder
		for _, r := range part {
			if unicode.IsLetter(r) || unicode.IsDigit(r) {
				b.WriteRune(r)
			}
		}
		// Parts stripped down to nothing are dropped entirely.
		if b.Len() > 0 {
			out = append(out, b.String())
		}
	}
	return New(strings.ToLower(strings.Join(out, "_")))
}

4
vendor/github.com/gobuffalo/flect/version.go generated vendored Normal file
View File

@@ -0,0 +1,4 @@
package flect
// Version holds the Flect version number.
const Version = "v0.1.5"

21
vendor/github.com/spf13/afero/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,21 @@
sudo: false
language: go
go:
- 1.9
- "1.10"
- tip
os:
- linux
- osx
matrix:
allow_failures:
- go: tip
fast_finish: true
script:
- go build
- go test -race -v ./...

174
vendor/github.com/spf13/afero/LICENSE.txt generated vendored Normal file
View File

@@ -0,0 +1,174 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

452
vendor/github.com/spf13/afero/README.md generated vendored Normal file
View File

@@ -0,0 +1,452 @@
![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)
A FileSystem Abstraction System for Go
[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Overview
Afero is a filesystem framework providing a simple, uniform and universal API
interacting with any filesystem, as an abstraction layer providing interfaces,
types and methods. Afero has an exceptionally clean interface and simple design
without needless constructors or initialization methods.
Afero is also a library providing a base set of interoperable backend
filesystems that make it easy to work with afero while retaining all the power
and benefit of the os and ioutil packages.
Afero provides significant improvements over using the os package alone, most
notably the ability to create mock and testing filesystems without relying on the disk.
It is suitable for use in any situation where you would consider using the OS
package as it provides an additional abstraction that makes it easy to use a
memory backed file system during testing. It also adds support for the http
filesystem for full interoperability.
## Afero Features
* A single consistent API for accessing a variety of filesystems
* Interoperation between a variety of file system types
* A set of interfaces to encourage and enforce interoperability between backends
* An atomic cross platform memory backed file system
* Support for compositional (union) file systems by combining multiple file systems acting as one
* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
* A set of utility functions ported from io, ioutil & hugo to be afero aware
# Using Afero
Afero is easy to use and easier to adopt.
A few different ways you could use Afero:
* Use the interfaces alone to define your own file system.
* Wrap for the OS packages.
* Define different filesystems for different parts of your application.
* Use Afero for mock filesystems while testing
## Step 1: Install Afero
First use go get to install the latest version of the library.
$ go get github.com/spf13/afero
Next include Afero in your application.
```go
import "github.com/spf13/afero"
```
## Step 2: Declare a backend
First define a package variable and set it to a pointer to a filesystem.
```go
var AppFs = afero.NewMemMapFs()
or
var AppFs = afero.NewOsFs()
```
It is important to note that if you repeat the composite literal you
will be using a completely new and isolated filesystem. In the case of
OsFs it will still use the same underlying filesystem but will reduce
the ability to drop in other filesystems as desired.
## Step 3: Use it like you would the OS package
Throughout your application use any function and method like you normally
would.
So if my application before had:
```go
os.Open('/tmp/foo')
```
We would replace it with:
```go
AppFs.Open('/tmp/foo')
```
`AppFs` being the variable we defined above.
## List of all available functions
File System Methods Available:
```go
Chmod(name string, mode os.FileMode) : error
Chtimes(name string, atime time.Time, mtime time.Time) : error
Create(name string) : File, error
Mkdir(name string, perm os.FileMode) : error
MkdirAll(path string, perm os.FileMode) : error
Name() : string
Open(name string) : File, error
OpenFile(name string, flag int, perm os.FileMode) : File, error
Remove(name string) : error
RemoveAll(path string) : error
Rename(oldname, newname string) : error
Stat(name string) : os.FileInfo, error
```
File Interfaces and Methods Available:
```go
io.Closer
io.Reader
io.ReaderAt
io.Seeker
io.Writer
io.WriterAt
Name() : string
Readdir(count int) : []os.FileInfo, error
Readdirnames(n int) : []string, error
Stat() : os.FileInfo, error
Sync() : error
Truncate(size int64) : error
WriteString(s string) : ret int, err error
```
In some applications it may make sense to define a new package that
simply exports the file system variable for easy access from anywhere.
## Using Afero's utility functions
Afero provides a set of functions to make it easier to use the underlying file systems.
These functions have been primarily ported from io & ioutil with some developed for Hugo.
The afero utilities support all afero compatible backends.
The list of utilities includes:
```go
DirExists(path string) (bool, error)
Exists(path string) (bool, error)
FileContainsBytes(filename string, subslice []byte) (bool, error)
GetTempDir(subPath string) string
IsDir(path string) (bool, error)
IsEmpty(path string) (bool, error)
ReadDir(dirname string) ([]os.FileInfo, error)
ReadFile(filename string) ([]byte, error)
SafeWriteReader(path string, r io.Reader) (err error)
TempDir(dir, prefix string) (name string, err error)
TempFile(dir, prefix string) (f File, err error)
Walk(root string, walkFn filepath.WalkFunc) error
WriteFile(filename string, data []byte, perm os.FileMode) error
WriteReader(path string, r io.Reader) (err error)
```
For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
They are available under two different approaches to use. You can either call
them directly where the first parameter of each function will be the file
system, or you can declare a new `Afero`, a custom type used to bind these
functions as methods to a given filesystem.
### Calling utilities directly
```go
fs := new(afero.MemMapFs)
f, err := afero.TempFile(fs,"", "ioutil-test")
```
### Calling via Afero
```go
fs := afero.NewMemMapFs()
afs := &afero.Afero{Fs: fs}
f, err := afs.TempFile("", "ioutil-test")
```
## Using Afero for Testing
There is a large benefit to using a mock filesystem for testing. It has a
completely blank state every time it is initialized and can be easily
reproducible regardless of OS. You could create files to your hearts content
and the file access would be fast while also saving you from all the annoying
issues with deleting temporary files, Windows file locking, etc. The MemMapFs
backend is perfect for testing.
* Much faster than performing I/O operations on disk
* Avoid security issues and permissions
* Far more control. 'rm -rf /' with confidence
* Test setup is far easier to do
* No test cleanup needed
One way to accomplish this is to define a variable as mentioned above.
In your application this will be set to afero.NewOsFs() during testing you
can set it to afero.NewMemMapFs().
It wouldn't be uncommon to have each test initialize a blank slate memory
backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
appropriate in my application code. This approach ensures that Tests are order
independent, with no test relying on the state left by an earlier test.
Then in my tests I would initialize a new MemMapFs for each test:
```go
func TestExist(t *testing.T) {
appFS := afero.NewMemMapFs()
// create test files and directories
appFS.MkdirAll("src/a", 0755)
afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
name := "src/c"
_, err := appFS.Stat(name)
if os.IsNotExist(err) {
t.Errorf("file \"%s\" does not exist.\n", name)
}
}
```
# Available Backends
## Operating System Native
### OsFs
The first is simply a wrapper around the native OS calls. This makes it
very easy to use as all of the calls are the same as the existing OS
calls. It also makes it trivial to have your code use the OS during
operation and a mock filesystem during testing or as needed.
```go
appfs := afero.NewOsFs()
appfs.MkdirAll("src/a", 0755)
```
## Memory Backed Storage
### MemMapFs
Afero also provides a fully atomic memory backed filesystem perfect for use in
mocking and to speed up unnecessary disk io when persistence isn't
necessary. It is fully concurrent and will work within go routines
safely.
```go
mm := afero.NewMemMapFs()
mm.MkdirAll("src/a", 0755)
```
#### InMemoryFile
As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
backed file implementation. This can be used in other memory backed file
systems with ease. Plans are to add a radix tree memory stored file
system using InMemoryFile.
## Network Interfaces
### SftpFs
Afero has experimental support for the secure file transfer protocol (SFTP), which can
be used to perform file operations over an encrypted channel.
## Filtering Backends
### BasePathFs
The BasePathFs restricts all operations to a given path within an Fs.
The given file name to the operations on this Fs will be prepended with
the base path before calling the source Fs.
```go
bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
```
### ReadOnlyFs
A thin wrapper around the source Fs providing a read only view.
```go
fs := afero.NewReadOnlyFs(afero.NewOsFs())
_, err := fs.Create("/file.txt")
// err = syscall.EPERM
```
# RegexpFs
A filtered view on file names, any file NOT matching
the passed regexp will be treated as non-existing.
Files not matching the regexp provided will not be created.
Directories are not filtered.
```go
fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
_, err := fs.Create("/file.html")
// err = syscall.ENOENT
```
### HttpFs
Afero provides an http compatible backend which can wrap any of the existing
backends.
The Http package requires a slightly specific version of Open which
returns an http.File type.
Afero provides an httpFs file system which satisfies this requirement.
Any Afero FileSystem can be used as an httpFs.
```go
httpFs := afero.NewHttpFs(<ExistingFS>)
fileserver := http.FileServer(httpFs.Dir(<PATH>))
http.Handle("/", fileserver)
```
## Composite Backends
Afero provides the ability to have two filesystems (or more) act as a single
file system.
### CacheOnReadFs
The CacheOnReadFs will lazily make copies of any accessed files from the base
layer into the overlay. Subsequent reads will be pulled from the overlay
directly permitting the request is within the cache duration of when it was
created in the overlay.
If the base filesystem is writeable, any changes to files will be
done first to the base, then to the overlay layer. Write calls on open file
handles like `Write()` or `Truncate()` go to the overlay first.
To write files to the overlay only, you can use the overlay Fs directly (not
via the union Fs).
Cache files in the layer for the given time.Duration, a cache duration of 0
means "forever" meaning the file will not be re-requested from the base ever.
A read-only base will make the overlay also read-only but still copy files
from the base to the overlay when they're not present (or outdated) in the
caching layer.
```go
base := afero.NewOsFs()
layer := afero.NewMemMapFs()
ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
```
### CopyOnWriteFs()
The CopyOnWriteFs is a read only base file system with a potentially
writeable layer on top.
Read operations will first look in the overlay and if not found there, will
serve the file from the base.
Changes to the file system will only be made in the overlay.
Any attempt to modify a file found only in the base will copy the file to the
overlay layer before modification (including opening a file with a writable
handle).
Removing and Renaming files present only in the base layer is not currently
permitted. If a file is present in the base layer and the overlay, only the
overlay will be removed/renamed.
```go
base := afero.NewOsFs()
roBase := afero.NewReadOnlyFs(base)
ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
fh, _ = ufs.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
```
In this example all write operations will only occur in memory (MemMapFs)
leaving the base filesystem (OsFs) untouched.
## Desired/possible backends
The following is a short list of possible backends we hope someone will
implement:
* SSH
* ZIP
* TAR
* S3
# About the project
## What's in the name
Afero comes from the latin roots Ad-Facere.
**"Ad"** is a prefix meaning "to".
**"Facere"** is a form of the root "faciō" making "make or do".
The literal meaning of afero is "to make" or "to do" which seems very fitting
for a library that allows one to make files and directories and do things with them.
The English word that shares the same roots as Afero is "affair". Affair shares
the same concept but as a noun it means "something that is made or done" or "an
object of a particular type".
It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
Googles very well.
## Release Notes
* **0.10.0** 2015.12.10
* Full compatibility with Windows
* Introduction of afero utilities
* Test suite rewritten to work cross platform
* Normalize paths for MemMapFs
* Adding Sync to the file interface
* **Breaking Change** Walk and ReadDir have changed parameter order
* Moving types used by MemMapFs to a subpackage
* General bugfixes and improvements
* **0.9.0** 2015.11.05
* New Walk function similar to filepath.Walk
* MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC
* MemMapFs.Remove now really deletes the file
* InMemoryFile.Readdir and Readdirnames work correctly
* InMemoryFile functions lock it for concurrent access
* Test suite improvements
* **0.8.0** 2014.10.28
* First public version
* Interfaces feel ready for people to build using
* Interfaces satisfy all known uses
* MemMapFs passes the majority of the OS test suite
* OsFs passes the majority of the OS test suite
## Contributing
1. Fork it
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create new Pull Request
## Contributors
Names in no particular order:
* [spf13](https://github.com/spf13)
* [jaqx0r](https://github.com/jaqx0r)
* [mbertschler](https://github.com/mbertschler)
* [xor-gate](https://github.com/xor-gate)
## License
Afero is released under the Apache 2.0 license. See
[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)

108
vendor/github.com/spf13/afero/afero.go generated vendored Normal file
View File

@@ -0,0 +1,108 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package afero provides types and methods for interacting with the filesystem,
// as an abstraction layer.
// Afero also provides a few implementations that are mostly interoperable. One that
// uses the operating system filesystem, one that uses memory to store files
// (cross platform) and an interface that should be implemented if you want to
// provide your own filesystem.
package afero
import (
"errors"
"io"
"os"
"time"
)
// Afero is a convenience wrapper that binds the package-level utility
// functions (TempFile, WriteFile, Walk, ...) as methods on an embedded Fs.
type Afero struct {
	Fs
}
// File represents a file in the filesystem.
//
// It mirrors the read/write/seek surface of *os.File so that a backend's
// file handle can be used wherever an OS file handle would be.
type File interface {
	io.Closer
	io.Reader
	io.ReaderAt
	io.Seeker
	io.Writer
	io.WriterAt
	// Name returns the name of the file as presented to Open.
	Name() string
	// Readdir reads the contents of the directory and returns up to count
	// FileInfo values, following the contract of os.File.Readdir.
	Readdir(count int) ([]os.FileInfo, error)
	// Readdirnames returns up to n names from the directory.
	Readdirnames(n int) ([]string, error)
	// Stat returns the FileInfo describing the file.
	Stat() (os.FileInfo, error)
	// Sync flushes the file's contents to stable storage.
	Sync() error
	// Truncate changes the size of the file.
	Truncate(size int64) error
	// WriteString writes the string s to the file.
	WriteString(s string) (ret int, err error)
}
// Fs is the filesystem interface.
//
// Any simulated or real filesystem should implement this interface.
type Fs interface {
	// Create creates a file in the filesystem, returning the file and an
	// error, if any happens.
	Create(name string) (File, error)
	// Mkdir creates a directory in the filesystem, return an error if any
	// happens.
	Mkdir(name string, perm os.FileMode) error
	// MkdirAll creates a directory path and all parents that does not exist
	// yet.
	MkdirAll(path string, perm os.FileMode) error
	// Open opens a file, returning it or an error, if any happens.
	Open(name string) (File, error)
	// OpenFile opens a file using the given flags and the given mode.
	OpenFile(name string, flag int, perm os.FileMode) (File, error)
	// Remove removes a file identified by name, returning an error, if any
	// happens.
	Remove(name string) error
	// RemoveAll removes a directory path and any children it contains. It
	// does not fail if the path does not exist (return nil).
	RemoveAll(path string) error
	// Rename renames a file.
	Rename(oldname, newname string) error
	// Stat returns a FileInfo describing the named file, or an error, if any
	// happens.
	Stat(name string) (os.FileInfo, error)
	// Name returns the name of this FileSystem.
	Name() string
	// Chmod changes the mode of the named file to mode.
	Chmod(name string, mode os.FileMode) error
	// Chtimes changes the access and modification times of the named file.
	Chtimes(name string, atime time.Time, mtime time.Time) error
}
// Sentinel errors shared by the in-memory backends. ErrFileNotFound,
// ErrFileExists and ErrDestinationExists alias the os package sentinels so
// callers can test them with os.IsNotExist / os.IsExist.
var (
	ErrFileClosed = errors.New("File is closed")
	ErrOutOfRange = errors.New("Out of range")
	ErrTooLarge = errors.New("Too large")
	ErrFileNotFound = os.ErrNotExist
	ErrFileExists = os.ErrExist
	ErrDestinationExists = os.ErrExist
)

15
vendor/github.com/spf13/afero/appveyor.yml generated vendored Normal file
View File

@@ -0,0 +1,15 @@
version: '{build}'
clone_folder: C:\gopath\src\github.com\spf13\afero
environment:
GOPATH: C:\gopath
build_script:
- cmd: >-
go version
go env
go get -v github.com/spf13/afero/...
go build github.com/spf13/afero
test_script:
- cmd: go test -race -v github.com/spf13/afero/...

180
vendor/github.com/spf13/afero/basepath.go generated vendored Normal file
View File

@@ -0,0 +1,180 @@
package afero
import (
"os"
"path/filepath"
"runtime"
"strings"
"time"
)
// Compile-time check that BasePathFs implements the optional Lstater
// extension interface.
var _ Lstater = (*BasePathFs)(nil)
// The BasePathFs restricts all operations to a given path within an Fs.
// The given file name to the operations on this Fs will be prepended with
// the base path before calling the base Fs.
// Any file name (after filepath.Clean()) outside this base path will be
// treated as non existing file.
//
// Note that it does not clean the error messages on return, so you may
// reveal the real path on errors.
type BasePathFs struct {
	source Fs
	path string
}
// BasePathFile wraps a File from the source Fs so that the name it reports
// is relative to the virtual root rather than the real path.
type BasePathFile struct {
	File
	path string
}
// Name returns the file's name with the base path prefix stripped.
func (f *BasePathFile) Name() string {
	sourcename := f.File.Name()
	return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
}
// NewBasePathFs returns an Fs that roots every operation on source at path.
func NewBasePathFs(source Fs, path string) Fs {
	return &BasePathFs{source: source, path: path}
}
// RealPath maps a virtual name onto the path used on the source Fs:
// on a file outside the base path it returns the given file name and an
// error, else the given file with the base path prepended.
func (b *BasePathFs) RealPath(name string) (path string, err error) {
	if err := validateBasePathName(name); err != nil {
		return name, err
	}
	bpath := filepath.Clean(b.path)
	path = filepath.Clean(filepath.Join(bpath, name))
	// Guard against sibling-prefix escapes: with a base of "/base/path" a
	// result of "/base/pathevil" must be rejected, so a bare HasPrefix
	// check on bpath is not enough. Accept only the base itself or paths
	// whose prefix ends at a path separator.
	prefix := bpath
	if !strings.HasSuffix(prefix, string(filepath.Separator)) {
		prefix += string(filepath.Separator)
	}
	if path != bpath && !strings.HasPrefix(path, prefix) {
		return name, os.ErrNotExist
	}
	return path, nil
}
// validateBasePathName rejects names that can never be valid inside a
// BasePathFs. Only Windows needs a check: an absolute OS path there would
// bypass the virtual root entirely, so it is reported as non-existing.
// On *nix the virtual file paths all look absolute already, so every name
// is accepted as-is.
func validateBasePathName(name string) error {
	if runtime.GOOS == "windows" && filepath.IsAbs(name) {
		// An absolute OS path cannot live under the virtual base path.
		// Stripping the base part would not be portable, so reject it.
		return os.ErrNotExist
	}
	return nil
}
// The methods below all follow the same pattern: translate the virtual name
// through RealPath, wrap any translation failure in an *os.PathError with
// the matching Op, then delegate to the source Fs.
func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "chtimes", Path: name, Err: err}
	}
	return b.source.Chtimes(name, atime, mtime)
}
func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "chmod", Path: name, Err: err}
	}
	return b.source.Chmod(name, mode)
}
// Name identifies this filesystem implementation.
func (b *BasePathFs) Name() string {
	return "BasePathFs"
}
func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
	if name, err = b.RealPath(name); err != nil {
		return nil, &os.PathError{Op: "stat", Path: name, Err: err}
	}
	return b.source.Stat(name)
}
// Rename translates both names; either one falling outside the base path
// aborts the operation.
func (b *BasePathFs) Rename(oldname, newname string) (err error) {
	if oldname, err = b.RealPath(oldname); err != nil {
		return &os.PathError{Op: "rename", Path: oldname, Err: err}
	}
	if newname, err = b.RealPath(newname); err != nil {
		return &os.PathError{Op: "rename", Path: newname, Err: err}
	}
	return b.source.Rename(oldname, newname)
}
func (b *BasePathFs) RemoveAll(name string) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "remove_all", Path: name, Err: err}
	}
	return b.source.RemoveAll(name)
}
func (b *BasePathFs) Remove(name string) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "remove", Path: name, Err: err}
	}
	return b.source.Remove(name)
}
// OpenFile translates name and wraps the resulting handle in a BasePathFile
// so that Name() reports the virtual path.
func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
	if name, err = b.RealPath(name); err != nil {
		return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
	}
	sourcef, err := b.source.OpenFile(name, flag, mode)
	if err != nil {
		return nil, err
	}
	// Positional initialization; equivalent to {File: sourcef, path: b.path}
	// as used by Open and Create below.
	return &BasePathFile{sourcef, b.path}, nil
}
func (b *BasePathFs) Open(name string) (f File, err error) {
	if name, err = b.RealPath(name); err != nil {
		return nil, &os.PathError{Op: "open", Path: name, Err: err}
	}
	sourcef, err := b.source.Open(name)
	if err != nil {
		return nil, err
	}
	return &BasePathFile{File: sourcef, path: b.path}, nil
}
func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "mkdir", Path: name, Err: err}
	}
	return b.source.Mkdir(name, mode)
}
func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
	if name, err = b.RealPath(name); err != nil {
		return &os.PathError{Op: "mkdir", Path: name, Err: err}
	}
	return b.source.MkdirAll(name, mode)
}
func (b *BasePathFs) Create(name string) (f File, err error) {
	if name, err = b.RealPath(name); err != nil {
		return nil, &os.PathError{Op: "create", Path: name, Err: err}
	}
	sourcef, err := b.source.Create(name)
	if err != nil {
		return nil, err
	}
	return &BasePathFile{File: sourcef, path: b.path}, nil
}
// LstatIfPossible forwards to the source's Lstater when the source supports
// it; otherwise it falls back to Stat and reports false for the lstat flag.
func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
	name, err := b.RealPath(name)
	if err != nil {
		return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err}
	}
	if lstater, ok := b.source.(Lstater); ok {
		return lstater.LstatIfPossible(name)
	}
	fi, err := b.source.Stat(name)
	return fi, false, err
}
// vim: ts=4 sw=4 noexpandtab nolist syn=go

290
vendor/github.com/spf13/afero/cacheOnReadFs.go generated vendored Normal file
View File

@@ -0,0 +1,290 @@
package afero
import (
"os"
"syscall"
"time"
)
// If the cache duration is 0, cache time will be unlimited, i.e. once
// a file is in the layer, the base will never be read again for this file.
//
// For cache times greater than 0, the modification time of a file is
// checked. Note that a lot of file system implementations only allow a
// resolution of a second for timestamps... or as the godoc for os.Chtimes()
// states: "The underlying filesystem may truncate or round the values to a
// less precise time unit."
//
// This caching union will forward all write calls also to the base file
// system first. To prevent writing to the base Fs, wrap it in a read-only
// filter - Note: this will also make the overlay read-only, for writing files
// in the overlay, use the overlay Fs directly, not via the union Fs.
type CacheOnReadFs struct {
	base Fs
	layer Fs
	cacheTime time.Duration
}
// NewCacheOnReadFs builds a caching union over base with layer as the
// overlay; cacheTime of 0 caches forever (see the type comment above).
func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
	return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
}
// cacheState classifies where a file lives relative to the cache layer.
type cacheState int
const (
	// not present in the overlay, unknown if it exists in the base:
	cacheMiss cacheState = iota
	// present in the overlay and in base, base file is newer:
	cacheStale
	// present in the overlay - with cache time == 0 it may exist in the base,
	// with cacheTime > 0 it exists in the base and is same age or newer in the
	// overlay
	cacheHit
	// happens if someone writes directly to the overlay without
	// going through this union
	cacheLocal
)
// cacheStatus stats the overlay (and, when the cache entry may have aged
// out, the base) to decide the cacheState of name. The returned FileInfo is
// from the base for cacheStale and from the layer otherwise.
func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
	var lfi, bfi os.FileInfo
	lfi, err = u.layer.Stat(name)
	if err == nil {
		if u.cacheTime == 0 {
			return cacheHit, lfi, nil
		}
		if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
			bfi, err = u.base.Stat(name)
			if err != nil {
				// Base no longer has it: treat the overlay copy as local.
				return cacheLocal, lfi, nil
			}
			if bfi.ModTime().After(lfi.ModTime()) {
				return cacheStale, bfi, nil
			}
		}
		return cacheHit, lfi, nil
	}
	if err == syscall.ENOENT || os.IsNotExist(err) {
		return cacheMiss, nil, nil
	}
	return cacheMiss, nil, err
}
// copyToLayer refreshes the overlay copy of name from the base.
func (u *CacheOnReadFs) copyToLayer(name string) error {
	return copyToLayer(u.base, u.layer, name)
}
// Chtimes applies the change to the base first (refreshing a stale or
// missing cache entry beforehand), then mirrors it into the layer.
// cacheLocal entries exist only in the overlay, so the base is skipped.
func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
	st, _, err := u.cacheStatus(name)
	if err != nil {
		return err
	}
	switch st {
	case cacheLocal:
	case cacheHit:
		err = u.base.Chtimes(name, atime, mtime)
	case cacheStale, cacheMiss:
		if err := u.copyToLayer(name); err != nil {
			return err
		}
		err = u.base.Chtimes(name, atime, mtime)
	}
	if err != nil {
		return err
	}
	return u.layer.Chtimes(name, atime, mtime)
}
// Chmod follows the same base-then-layer pattern as Chtimes.
func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
	st, _, err := u.cacheStatus(name)
	if err != nil {
		return err
	}
	switch st {
	case cacheLocal:
	case cacheHit:
		err = u.base.Chmod(name, mode)
	case cacheStale, cacheMiss:
		if err := u.copyToLayer(name); err != nil {
			return err
		}
		err = u.base.Chmod(name, mode)
	}
	if err != nil {
		return err
	}
	return u.layer.Chmod(name, mode)
}
// Stat reuses the FileInfo gathered by cacheStatus; only a cache miss needs
// an extra round trip to the base.
func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
	st, fi, err := u.cacheStatus(name)
	if err != nil {
		return nil, err
	}
	switch st {
	case cacheMiss:
		return u.base.Stat(name)
	default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo
		return fi, nil
	}
}
// Rename renames in the base first (after refreshing the overlay copy when
// needed), then mirrors the rename into the layer.
func (u *CacheOnReadFs) Rename(oldname, newname string) error {
	st, _, err := u.cacheStatus(oldname)
	if err != nil {
		return err
	}
	switch st {
	case cacheLocal:
	case cacheHit:
		err = u.base.Rename(oldname, newname)
	case cacheStale, cacheMiss:
		if err := u.copyToLayer(oldname); err != nil {
			return err
		}
		err = u.base.Rename(oldname, newname)
	}
	if err != nil {
		return err
	}
	return u.layer.Rename(oldname, newname)
}
// Remove deletes from the base (unless the file is overlay-local), then
// from the layer.
func (u *CacheOnReadFs) Remove(name string) error {
	st, _, err := u.cacheStatus(name)
	if err != nil {
		return err
	}
	switch st {
	case cacheLocal:
	case cacheHit, cacheStale, cacheMiss:
		err = u.base.Remove(name)
	}
	if err != nil {
		return err
	}
	return u.layer.Remove(name)
}
// RemoveAll mirrors Remove for whole directory trees.
func (u *CacheOnReadFs) RemoveAll(name string) error {
	st, _, err := u.cacheStatus(name)
	if err != nil {
		return err
	}
	switch st {
	case cacheLocal:
	case cacheHit, cacheStale, cacheMiss:
		err = u.base.RemoveAll(name)
	}
	if err != nil {
		return err
	}
	return u.layer.RemoveAll(name)
}
// OpenFile refreshes the cache when needed, then either opens read-only from
// the layer or, for any write-capable flag, opens both base and layer and
// returns a UnionFile that writes through to both.
func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	st, _, err := u.cacheStatus(name)
	if err != nil {
		return nil, err
	}
	switch st {
	case cacheLocal, cacheHit:
	default:
		if err := u.copyToLayer(name); err != nil {
			return nil, err
		}
	}
	// NOTE(review): syscall.O_RDWR is mixed in with the os.O_* flags here;
	// per the os package these constants alias the syscall values.
	if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
		bfi, err := u.base.OpenFile(name, flag, perm)
		if err != nil {
			return nil, err
		}
		lfi, err := u.layer.OpenFile(name, flag, perm)
		if err != nil {
			bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
			return nil, err
		}
		return &UnionFile{Base: bfi, Layer: lfi}, nil
	}
	return u.layer.OpenFile(name, flag, perm)
}
// Open serves regular files from the freshest copy (caching into the layer
// on miss/stale) and serves directories as a union of base and layer.
func (u *CacheOnReadFs) Open(name string) (File, error) {
	st, fi, err := u.cacheStatus(name)
	if err != nil {
		return nil, err
	}
	switch st {
	case cacheLocal:
		return u.layer.Open(name)
	case cacheMiss:
		bfi, err := u.base.Stat(name)
		if err != nil {
			return nil, err
		}
		if bfi.IsDir() {
			return u.base.Open(name)
		}
		if err := u.copyToLayer(name); err != nil {
			return nil, err
		}
		return u.layer.Open(name)
	case cacheStale:
		if !fi.IsDir() {
			if err := u.copyToLayer(name); err != nil {
				return nil, err
			}
			return u.layer.Open(name)
		}
	case cacheHit:
		if !fi.IsDir() {
			return u.layer.Open(name)
		}
	}
	// the dirs from cacheHit, cacheStale fall down here:
	bfile, _ := u.base.Open(name)
	lfile, err := u.layer.Open(name)
	if err != nil && bfile == nil {
		return nil, err
	}
	return &UnionFile{Base: bfile, Layer: lfile}, nil
}
// Mkdir creates the directory in the base, then ensures it exists in the
// layer as well.
func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
	err := u.base.Mkdir(name, perm)
	if err != nil {
		return err
	}
	return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
}
// Name identifies this filesystem implementation.
func (u *CacheOnReadFs) Name() string {
	return "CacheOnReadFs"
}
// MkdirAll creates the full path in the base first, then in the layer.
func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
	err := u.base.MkdirAll(name, perm)
	if err != nil {
		return err
	}
	return u.layer.MkdirAll(name, perm)
}
// Create makes the file in both base and layer and returns a UnionFile so
// writes reach both copies. A failure in the layer closes the base handle.
func (u *CacheOnReadFs) Create(name string) (File, error) {
	bfh, err := u.base.Create(name)
	if err != nil {
		return nil, err
	}
	lfh, err := u.layer.Create(name)
	if err != nil {
		// oops, see comment about OS_TRUNC above, should we remove? then we have to
		// remember if the file did not exist before
		bfh.Close()
		return nil, err
	}
	return &UnionFile{Base: bfh, Layer: lfh}, nil
}

22
vendor/github.com/spf13/afero/const_bsds.go generated vendored Normal file
View File

@@ -0,0 +1,22 @@
// Copyright © 2016 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build darwin openbsd freebsd netbsd dragonfly
package afero
import (
"syscall"
)
// BADFD is the platform's "bad file descriptor" errno; on Darwin and the
// BSDs (see the build tags above) that is EBADF.
const BADFD = syscall.EBADF

25
vendor/github.com/spf13/afero/const_win_unix.go generated vendored Normal file
View File

@@ -0,0 +1,25 @@
// Copyright © 2016 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !darwin
// +build !openbsd
// +build !freebsd
// +build !dragonfly
// +build !netbsd
package afero
import (
"syscall"
)
// BADFD is the platform's "bad file descriptor" errno; on the platforms not
// excluded by the build tags above that is EBADFD.
const BADFD = syscall.EBADFD

293
vendor/github.com/spf13/afero/copyOnWriteFs.go generated vendored Normal file
View File

@@ -0,0 +1,293 @@
package afero
import (
"fmt"
"os"
"path/filepath"
"syscall"
"time"
)
// Compile-time check that CopyOnWriteFs implements the optional Lstater
// extension interface.
var _ Lstater = (*CopyOnWriteFs)(nil)
// The CopyOnWriteFs is a union filesystem: a read only base file system with
// a possibly writeable layer on top. Changes to the file system will only
// be made in the overlay: Changing an existing file in the base layer which
// is not present in the overlay will copy the file to the overlay ("changing"
// includes also calls to e.g. Chtimes() and Chmod()).
//
// Reading directories is currently only supported via Open(), not OpenFile().
type CopyOnWriteFs struct {
	base Fs
	layer Fs
}
// NewCopyOnWriteFs builds a copy-on-write union of base (read side) and
// layer (write side).
func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
	return &CopyOnWriteFs{base: base, layer: layer}
}
// Returns true if the file is not in the overlay
// (i.e. the base copy is the authoritative one). Not-exist errors from the
// base are normalized away; any other base error is returned alongside true.
func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
	if _, err := u.layer.Stat(name); err == nil {
		return false, nil
	}
	_, err := u.base.Stat(name)
	if err != nil {
		if oerr, ok := err.(*os.PathError); ok {
			if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
				return false, nil
			}
		}
		if err == syscall.ENOENT {
			return false, nil
		}
	}
	return true, err
}
// copyToLayer copies the base version of name into the overlay.
func (u *CopyOnWriteFs) copyToLayer(name string) error {
	return copyToLayer(u.base, u.layer, name)
}
// Chtimes copies a base-only file up into the overlay, then applies the
// time change to the overlay copy only (the base is never written).
func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
	b, err := u.isBaseFile(name)
	if err != nil {
		return err
	}
	if b {
		if err := u.copyToLayer(name); err != nil {
			return err
		}
	}
	return u.layer.Chtimes(name, atime, mtime)
}
// Chmod follows the same copy-up-then-modify-overlay pattern as Chtimes.
func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
	b, err := u.isBaseFile(name)
	if err != nil {
		return err
	}
	if b {
		if err := u.copyToLayer(name); err != nil {
			return err
		}
	}
	return u.layer.Chmod(name, mode)
}
// Stat prefers the overlay; a not-exist error there falls through to the
// base, any other error is reported as-is.
func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
	fi, err := u.layer.Stat(name)
	if err != nil {
		isNotExist := u.isNotExist(err)
		if isNotExist {
			return u.base.Stat(name)
		}
		return nil, err
	}
	return fi, nil
}
// LstatIfPossible tries the overlay's Lstater first, then the base's, and
// finally falls back to plain Stat (reporting false for the lstat flag).
func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
	llayer, ok1 := u.layer.(Lstater)
	lbase, ok2 := u.base.(Lstater)
	if ok1 {
		fi, b, err := llayer.LstatIfPossible(name)
		if err == nil {
			return fi, b, nil
		}
		if !u.isNotExist(err) {
			return nil, b, err
		}
	}
	if ok2 {
		fi, b, err := lbase.LstatIfPossible(name)
		if err == nil {
			return fi, b, nil
		}
		if !u.isNotExist(err) {
			return nil, b, err
		}
	}
	fi, err := u.Stat(name)
	return fi, false, err
}
// isNotExist reports whether err (possibly wrapped in *os.PathError) means
// the file does not exist.
func (u *CopyOnWriteFs) isNotExist(err error) bool {
	if e, ok := err.(*os.PathError); ok {
		err = e.Err
	}
	if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR {
		return true
	}
	return false
}
// Renaming files present only in the base layer is not permitted
// (the base is read-only); only overlay entries can be renamed.
func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
	b, err := u.isBaseFile(oldname)
	if err != nil {
		return err
	}
	if b {
		return syscall.EPERM
	}
	return u.layer.Rename(oldname, newname)
}
// Removing files present only in the base layer is not permitted. If
// a file is present in the base layer and the overlay, only the overlay
// will be removed.
func (u *CopyOnWriteFs) Remove(name string) error {
	err := u.layer.Remove(name)
	switch err {
	case syscall.ENOENT:
		_, err = u.base.Stat(name)
		if err == nil {
			return syscall.EPERM
		}
		return syscall.ENOENT
	default:
		return err
	}
}
// RemoveAll mirrors Remove's semantics for whole directory trees.
func (u *CopyOnWriteFs) RemoveAll(name string) error {
	err := u.layer.RemoveAll(name)
	switch err {
	case syscall.ENOENT:
		_, err = u.base.Stat(name)
		if err == nil {
			return syscall.EPERM
		}
		return syscall.ENOENT
	default:
		return err
	}
}
// OpenFile routes read-only opens to whichever layer holds the file, and
// write-capable opens to the overlay, copying a base-only file up first and
// creating the parent directory in the overlay when it only exists in the
// base.
func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	b, err := u.isBaseFile(name)
	if err != nil {
		return nil, err
	}
	if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
		if b {
			if err = u.copyToLayer(name); err != nil {
				return nil, err
			}
			return u.layer.OpenFile(name, flag, perm)
		}
		dir := filepath.Dir(name)
		isaDir, err := IsDir(u.base, dir)
		if err != nil && !os.IsNotExist(err) {
			return nil, err
		}
		if isaDir {
			// Parent exists in the base: mirror it into the overlay so the
			// new file can be created there.
			if err = u.layer.MkdirAll(dir, 0777); err != nil {
				return nil, err
			}
			return u.layer.OpenFile(name, flag, perm)
		}
		isaDir, err = IsDir(u.layer, dir)
		if err != nil {
			return nil, err
		}
		if isaDir {
			return u.layer.OpenFile(name, flag, perm)
		}
		return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
	}
	if b {
		return u.base.OpenFile(name, flag, perm)
	}
	return u.layer.OpenFile(name, flag, perm)
}
// This function handles the 9 different possibilities caused
// by the union which are the intersection of the following...
// layer: doesn't exist, exists as a file, and exists as a directory
// base: doesn't exist, exists as a file, and exists as a directory
func (u *CopyOnWriteFs) Open(name string) (File, error) {
	// Since the overlay overrides the base we check that first
	b, err := u.isBaseFile(name)
	if err != nil {
		return nil, err
	}
	// If overlay doesn't exist, return the base (base state irrelevant)
	if b {
		return u.base.Open(name)
	}
	// If overlay is a file, return it (base state irrelevant)
	dir, err := IsDir(u.layer, name)
	if err != nil {
		return nil, err
	}
	if !dir {
		return u.layer.Open(name)
	}
	// Overlay is a directory, base state now matters.
	// Base state has 3 states to check but 2 outcomes:
	// A. It's a file or non-readable in the base (return just the overlay)
	// B. It's an accessible directory in the base (return a UnionFile)
	// If base is file or nonreadable, return overlay
	dir, err = IsDir(u.base, name)
	if !dir || err != nil {
		return u.layer.Open(name)
	}
	// Both base & layer are directories
	// Return union file (if opens are without error)
	bfile, bErr := u.base.Open(name)
	lfile, lErr := u.layer.Open(name)
	// If either have errors at this point something is very wrong. Return nil and the errors
	if bErr != nil || lErr != nil {
		return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
	}
	return &UnionFile{Base: bfile, Layer: lfile}, nil
}
// Mkdir creates the directory in the overlay; if it already exists as a
// directory in the base, ErrFileExists is returned.
func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
	dir, err := IsDir(u.base, name)
	if err != nil {
		return u.layer.MkdirAll(name, perm)
	}
	if dir {
		return ErrFileExists
	}
	return u.layer.MkdirAll(name, perm)
}
// Name identifies this filesystem implementation.
func (u *CopyOnWriteFs) Name() string {
	return "CopyOnWriteFs"
}
// MkdirAll creates the path in the overlay; an existing base directory is a
// no-op, matching os.MkdirAll.
func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
	dir, err := IsDir(u.base, name)
	if err != nil {
		return u.layer.MkdirAll(name, perm)
	}
	if dir {
		// This is in line with how os.MkdirAll behaves.
		return nil
	}
	return u.layer.MkdirAll(name, perm)
}
// Create is OpenFile with the usual create/truncate/read-write flags.
func (u *CopyOnWriteFs) Create(name string) (File, error) {
	return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
}

3
vendor/github.com/spf13/afero/go.mod generated vendored Normal file
View File

@@ -0,0 +1,3 @@
module github.com/spf13/afero
require golang.org/x/text v0.3.0

2
vendor/github.com/spf13/afero/go.sum generated vendored Normal file
View File

@@ -0,0 +1,2 @@
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

110
vendor/github.com/spf13/afero/httpFs.go generated vendored Normal file
View File

@@ -0,0 +1,110 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"errors"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
)
type httpDir struct {
basePath string
fs HttpFs
}
// Open implements http.FileSystem. It rejects names containing NUL or,
// on non-slash-separated platforms, the OS path separator, then resolves
// name (cleaned, rooted at "/") relative to basePath on the wrapped fs.
func (d httpDir) Open(name string) (http.File, error) {
	if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
		strings.Contains(name, "\x00") {
		return nil, errors.New("http: invalid character in file path")
	}
	// basePath is already a string; the redundant string(...) conversion
	// has been removed.
	dir := d.basePath
	if dir == "" {
		dir = "."
	}
	f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
	if err != nil {
		return nil, err
	}
	return f, nil
}
// HttpFs wraps an afero Fs so it can be served through net/http. Every
// operation is delegated unchanged to the underlying source filesystem.
type HttpFs struct {
	source Fs
}

// NewHttpFs returns an HttpFs delegating to source.
func NewHttpFs(source Fs) *HttpFs {
	return &HttpFs{source: source}
}

// Dir returns an http.FileSystem-compatible view rooted at s.
func (h HttpFs) Dir(s string) *httpDir {
	return &httpDir{basePath: s, fs: h}
}

// Name identifies this filesystem wrapper.
func (h HttpFs) Name() string { return "h HttpFs" }

// Create delegates to the source filesystem.
func (h HttpFs) Create(name string) (File, error) {
	return h.source.Create(name)
}

// Chmod delegates to the source filesystem.
func (h HttpFs) Chmod(name string, mode os.FileMode) error {
	return h.source.Chmod(name, mode)
}

// Chtimes delegates to the source filesystem.
func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
	return h.source.Chtimes(name, atime, mtime)
}

// Mkdir delegates to the source filesystem.
func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
	return h.source.Mkdir(name, perm)
}

// MkdirAll delegates to the source filesystem.
func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
	return h.source.MkdirAll(path, perm)
}

// Open opens name on the source Fs and returns it as an http.File.
// If the opened file does not implement http.File, both results are nil.
func (h HttpFs) Open(name string) (http.File, error) {
	f, err := h.source.Open(name)
	if err == nil {
		if httpfile, ok := f.(http.File); ok {
			return httpfile, nil
		}
	}
	return nil, err
}

// OpenFile delegates to the source filesystem.
func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	return h.source.OpenFile(name, flag, perm)
}

// Remove delegates to the source filesystem.
func (h HttpFs) Remove(name string) error {
	return h.source.Remove(name)
}

// RemoveAll delegates to the source filesystem.
func (h HttpFs) RemoveAll(path string) error {
	return h.source.RemoveAll(path)
}

// Rename delegates to the source filesystem.
func (h HttpFs) Rename(oldname, newname string) error {
	return h.source.Rename(oldname, newname)
}

// Stat delegates to the source filesystem.
func (h HttpFs) Stat(name string) (os.FileInfo, error) {
	return h.source.Stat(name)
}

230
vendor/github.com/spf13/afero/ioutil.go generated vendored Normal file
View File

@@ -0,0 +1,230 @@
// Copyright ©2015 The Go Authors
// Copyright ©2015 Steve Francia <spf@spf13.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"bytes"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"sync"
"time"
)
// byName implements sort.Interface over []os.FileInfo, ordering entries
// by their Name().
type byName []os.FileInfo

func (f byName) Len() int           { return len(f) }
func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
func (f byName) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }

// ReadDir reads the directory named by dirname and returns
// a list of sorted directory entries.
func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) {
	return ReadDir(a.Fs, dirname)
}

// ReadDir is the Fs-level implementation: it opens dirname on fs, reads
// every entry, closes the handle, and returns the entries sorted by name.
func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) {
	f, err := fs.Open(dirname)
	if err != nil {
		return nil, err
	}
	list, err := f.Readdir(-1)
	f.Close()
	if err != nil {
		return nil, err
	}
	sort.Sort(byName(list))
	return list, nil
}
// ReadFile reads the file named by filename and returns the contents.
// A successful call returns err == nil, not err == EOF. Because ReadFile
// reads the whole file, it does not treat an EOF from Read as an error
// to be reported.
func (a Afero) ReadFile(filename string) ([]byte, error) {
	return ReadFile(a.Fs, filename)
}

// ReadFile is the Fs-level implementation of Afero.ReadFile. It sizes
// the read buffer from Stat when available to avoid reallocations.
func ReadFile(fs Fs, filename string) ([]byte, error) {
	f, err := fs.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	// It's a good but not certain bet that FileInfo will tell us exactly how much to
	// read, so let's try it but be prepared for the answer to be wrong.
	var n int64
	if fi, err := f.Stat(); err == nil {
		// Don't preallocate a huge buffer, just in case.
		if size := fi.Size(); size < 1e9 {
			n = size
		}
	}
	// As initial capacity for readAll, use n + a little extra in case Size is zero,
	// and to avoid another allocation after Read has filled the buffer. The readAll
	// call will read into its allocated internal buffer cheaply. If the size was
	// wrong, we'll either waste some space off the end or reallocate as needed, but
	// in the overwhelmingly common case we'll get it just right.
	return readAll(f, n+bytes.MinRead)
}
// readAll reads from r until an error or EOF and returns the data it read
// from the internal buffer allocated with a specified capacity.
func readAll(r io.Reader, capacity int64) (b []byte, err error) {
buf := bytes.NewBuffer(make([]byte, 0, capacity))
// If the buffer overflows, we will get bytes.ErrTooLarge.
// Return that as an error. Any other panic remains.
defer func() {
e := recover()
if e == nil {
return
}
if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
err = panicErr
} else {
panic(e)
}
}()
_, err = buf.ReadFrom(r)
return buf.Bytes(), err
}
// ReadAll reads from r until an error or EOF and returns the data it read.
// A successful call returns err == nil, not err == EOF. Because ReadAll is
// defined to read from src until EOF, it does not treat an EOF from Read
// as an error to be reported.
func ReadAll(r io.Reader) ([]byte, error) {
	return readAll(r, bytes.MinRead)
}
// WriteFile writes data to a file named by filename.
// If the file does not exist, WriteFile creates it with permissions perm;
// otherwise WriteFile truncates it before writing.
func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error {
	return WriteFile(a.Fs, filename, data, perm)
}

// WriteFile is the Fs-level implementation of Afero.WriteFile. A short
// write with no explicit error is reported as io.ErrShortWrite; a Close
// failure is surfaced only when the write itself succeeded.
func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
	f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}
// Random number state.
// We generate random temporary file names so that there's a good
// chance the file doesn't exist yet - keeps the number of tries in
// TempFile to a minimum.
var rand uint32
var randmu sync.Mutex

// reseed derives a fresh seed from the clock and the process id.
func reseed() uint32 {
	return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}

// nextSuffix returns a pseudo-random 9-digit string for temp-file names,
// advancing the shared linear-congruential state under randmu.
func nextSuffix() string {
	randmu.Lock()
	r := rand
	if r == 0 {
		r = reseed()
	}
	r = r*1664525 + 1013904223 // constants from Numerical Recipes
	rand = r
	randmu.Unlock()
	// 1e9 + r%1e9 is always 10 digits; dropping the leading "1" yields a
	// zero-padded 9-digit suffix.
	return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
// TempFile creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading
// and writing, and returns the resulting *File.
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func (a Afero) TempFile(dir, prefix string) (f File, err error) {
	return TempFile(a.Fs, dir, prefix)
}

// TempFile is the Fs-level implementation of Afero.TempFile. O_EXCL
// guarantees this call created the file; name collisions retry with a
// new suffix, up to 10000 attempts.
func TempFile(fs Fs, dir, prefix string) (f File, err error) {
	if dir == "" {
		dir = os.TempDir()
	}
	nconflict := 0
	for i := 0; i < 10000; i++ {
		name := filepath.Join(dir, prefix+nextSuffix())
		f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if os.IsExist(err) {
			// After many collisions in a row, reseed to escape a bad
			// pseudo-random sequence.
			if nconflict++; nconflict > 10 {
				randmu.Lock()
				rand = reseed()
				randmu.Unlock()
			}
			continue
		}
		break
	}
	return
}
// TempDir creates a new temporary directory in the directory dir
// with a name beginning with prefix and returns the path of the
// new directory. If dir is the empty string, TempDir uses the
// default directory for temporary files (see os.TempDir).
// Multiple programs calling TempDir simultaneously
// will not choose the same directory. It is the caller's responsibility
// to remove the directory when no longer needed.
func (a Afero) TempDir(dir, prefix string) (name string, err error) {
	return TempDir(a.Fs, dir, prefix)
}
// TempDir is the Fs-level implementation of Afero.TempDir. Mkdir's
// exclusive-create semantics guarantee this call created the directory;
// name collisions retry with a fresh suffix, up to 10000 attempts.
func TempDir(fs Fs, dir, prefix string) (name string, err error) {
	if dir == "" {
		dir = os.TempDir()
	}
	conflicts := 0
	for attempt := 0; attempt < 10000; attempt++ {
		candidate := filepath.Join(dir, prefix+nextSuffix())
		err = fs.Mkdir(candidate, 0700)
		if err == nil {
			return candidate, nil
		}
		if !os.IsExist(err) {
			return "", err
		}
		// Name collision: after several in a row, reseed the generator
		// to escape a bad pseudo-random sequence.
		if conflicts++; conflicts > 10 {
			randmu.Lock()
			rand = reseed()
			randmu.Unlock()
		}
	}
	return
}

27
vendor/github.com/spf13/afero/lstater.go generated vendored Normal file
View File

@@ -0,0 +1,27 @@
// Copyright © 2018 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
)
// Lstater is an optional interface in Afero. It is only implemented by the
// filesystems saying so.
// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
// Else it will call Stat.
// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
type Lstater interface {
	LstatIfPossible(name string) (os.FileInfo, bool, error)
}

110
vendor/github.com/spf13/afero/match.go generated vendored Normal file
View File

@@ -0,0 +1,110 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2009 The Go Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"path/filepath"
"sort"
"strings"
)
// Glob returns the names of all files matching pattern or nil
// if there is no matching file. The syntax of patterns is the same
// as in Match. The pattern may describe hierarchical names such as
// /usr/*/bin/ed (assuming the Separator is '/').
//
// Glob ignores file system errors such as I/O errors reading directories.
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
//
// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
// built-ins from that package.
func Glob(fs Fs, pattern string) (matches []string, err error) {
	if !hasMeta(pattern) {
		// No metacharacters: the pattern is a literal path; report it
		// only if it exists. Lstat not supported by all filesystems.
		if _, err = lstatIfPossible(fs, pattern); err != nil {
			return nil, nil
		}
		return []string{pattern}, nil
	}
	dir, file := filepath.Split(pattern)
	switch dir {
	case "":
		dir = "."
	case string(filepath.Separator):
		// nothing
	default:
		dir = dir[0 : len(dir)-1] // chop off trailing separator
	}
	if !hasMeta(dir) {
		return glob(fs, dir, file, nil)
	}
	// The directory part itself contains metacharacters: expand it
	// recursively, then match the file part inside each expansion.
	var m []string
	m, err = Glob(fs, dir)
	if err != nil {
		return
	}
	for _, d := range m {
		matches, err = glob(fs, d, file, matches)
		if err != nil {
			return
		}
	}
	return
}
// glob searches for files matching pattern in the directory dir
// and appends them to matches. If the directory cannot be
// opened, it returns the existing matches. New matches are
// added in lexicographical order.
func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
	m = matches
	// Stat/open failures and non-directories silently return the matches
	// accumulated so far, per the contract above.
	fi, err := fs.Stat(dir)
	if err != nil {
		return
	}
	if !fi.IsDir() {
		return
	}
	d, err := fs.Open(dir)
	if err != nil {
		return
	}
	defer d.Close()
	names, _ := d.Readdirnames(-1)
	sort.Strings(names)
	for _, n := range names {
		matched, err := filepath.Match(pattern, n)
		if err != nil {
			// Only a malformed pattern (ErrBadPattern) is reported.
			return m, err
		}
		if matched {
			m = append(m, filepath.Join(dir, n))
		}
	}
	return
}
// hasMeta reports whether path contains any of the magic characters
// ('*', '?' or '[') recognized by Match.
// TODO(niemeyer): Should other magic characters be added here?
func hasMeta(path string) bool {
	return strings.ContainsAny(path, "*?[")
}

37
vendor/github.com/spf13/afero/mem/dir.go generated vendored Normal file
View File

@@ -0,0 +1,37 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
// Dir abstracts the child-listing operations needed by the in-memory
// filesystem; DirMap is the implementation used in this package.
type Dir interface {
	Len() int
	Names() []string
	Files() []*FileData
	Add(*FileData)
	Remove(*FileData)
}

// RemoveFromMemDir removes f from dir's child list.
func RemoveFromMemDir(dir *FileData, f *FileData) {
	dir.memDir.Remove(f)
}

// AddToMemDir adds f to dir's child list.
func AddToMemDir(dir *FileData, f *FileData) {
	dir.memDir.Add(f)
}

// InitializeDir lazily marks d as a directory and gives it an empty
// child map, if it does not already have one.
func InitializeDir(d *FileData) {
	if d.memDir == nil {
		d.dir = true
		d.memDir = &DirMap{}
	}
}

43
vendor/github.com/spf13/afero/mem/dirmap.go generated vendored Normal file
View File

@@ -0,0 +1,43 @@
// Copyright © 2015 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
import "sort"
// DirMap is the Dir implementation used by the in-memory filesystem:
// a map from entry name to its FileData.
type DirMap map[string]*FileData

func (m DirMap) Len() int           { return len(m) }
func (m DirMap) Add(f *FileData)    { m[f.name] = f }
func (m DirMap) Remove(f *FileData) { delete(m, f.name) }

// Files returns the directory's entries sorted by name.
func (m DirMap) Files() (files []*FileData) {
	for _, f := range m {
		files = append(files, f)
	}
	sort.Sort(filesSorter(files))
	return files
}

// implement sort.Interface for []*FileData
type filesSorter []*FileData

func (s filesSorter) Len() int           { return len(s) }
func (s filesSorter) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name }

// Names returns the entry names in map-iteration (unspecified) order.
func (m DirMap) Names() (names []string) {
	for x := range m {
		names = append(names, x)
	}
	return names
}

317
vendor/github.com/spf13/afero/mem/file.go generated vendored Normal file
View File

@@ -0,0 +1,317 @@
// Copyright © 2015 Steve Francia <spf@spf13.com>.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
import (
"bytes"
"errors"
"io"
"os"
"path/filepath"
"sync"
"sync/atomic"
)
import "time"
const FilePathSeparator = string(filepath.Separator)
type File struct {
// atomic requires 64-bit alignment for struct field access
at int64
readDirCount int64
closed bool
readOnly bool
fileData *FileData
}
func NewFileHandle(data *FileData) *File {
return &File{fileData: data}
}
func NewReadOnlyFileHandle(data *FileData) *File {
return &File{fileData: data, readOnly: true}
}
func (f File) Data() *FileData {
return f.fileData
}
type FileData struct {
sync.Mutex
name string
data []byte
memDir Dir
dir bool
mode os.FileMode
modtime time.Time
}
func (d *FileData) Name() string {
d.Lock()
defer d.Unlock()
return d.name
}
func CreateFile(name string) *FileData {
return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()}
}
func CreateDir(name string) *FileData {
return &FileData{name: name, memDir: &DirMap{}, dir: true}
}
func ChangeFileName(f *FileData, newname string) {
f.Lock()
f.name = newname
f.Unlock()
}
func SetMode(f *FileData, mode os.FileMode) {
f.Lock()
f.mode = mode
f.Unlock()
}
func SetModTime(f *FileData, mtime time.Time) {
f.Lock()
setModTime(f, mtime)
f.Unlock()
}
func setModTime(f *FileData, mtime time.Time) {
f.modtime = mtime
}
func GetFileInfo(f *FileData) *FileInfo {
return &FileInfo{f}
}
func (f *File) Open() error {
atomic.StoreInt64(&f.at, 0)
atomic.StoreInt64(&f.readDirCount, 0)
f.fileData.Lock()
f.closed = false
f.fileData.Unlock()
return nil
}
func (f *File) Close() error {
f.fileData.Lock()
f.closed = true
if !f.readOnly {
setModTime(f.fileData, time.Now())
}
f.fileData.Unlock()
return nil
}
func (f *File) Name() string {
return f.fileData.Name()
}
func (f *File) Stat() (os.FileInfo, error) {
return &FileInfo{f.fileData}, nil
}
func (f *File) Sync() error {
return nil
}
func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
if !f.fileData.dir {
return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")}
}
var outLength int64
f.fileData.Lock()
files := f.fileData.memDir.Files()[f.readDirCount:]
if count > 0 {
if len(files) < count {
outLength = int64(len(files))
} else {
outLength = int64(count)
}
if len(files) == 0 {
err = io.EOF
}
} else {
outLength = int64(len(files))
}
f.readDirCount += outLength
f.fileData.Unlock()
res = make([]os.FileInfo, outLength)
for i := range res {
res[i] = &FileInfo{files[i]}
}
return res, err
}
func (f *File) Readdirnames(n int) (names []string, err error) {
fi, err := f.Readdir(n)
names = make([]string, len(fi))
for i, f := range fi {
_, names[i] = filepath.Split(f.Name())
}
return names, err
}
// Read copies up to len(b) bytes from the file's data starting at the
// current offset and advances the offset by the number of bytes read.
// It returns io.EOF at end of data and io.ErrUnexpectedEOF when the
// offset has been positioned past the end.
func (f *File) Read(b []byte) (n int, err error) {
	f.fileData.Lock()
	defer f.fileData.Unlock()
	if f.closed { // idiom fix: compare booleans directly, not "== true"
		return 0, ErrFileClosed
	}
	if len(b) > 0 && int(f.at) == len(f.fileData.data) {
		return 0, io.EOF
	}
	if int(f.at) > len(f.fileData.data) {
		return 0, io.ErrUnexpectedEOF
	}
	if len(f.fileData.data)-int(f.at) >= len(b) {
		n = len(b)
	} else {
		n = len(f.fileData.data) - int(f.at)
	}
	copy(b, f.fileData.data[f.at:f.at+int64(n)])
	atomic.AddInt64(&f.at, int64(n))
	return
}
func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
atomic.StoreInt64(&f.at, off)
return f.Read(b)
}
// Truncate resizes the file to size bytes, zero-filling when growing
// (matching os.File.Truncate). It fails on closed or read-only handles
// and on negative sizes.
//
// NOTE(review): unlike Read/Write, this method mutates fileData without
// holding fileData's lock — confirm whether that is intended upstream.
func (f *File) Truncate(size int64) error {
	if f.closed { // idiom fix: compare booleans directly, not "== true"
		return ErrFileClosed
	}
	if f.readOnly {
		return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
	}
	if size < 0 {
		return ErrOutOfRange
	}
	if size > int64(len(f.fileData.data)) {
		// Grow by appending zero bytes up to the requested size.
		diff := size - int64(len(f.fileData.data))
		f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{0}, int(diff))...)
	} else {
		f.fileData.data = f.fileData.data[0:size]
	}
	setModTime(f.fileData, time.Now())
	return nil
}
// Seek sets the offset for the next Read or Write. whence uses the
// classic values: 0 = absolute, 1 = relative to the current offset,
// 2 = relative to the end of the data.
func (f *File) Seek(offset int64, whence int) (int64, error) {
	if f.closed { // idiom fix: compare booleans directly, not "== true"
		return 0, ErrFileClosed
	}
	switch whence {
	case 0:
		atomic.StoreInt64(&f.at, offset)
	case 1:
		// offset is already int64; redundant conversion removed.
		atomic.AddInt64(&f.at, offset)
	case 2:
		atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
	}
	return f.at, nil
}
func (f *File) Write(b []byte) (n int, err error) {
if f.readOnly {
return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
}
n = len(b)
cur := atomic.LoadInt64(&f.at)
f.fileData.Lock()
defer f.fileData.Unlock()
diff := cur - int64(len(f.fileData.data))
var tail []byte
if n+int(cur) < len(f.fileData.data) {
tail = f.fileData.data[n+int(cur):]
}
if diff > 0 {
f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...)
f.fileData.data = append(f.fileData.data, tail...)
} else {
f.fileData.data = append(f.fileData.data[:cur], b...)
f.fileData.data = append(f.fileData.data, tail...)
}
setModTime(f.fileData, time.Now())
atomic.StoreInt64(&f.at, int64(len(f.fileData.data)))
return
}
func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
atomic.StoreInt64(&f.at, off)
return f.Write(b)
}
func (f *File) WriteString(s string) (ret int, err error) {
return f.Write([]byte(s))
}
func (f *File) Info() *FileInfo {
return &FileInfo{f.fileData}
}
type FileInfo struct {
*FileData
}
// Implements os.FileInfo
func (s *FileInfo) Name() string {
s.Lock()
_, name := filepath.Split(s.name)
s.Unlock()
return name
}
func (s *FileInfo) Mode() os.FileMode {
s.Lock()
defer s.Unlock()
return s.mode
}
func (s *FileInfo) ModTime() time.Time {
s.Lock()
defer s.Unlock()
return s.modtime
}
func (s *FileInfo) IsDir() bool {
s.Lock()
defer s.Unlock()
return s.dir
}
func (s *FileInfo) Sys() interface{} { return nil }
func (s *FileInfo) Size() int64 {
if s.IsDir() {
return int64(42)
}
s.Lock()
defer s.Unlock()
return int64(len(s.data))
}
var (
ErrFileClosed = errors.New("File is closed")
ErrOutOfRange = errors.New("Out of range")
ErrTooLarge = errors.New("Too large")
ErrFileNotFound = os.ErrNotExist
ErrFileExists = os.ErrExist
ErrDestinationExists = os.ErrExist
)

365
vendor/github.com/spf13/afero/memmap.go generated vendored Normal file
View File

@@ -0,0 +1,365 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"fmt"
"log"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/spf13/afero/mem"
)
type MemMapFs struct {
mu sync.RWMutex
data map[string]*mem.FileData
init sync.Once
}
func NewMemMapFs() Fs {
return &MemMapFs{}
}
func (m *MemMapFs) getData() map[string]*mem.FileData {
m.init.Do(func() {
m.data = make(map[string]*mem.FileData)
// Root should always exist, right?
// TODO: what about windows?
m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator)
})
return m.data
}
func (*MemMapFs) Name() string { return "MemMapFS" }
func (m *MemMapFs) Create(name string) (File, error) {
name = normalizePath(name)
m.mu.Lock()
file := mem.CreateFile(name)
m.getData()[name] = file
m.registerWithParent(file)
m.mu.Unlock()
return mem.NewFileHandle(file), nil
}
func (m *MemMapFs) unRegisterWithParent(fileName string) error {
f, err := m.lockfreeOpen(fileName)
if err != nil {
return err
}
parent := m.findParent(f)
if parent == nil {
log.Panic("parent of ", f.Name(), " is nil")
}
parent.Lock()
mem.RemoveFromMemDir(parent, f)
parent.Unlock()
return nil
}
func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
pdir, _ := filepath.Split(f.Name())
pdir = filepath.Clean(pdir)
pfile, err := m.lockfreeOpen(pdir)
if err != nil {
return nil
}
return pfile
}
func (m *MemMapFs) registerWithParent(f *mem.FileData) {
if f == nil {
return
}
parent := m.findParent(f)
if parent == nil {
pdir := filepath.Dir(filepath.Clean(f.Name()))
err := m.lockfreeMkdir(pdir, 0777)
if err != nil {
//log.Println("Mkdir error:", err)
return
}
parent, err = m.lockfreeOpen(pdir)
if err != nil {
//log.Println("Open after Mkdir error:", err)
return
}
}
parent.Lock()
mem.InitializeDir(parent)
mem.AddToMemDir(parent, f)
parent.Unlock()
}
func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
name = normalizePath(name)
x, ok := m.getData()[name]
if ok {
// Only return ErrFileExists if it's a file, not a directory.
i := mem.FileInfo{FileData: x}
if !i.IsDir() {
return ErrFileExists
}
} else {
item := mem.CreateDir(name)
m.getData()[name] = item
m.registerWithParent(item)
}
return nil
}
func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
name = normalizePath(name)
m.mu.RLock()
_, ok := m.getData()[name]
m.mu.RUnlock()
if ok {
return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
}
m.mu.Lock()
item := mem.CreateDir(name)
m.getData()[name] = item
m.registerWithParent(item)
m.mu.Unlock()
m.Chmod(name, perm|os.ModeDir)
return nil
}
// MkdirAll creates the named directory; missing parents are created by
// the registration logic inside Mkdir. An already-existing directory is
// not an error, matching os.MkdirAll.
func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
	err := m.Mkdir(path, perm)
	if err != nil {
		// Mkdir always wraps failures in *os.PathError, so this type
		// assertion is safe for errors produced above.
		if err.(*os.PathError).Err == ErrFileExists {
			return nil
		}
		return err
	}
	return nil
}
// normalizePath cleans path and maps the relative names "." and ".."
// (which have no anchor in this flat in-memory namespace) to the root.
func normalizePath(path string) string {
	if p := filepath.Clean(path); p != "." && p != ".." {
		return p
	}
	return FilePathSeparator
}
func (m *MemMapFs) Open(name string) (File, error) {
f, err := m.open(name)
if f != nil {
return mem.NewReadOnlyFileHandle(f), err
}
return nil, err
}
func (m *MemMapFs) openWrite(name string) (File, error) {
f, err := m.open(name)
if f != nil {
return mem.NewFileHandle(f), err
}
return nil, err
}
func (m *MemMapFs) open(name string) (*mem.FileData, error) {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
}
return f, nil
}
// lockfreeOpen looks up name's FileData without taking m.mu; the caller
// must already hold the appropriate lock.
func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
	f, ok := m.getData()[normalizePath(name)]
	if !ok {
		return nil, ErrFileNotFound
	}
	return f, nil
}
func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
chmod := false
file, err := m.openWrite(name)
if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
file, err = m.Create(name)
chmod = true
}
if err != nil {
return nil, err
}
if flag == os.O_RDONLY {
file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
}
if flag&os.O_APPEND > 0 {
_, err = file.Seek(0, os.SEEK_END)
if err != nil {
file.Close()
return nil, err
}
}
if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
err = file.Truncate(0)
if err != nil {
file.Close()
return nil, err
}
}
if chmod {
m.Chmod(name, perm)
}
return file, nil
}
func (m *MemMapFs) Remove(name string) error {
name = normalizePath(name)
m.mu.Lock()
defer m.mu.Unlock()
if _, ok := m.getData()[name]; ok {
err := m.unRegisterWithParent(name)
if err != nil {
return &os.PathError{Op: "remove", Path: name, Err: err}
}
delete(m.getData(), name)
} else {
return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
}
return nil
}
// RemoveAll removes path and every entry whose name has path as a
// prefix. No error is reported if path does not exist, matching
// os.RemoveAll semantics.
//
// NOTE(review): plain string-prefix matching also removes siblings such
// as "/foobar" when removing "/foo" — confirm against upstream intent.
func (m *MemMapFs) RemoveAll(path string) error {
	path = normalizePath(path)
	m.mu.Lock()
	m.unRegisterWithParent(path)
	m.mu.Unlock()

	m.mu.RLock()
	defer m.mu.RUnlock()

	for p := range m.getData() { // idiom fix: "for p, _ :=" — the blank value was redundant
		if strings.HasPrefix(p, path) {
			// The read lock is dropped and re-taken around each delete so
			// the write lock can be acquired; entries added concurrently
			// during this window may be missed.
			m.mu.RUnlock()
			m.mu.Lock()
			delete(m.getData(), p)
			m.mu.Unlock()
			m.mu.RLock()
		}
	}
	return nil
}
func (m *MemMapFs) Rename(oldname, newname string) error {
oldname = normalizePath(oldname)
newname = normalizePath(newname)
if oldname == newname {
return nil
}
m.mu.RLock()
defer m.mu.RUnlock()
if _, ok := m.getData()[oldname]; ok {
m.mu.RUnlock()
m.mu.Lock()
m.unRegisterWithParent(oldname)
fileData := m.getData()[oldname]
delete(m.getData(), oldname)
mem.ChangeFileName(fileData, newname)
m.getData()[newname] = fileData
m.registerWithParent(fileData)
m.mu.Unlock()
m.mu.RLock()
} else {
return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
}
return nil
}
func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
f, err := m.Open(name)
if err != nil {
return nil, err
}
fi := mem.GetFileInfo(f.(*mem.File).Data())
return fi, nil
}
func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
}
m.mu.Lock()
mem.SetMode(f, mode)
m.mu.Unlock()
return nil
}
func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
}
m.mu.Lock()
mem.SetModTime(f, mtime)
m.mu.Unlock()
return nil
}
func (m *MemMapFs) List() {
for _, x := range m.data {
y := mem.FileInfo{FileData: x}
fmt.Println(x.Name(), y.Size())
}
}
// func debugMemMapList(fs Fs) {
// if x, ok := fs.(*MemMapFs); ok {
// x.List()
// }
// }

101
vendor/github.com/spf13/afero/os.go generated vendored Normal file
View File

@@ -0,0 +1,101 @@
// Copyright © 2014 Steve Francia <spf@spf13.com>.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"time"
)
var _ Lstater = (*OsFs)(nil)
// OsFs is a Fs implementation that uses functions provided by the os package.
//
// For details in any method, check the documentation of the os package
// (http://golang.org/pkg/os/).
type OsFs struct{}
func NewOsFs() Fs {
return &OsFs{}
}
func (OsFs) Name() string { return "OsFs" }
func (OsFs) Create(name string) (File, error) {
f, e := os.Create(name)
if f == nil {
// while this looks strange, we need to return a bare nil (of type nil) not
// a nil value of type *os.File or nil won't be nil
return nil, e
}
return f, e
}
func (OsFs) Mkdir(name string, perm os.FileMode) error {
return os.Mkdir(name, perm)
}
func (OsFs) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
func (OsFs) Open(name string) (File, error) {
f, e := os.Open(name)
if f == nil {
// while this looks strange, we need to return a bare nil (of type nil) not
// a nil value of type *os.File or nil won't be nil
return nil, e
}
return f, e
}
func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
f, e := os.OpenFile(name, flag, perm)
if f == nil {
// while this looks strange, we need to return a bare nil (of type nil) not
// a nil value of type *os.File or nil won't be nil
return nil, e
}
return f, e
}
func (OsFs) Remove(name string) error {
return os.Remove(name)
}
func (OsFs) RemoveAll(path string) error {
return os.RemoveAll(path)
}
func (OsFs) Rename(oldname, newname string) error {
return os.Rename(oldname, newname)
}
func (OsFs) Stat(name string) (os.FileInfo, error) {
return os.Stat(name)
}
func (OsFs) Chmod(name string, mode os.FileMode) error {
return os.Chmod(name, mode)
}
func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return os.Chtimes(name, atime, mtime)
}
func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
fi, err := os.Lstat(name)
return fi, true, err
}

106
vendor/github.com/spf13/afero/path.go generated vendored Normal file
View File

@@ -0,0 +1,106 @@
// Copyright ©2015 The Go Authors
// Copyright ©2015 Steve Francia <spf@spf13.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"path/filepath"
"sort"
)
// readDirNames reads the directory named by dirname and returns
// a sorted list of directory entries.
// adapted from https://golang.org/src/path/filepath/path.go
func readDirNames(fs Fs, dirname string) ([]string, error) {
	dir, err := fs.Open(dirname)
	if err != nil {
		return nil, err
	}
	entries, readErr := dir.Readdirnames(-1)
	dir.Close()
	if readErr != nil {
		return nil, readErr
	}
	sort.Strings(entries)
	return entries, nil
}
// walk recursively descends path, calling walkFn
// adapted from https://golang.org/src/path/filepath/path.go
func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
	err := walkFn(path, info, nil)
	if err != nil {
		// SkipDir returned for a directory prunes that subtree; it is not an error.
		if info.IsDir() && err == filepath.SkipDir {
			return nil
		}
		return err
	}

	// Plain files have no children to descend into.
	if !info.IsDir() {
		return nil
	}

	names, err := readDirNames(fs, path)
	if err != nil {
		// The directory could not be listed: report it to walkFn and let it decide.
		return walkFn(path, info, err)
	}

	for _, name := range names {
		filename := filepath.Join(path, name)
		fileInfo, err := lstatIfPossible(fs, filename)
		if err != nil {
			// Stat failed: report the entry with the error; SkipDir is honored.
			if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
				return err
			}
		} else {
			err = walk(fs, filename, fileInfo, walkFn)
			if err != nil {
				// A SkipDir from a plain file propagates up one level, where the
				// parent loop (seeing a directory + SkipDir) swallows it, which
				// skips the remainder of the containing directory.
				if !fileInfo.IsDir() || err != filepath.SkipDir {
					return err
				}
			}
		}
	}
	return nil
}
// lstatIfPossible stats path with Lstat when the filesystem implements
// the optional Lstater interface, and falls back to fs.Stat otherwise.
func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) {
	lfs, ok := fs.(Lstater)
	if !ok {
		return fs.Stat(path)
	}
	fi, _, err := lfs.LstatIfPossible(path)
	return fi, err
}
// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root. All errors that arise visiting files
// and directories are filtered by walkFn. The files are walked in lexical
// order, which makes the output deterministic but means that for very
// large directories Walk can be inefficient.
// Walk does not follow symbolic links.
func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
	return Walk(a.Fs, root, walkFn)
}

// Walk is the package-level form of Afero.Walk operating on an explicit Fs.
func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
	info, err := lstatIfPossible(fs, root)
	if err != nil {
		// Even the root could not be statted: give walkFn one chance to handle it.
		return walkFn(root, nil, err)
	}
	return walk(fs, root, info, walkFn)
}

80
vendor/github.com/spf13/afero/readonlyfs.go generated vendored Normal file
View File

@@ -0,0 +1,80 @@
package afero
import (
"os"
"syscall"
"time"
)
// Compile-time check that *ReadOnlyFs implements the optional Lstater interface.
var _ Lstater = (*ReadOnlyFs)(nil)

// ReadOnlyFs wraps a source filesystem, passing reads through and
// rejecting every mutating operation with syscall.EPERM.
type ReadOnlyFs struct {
	source Fs
}

// NewReadOnlyFs returns a read-only view of source.
func NewReadOnlyFs(source Fs) Fs {
	return &ReadOnlyFs{source: source}
}
// ReadDir lists a directory via the underlying source filesystem.
func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
	return ReadDir(r.source, name)
}

// Chtimes is rejected: the filesystem is read-only.
func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
	return syscall.EPERM
}

// Chmod is rejected: the filesystem is read-only.
func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
	return syscall.EPERM
}

// Name identifies this filesystem wrapper.
func (r *ReadOnlyFs) Name() string {
	return "ReadOnlyFilter"
}

// Stat delegates to the source filesystem.
func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
	return r.source.Stat(name)
}

// LstatIfPossible uses the source's Lstat when the source implements
// Lstater; otherwise it falls back to Stat and reports lstat=false.
func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
	if lsf, ok := r.source.(Lstater); ok {
		return lsf.LstatIfPossible(name)
	}
	fi, err := r.Stat(name)
	return fi, false, err
}

// Rename is rejected: the filesystem is read-only.
func (r *ReadOnlyFs) Rename(o, n string) error {
	return syscall.EPERM
}

// RemoveAll is rejected: the filesystem is read-only.
func (r *ReadOnlyFs) RemoveAll(p string) error {
	return syscall.EPERM
}

// Remove is rejected: the filesystem is read-only.
func (r *ReadOnlyFs) Remove(n string) error {
	return syscall.EPERM
}

// OpenFile permits only read access: any write-oriented flag yields EPERM.
func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
		return nil, syscall.EPERM
	}
	return r.source.OpenFile(name, flag, perm)
}

// Open delegates to the source filesystem; plain opens are read access.
func (r *ReadOnlyFs) Open(n string) (File, error) {
	return r.source.Open(n)
}

// Mkdir is rejected: the filesystem is read-only.
func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
	return syscall.EPERM
}

// MkdirAll is rejected: the filesystem is read-only.
func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
	return syscall.EPERM
}

// Create is rejected: the filesystem is read-only.
func (r *ReadOnlyFs) Create(n string) (File, error) {
	return nil, syscall.EPERM
}

214
vendor/github.com/spf13/afero/regexpfs.go generated vendored Normal file
View File

@@ -0,0 +1,214 @@
package afero
import (
"os"
"regexp"
"syscall"
"time"
)
// The RegexpFs filters files (not directories) by regular expression. Only
// files matching the given regexp will be allowed, all others get an ENOENT
// error ("No such file or directory").
//
type RegexpFs struct {
	re     *regexp.Regexp // filter applied to plain-file names; nil means allow all
	source Fs             // underlying filesystem every call is delegated to
}

// NewRegexpFs wraps source so that only file names matching re are visible.
func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
	return &RegexpFs{source: source, re: re}
}

// RegexpFile wraps an open File and applies the same name filter to
// directory listings read through it.
type RegexpFile struct {
	f  File
	re *regexp.Regexp
}
// matchesName reports whether name passes the filter: nil when no regexp
// is configured or the regexp matches, syscall.ENOENT otherwise.
func (r *RegexpFs) matchesName(name string) error {
	if r.re == nil || r.re.MatchString(name) {
		return nil
	}
	return syscall.ENOENT
}
// dirOrMatches accepts name when it is a directory (directories are never
// filtered) or when the file name matches the configured regexp.
func (r *RegexpFs) dirOrMatches(name string) error {
	isDir, err := IsDir(r.source, name)
	switch {
	case err != nil:
		return err
	case isDir:
		return nil
	default:
		return r.matchesName(name)
	}
}
// Chtimes changes times for a directory or a matching file.
func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
	if err := r.dirOrMatches(name); err != nil {
		return err
	}
	return r.source.Chtimes(name, a, m)
}

// Chmod changes the mode of a directory or a matching file.
func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
	if err := r.dirOrMatches(name); err != nil {
		return err
	}
	return r.source.Chmod(name, mode)
}

// Name identifies this filesystem wrapper.
func (r *RegexpFs) Name() string {
	return "RegexpFs"
}

// Stat stats a directory or a matching file; non-matching files get ENOENT.
func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
	if err := r.dirOrMatches(name); err != nil {
		return nil, err
	}
	return r.source.Stat(name)
}
// Rename renames a file when both the old and the new name match the
// filter. Directories are not renamed: the call reports success without
// touching the source (preserved from the original; the regexp filter
// applies only to file names).
func (r *RegexpFs) Rename(oldname, newname string) error {
	isDir, err := IsDir(r.source, oldname)
	if err != nil {
		return err
	}
	if isDir {
		return nil
	}
	for _, n := range []string{oldname, newname} {
		if err := r.matchesName(n); err != nil {
			return err
		}
	}
	return r.source.Rename(oldname, newname)
}
// RemoveAll removes p and, for directories, everything below it.
// Directories are removed unconditionally; a plain file must match the filter.
func (r *RegexpFs) RemoveAll(p string) error {
	dir, err := IsDir(r.source, p)
	if err != nil {
		return err
	}
	if !dir {
		if err := r.matchesName(p); err != nil {
			return err
		}
	}
	return r.source.RemoveAll(p)
}

// Remove removes a directory or a matching file.
func (r *RegexpFs) Remove(name string) error {
	if err := r.dirOrMatches(name); err != nil {
		return err
	}
	return r.source.Remove(name)
}

// OpenFile opens a directory or a matching file with the given flags and mode.
func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
	if err := r.dirOrMatches(name); err != nil {
		return nil, err
	}
	return r.source.OpenFile(name, flag, perm)
}
// Open opens name for reading. Directories always pass the filter; plain
// files must match the configured regexp. The returned file filters its
// own Readdir results through the same regexp.
func (r *RegexpFs) Open(name string) (File, error) {
	dir, err := IsDir(r.source, name)
	if err != nil {
		return nil, err
	}
	if !dir {
		if err := r.matchesName(name); err != nil {
			return nil, err
		}
	}
	f, err := r.source.Open(name)
	// Bug fix: the error from the underlying Open was previously discarded,
	// so a failed open returned a RegexpFile wrapping a nil File together
	// with a nil error.
	if err != nil {
		return nil, err
	}
	return &RegexpFile{f: f, re: r.re}, nil
}
// Mkdir always delegates: directories are not filtered.
func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
	return r.source.Mkdir(n, p)
}

// MkdirAll always delegates: directories are not filtered.
func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
	return r.source.MkdirAll(n, p)
}

// Create creates a new file; the name must match the filter.
func (r *RegexpFs) Create(name string) (File, error) {
	if err := r.matchesName(name); err != nil {
		return nil, err
	}
	return r.source.Create(name)
}
// Close closes the wrapped file.
func (f *RegexpFile) Close() error {
	return f.f.Close()
}

// Read delegates to the wrapped file.
func (f *RegexpFile) Read(s []byte) (int, error) {
	return f.f.Read(s)
}

// ReadAt delegates to the wrapped file.
func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
	return f.f.ReadAt(s, o)
}

// Seek delegates to the wrapped file.
func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
	return f.f.Seek(o, w)
}

// Write delegates to the wrapped file.
func (f *RegexpFile) Write(s []byte) (int, error) {
	return f.f.Write(s)
}

// WriteAt delegates to the wrapped file.
func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
	return f.f.WriteAt(s, o)
}

// Name delegates to the wrapped file.
func (f *RegexpFile) Name() string {
	return f.f.Name()
}

// Readdir lists the directory, keeping subdirectories unconditionally and
// plain files only when their name matches the regexp.
func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
	var rfi []os.FileInfo
	rfi, err = f.f.Readdir(c)
	if err != nil {
		return nil, err
	}
	for _, i := range rfi {
		if i.IsDir() || f.re.MatchString(i.Name()) {
			fi = append(fi, i)
		}
	}
	return fi, nil
}

// Readdirnames returns the names from the filtered Readdir listing.
func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
	fi, err := f.Readdir(c)
	if err != nil {
		return nil, err
	}
	for _, s := range fi {
		n = append(n, s.Name())
	}
	return n, nil
}

// Stat delegates to the wrapped file.
func (f *RegexpFile) Stat() (os.FileInfo, error) {
	return f.f.Stat()
}

// Sync delegates to the wrapped file.
func (f *RegexpFile) Sync() error {
	return f.f.Sync()
}

// Truncate delegates to the wrapped file.
func (f *RegexpFile) Truncate(s int64) error {
	return f.f.Truncate(s)
}

// WriteString delegates to the wrapped file.
func (f *RegexpFile) WriteString(s string) (int, error) {
	return f.f.WriteString(s)
}

320
vendor/github.com/spf13/afero/unionFile.go generated vendored Normal file
View File

@@ -0,0 +1,320 @@
package afero
import (
"io"
"os"
"path/filepath"
"syscall"
)
// The UnionFile implements the afero.File interface and will be returned
// when reading a directory present at least in the overlay or opening a file
// for writing.
//
// The calls to
// Readdir() and Readdirnames() merge the file os.FileInfo / names from the
// base and the overlay - for files present in both layers, only those
// from the overlay will be used.
//
// When opening files for writing (Create() / OpenFile() with the right flags)
// the operations will be done in both layers, starting with the overlay. A
// successful read in the overlay will move the cursor position in the base layer
// by the number of bytes read.
type UnionFile struct {
	Base   File          // lower layer; may be nil
	Layer  File          // overlay layer; takes precedence when non-nil
	Merger DirsMerger    // optional directory-merge strategy; nil selects the default
	off    int           // read offset into the cached merged directory listing
	files  []os.FileInfo // merged directory listing, populated lazily by Readdir
}
// Close closes both layers. The base is closed first so the overlay ends
// up with the newer timestamp; closing the overlay first would make the
// next access see a stale cache entry. Returns BADFD when neither layer
// is present; otherwise returns the overlay's close error (the base's is
// intentionally ignored).
func (f *UnionFile) Close() error {
	if f.Base != nil {
		f.Base.Close()
	}
	if f.Layer == nil {
		return BADFD
	}
	return f.Layer.Close()
}
// Read reads from the overlay when present and keeps the base file's
// cursor in sync, so a later write or SEEK_CUR lands at the same offset.
func (f *UnionFile) Read(s []byte) (int, error) {
	if f.Layer != nil {
		n, err := f.Layer.Read(s)
		if (err == nil || err == io.EOF) && f.Base != nil {
			// advance the file position also in the base file, the next
			// call may be a write at this position (or a seek with SEEK_CUR)
			if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil {
				// only overwrite err in case the seek fails: we need to
				// report an eventual io.EOF to the caller
				err = seekErr
			}
		}
		return n, err
	}
	if f.Base != nil {
		return f.Base.Read(s)
	}
	return 0, BADFD
}

// ReadAt reads at offset o from the overlay when present and moves the
// base cursor just past the bytes read.
func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
	if f.Layer != nil {
		n, err := f.Layer.ReadAt(s, o)
		if (err == nil || err == io.EOF) && f.Base != nil {
			_, err = f.Base.Seek(o+int64(n), os.SEEK_SET)
		}
		return n, err
	}
	if f.Base != nil {
		return f.Base.ReadAt(s, o)
	}
	return 0, BADFD
}

// Seek seeks the overlay when present and mirrors the seek on the base.
func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
	if f.Layer != nil {
		pos, err = f.Layer.Seek(o, w)
		if (err == nil || err == io.EOF) && f.Base != nil {
			_, err = f.Base.Seek(o, w)
		}
		return pos, err
	}
	if f.Base != nil {
		return f.Base.Seek(o, w)
	}
	return 0, BADFD
}

// Write writes to the overlay first and, on success, mirrors the write to the base.
func (f *UnionFile) Write(s []byte) (n int, err error) {
	if f.Layer != nil {
		n, err = f.Layer.Write(s)
		if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
			_, err = f.Base.Write(s)
		}
		return n, err
	}
	if f.Base != nil {
		return f.Base.Write(s)
	}
	return 0, BADFD
}

// WriteAt writes at offset o to the overlay first and, on success, mirrors it to the base.
func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
	if f.Layer != nil {
		n, err = f.Layer.WriteAt(s, o)
		if err == nil && f.Base != nil {
			_, err = f.Base.WriteAt(s, o)
		}
		return n, err
	}
	if f.Base != nil {
		return f.Base.WriteAt(s, o)
	}
	return 0, BADFD
}
// Name returns the overlay's name when an overlay is present, otherwise the base's.
func (f *UnionFile) Name() string {
	if f.Layer == nil {
		return f.Base.Name()
	}
	return f.Layer.Name()
}
// DirsMerger is how UnionFile weaves two directories together.
// It takes the FileInfo slices from the layer and the base and returns a
// single view.
type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error)

// defaultUnionMergeDirsFn merges by name, with entries from the layer
// taking precedence over same-named entries from the base. The order of
// the returned slice is unspecified (map iteration).
var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
	seen := make(map[string]os.FileInfo, len(lofi)+len(bofi))
	for _, fi := range lofi {
		seen[fi.Name()] = fi
	}
	for _, fi := range bofi {
		if _, ok := seen[fi.Name()]; !ok {
			seen[fi.Name()] = fi
		}
	}
	merged := make([]os.FileInfo, 0, len(seen))
	for _, fi := range seen {
		merged = append(merged, fi)
	}
	return merged, nil
}
// Readdir will weave the two directories together and
// return a single view of the overlayed directories.
// At the end of the directory view, the error is io.EOF if c > 0.
func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
	var merge DirsMerger = f.Merger
	if merge == nil {
		merge = defaultUnionMergeDirsFn
	}

	// Populate the merged listing once, on the first call.
	if f.off == 0 {
		var lfi []os.FileInfo
		if f.Layer != nil {
			lfi, err = f.Layer.Readdir(-1)
			if err != nil {
				return nil, err
			}
		}

		var bfi []os.FileInfo
		if f.Base != nil {
			bfi, err = f.Base.Readdir(-1)
			if err != nil {
				return nil, err
			}
		}
		merged, err := merge(lfi, bfi)
		if err != nil {
			return nil, err
		}
		f.files = append(f.files, merged...)
	}

	if c <= 0 && len(f.files) == 0 {
		return f.files, nil
	}

	if f.off >= len(f.files) {
		return nil, io.EOF
	}

	if c <= 0 {
		return f.files[f.off:], nil
	}

	// Bug fix: the original returned f.files[f.off:c] and clamped c against
	// len(f.files). Once f.off > 0 that slices the wrong window (or panics
	// when f.off > c); clamp against the remaining entries and slice
	// relative to the current offset instead.
	if remaining := len(f.files) - f.off; c > remaining {
		c = remaining
	}
	defer func() { f.off += c }()
	return f.files[f.off : f.off+c], nil
}
// Readdirnames returns the names from the merged Readdir view.
func (f *UnionFile) Readdirnames(c int) ([]string, error) {
	infos, err := f.Readdir(c)
	if err != nil {
		return nil, err
	}
	var names []string
	for _, info := range infos {
		names = append(names, info.Name())
	}
	return names, nil
}
// Stat prefers the overlay, falls back to the base, and returns BADFD
// when neither layer is present.
func (f *UnionFile) Stat() (os.FileInfo, error) {
	if f.Layer != nil {
		return f.Layer.Stat()
	}
	if f.Base != nil {
		return f.Base.Stat()
	}
	return nil, BADFD
}

// Sync flushes the overlay and, on success, the base.
func (f *UnionFile) Sync() (err error) {
	if f.Layer != nil {
		err = f.Layer.Sync()
		if err == nil && f.Base != nil {
			err = f.Base.Sync()
		}
		return err
	}
	if f.Base != nil {
		return f.Base.Sync()
	}
	return BADFD
}

// Truncate truncates the overlay and, on success, the base.
func (f *UnionFile) Truncate(s int64) (err error) {
	if f.Layer != nil {
		err = f.Layer.Truncate(s)
		if err == nil && f.Base != nil {
			err = f.Base.Truncate(s)
		}
		return err
	}
	if f.Base != nil {
		return f.Base.Truncate(s)
	}
	return BADFD
}

// WriteString writes s to the overlay and, on success, mirrors it to the base.
func (f *UnionFile) WriteString(s string) (n int, err error) {
	if f.Layer != nil {
		n, err = f.Layer.WriteString(s)
		if err == nil && f.Base != nil {
			_, err = f.Base.WriteString(s)
		}
		return n, err
	}
	if f.Base != nil {
		return f.Base.WriteString(s)
	}
	return 0, BADFD
}
// copyToLayer copies file name from the base filesystem into the overlay
// layer, creating parent directories as needed. On any failure the
// partially written overlay copy is removed; on success the overlay
// file's access and modification times are set to the base file's mtime.
func copyToLayer(base Fs, layer Fs, name string) error {
	bfh, err := base.Open(name)
	if err != nil {
		return err
	}
	defer bfh.Close()

	// First make sure the directory exists
	exists, err := Exists(layer, filepath.Dir(name))
	if err != nil {
		return err
	}
	if !exists {
		err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
		if err != nil {
			return err
		}
	}

	// Create the file on the overlay
	lfh, err := layer.Create(name)
	if err != nil {
		return err
	}
	n, err := io.Copy(lfh, bfh)
	if err != nil {
		// If anything fails, clean up the file
		layer.Remove(name)
		lfh.Close()
		return err
	}

	// A byte-count mismatch means the copy is incomplete or the source
	// changed underneath us; discard the overlay copy as an I/O error.
	bfi, err := bfh.Stat()
	if err != nil || bfi.Size() != n {
		layer.Remove(name)
		lfh.Close()
		return syscall.EIO
	}

	err = lfh.Close()
	if err != nil {
		layer.Remove(name)
		lfh.Close()
		return err
	}
	return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
}

330
vendor/github.com/spf13/afero/util.go generated vendored Normal file
View File

@@ -0,0 +1,330 @@
// Copyright ©2015 Steve Francia <spf@spf13.com>
// Portions Copyright ©2015 The Hugo Authors
// Portions Copyright 2016-present Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"unicode"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
)
// Filepath separator defined by os.Separator.
const FilePathSeparator = string(filepath.Separator)

// WriteReader writes the content of r to path on a.Fs, creating parent
// directories as needed.
func (a Afero) WriteReader(path string, r io.Reader) (err error) {
	return WriteReader(a.Fs, path, r)
}

// WriteReader creates path on fs (including missing parent directories)
// and copies the content of r into it.
func WriteReader(fs Fs, path string, r io.Reader) (err error) {
	dir, _ := filepath.Split(path)
	ospath := filepath.FromSlash(dir)

	if ospath != "" {
		err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
		if err != nil {
			// Bug fix: the original compared err != os.ErrExist directly,
			// which is always true for the *PathError values a filesystem
			// returns, so the "already exists" tolerance was dead code.
			// os.IsExist unwraps the error and matches the intent.
			if !os.IsExist(err) {
				return err
			}
		}
	}

	file, err := fs.Create(path)
	if err != nil {
		return
	}
	defer file.Close()

	_, err = io.Copy(file, r)
	return
}
// Same as WriteReader but checks to see if file/directory already exists.
func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
	return SafeWriteReader(a.Fs, path, r)
}

// SafeWriteReader writes r to path but refuses to overwrite: it returns
// an error when path already exists.
func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
	dir, _ := filepath.Split(path)
	ospath := filepath.FromSlash(dir)

	if ospath != "" {
		err = fs.MkdirAll(ospath, 0777) // rwx, rw, r
		if err != nil {
			return
		}
	}

	// Unlike WriteReader, an existing target is an error here.
	exists, err := Exists(fs, path)
	if err != nil {
		return
	}
	if exists {
		return fmt.Errorf("%v already exists", path)
	}

	file, err := fs.Create(path)
	if err != nil {
		return
	}
	defer file.Close()

	_, err = io.Copy(file, r)
	return
}
// GetTempDir returns the default temp directory (optionally extended by
// subPath) for a.Fs; see the package-level GetTempDir.
func (a Afero) GetTempDir(subPath string) string {
	return GetTempDir(a.Fs, subPath)
}

// GetTempDir returns the default temp directory with trailing slash
// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx
func GetTempDir(fs Fs, subPath string) string {
	addSlash := func(p string) string {
		if FilePathSeparator != p[len(p)-1:] {
			p = p + FilePathSeparator
		}
		return p
	}
	dir := addSlash(os.TempDir())

	if subPath != "" {
		// preserve windows backslash :-(
		// Backslashes are swapped for a "____" placeholder so that
		// UnicodeSanitize (which allows '\\') doesn't interact with them,
		// then swapped back afterwards.
		if FilePathSeparator == "\\" {
			subPath = strings.Replace(subPath, "\\", "____", -1)
		}
		dir = dir + UnicodeSanitize((subPath))
		if FilePathSeparator == "\\" {
			dir = strings.Replace(dir, "____", "\\", -1)
		}

		if exists, _ := Exists(fs, dir); exists {
			return addSlash(dir)
		}

		// NOTE: a failure to create the temp subdirectory panics rather
		// than returning an error — callers cannot recover a failure here.
		err := fs.MkdirAll(dir, 0777)
		if err != nil {
			panic(err)
		}
		dir = addSlash(dir)
	}
	return dir
}
// UnicodeSanitize rewrites s, keeping only characters considered safe for
// paths: letters, digits, combining marks, and . / \ _ - % space #.
// Everything else is dropped.
func UnicodeSanitize(s string) string {
	allowed := func(r rune) bool {
		switch r {
		case '.', '/', '\\', '_', '-', '%', ' ', '#':
			return true
		}
		return unicode.IsLetter(r) || unicode.IsDigit(r) || unicode.IsMark(r)
	}

	var b strings.Builder
	b.Grow(len(s))
	for _, r := range s {
		if allowed(r) {
			b.WriteRune(r)
		}
	}
	return b.String()
}
// Transform characters with accents into plain forms.
// NOTE(review): transform.RemoveFunc is deprecated in golang.org/x/text;
// the modern replacement is runes.Remove(runes.In(unicode.Mn)), which
// would need the x/text/runes import — left unchanged here.
func NeuterAccents(s string) string {
	t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
	result, _, _ := transform.String(t, string(s))

	return result
}

// isMn reports whether r is a Unicode nonspacing mark (category Mn),
// i.e. the combining accents stripped by NeuterAccents.
func isMn(r rune) bool {
	return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
}
// FileContainsBytes reports whether the file on a.Fs contains subslice.
func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
	return FileContainsBytes(a.Fs, filename, subslice)
}

// Check if a file contains a specified byte slice.
func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
	f, err := fs.Open(filename)
	if err != nil {
		return false, err
	}
	defer f.Close()

	return readerContainsAny(f, subslice), nil
}

// FileContainsAnyBytes reports whether the file on a.Fs contains any of subslices.
func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
	return FileContainsAnyBytes(a.Fs, filename, subslices)
}

// Check if a file contains any of the specified byte slices.
func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
	f, err := fs.Open(filename)
	if err != nil {
		return false, err
	}
	defer f.Close()

	return readerContainsAny(f, subslices...), nil
}
// readerContains reports whether any of the subslices is within r.
//
// It streams r through a window of 4x the longest pattern, reading half a
// window at a time and shifting the previous half to the front, so a
// match spanning a read boundary is still found.
func readerContainsAny(r io.Reader, subslices ...[]byte) bool {

	if r == nil || len(subslices) == 0 {
		return false
	}

	// Size the window from the longest pattern; all-empty patterns never match.
	largestSlice := 0

	for _, sl := range subslices {
		if len(sl) > largestSlice {
			largestSlice = len(sl)
		}
	}

	if largestSlice == 0 {
		return false
	}

	bufflen := largestSlice * 4
	halflen := bufflen / 2
	buff := make([]byte, bufflen)
	var err error
	var n, i int

	for {
		i++
		if i == 1 {
			// First pass fills only the front half of the window.
			n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
		} else {
			if i != 2 {
				// shift left to catch overlapping matches
				copy(buff[:], buff[halflen:])
			}
			n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
		}

		if n > 0 {
			// NOTE(review): the whole buffer is searched even after a short
			// final read, so bytes past n (left over from earlier reads or
			// zero padding) are scanned too — verify this cannot produce a
			// false positive for patterns near the stream's end.
			for _, sl := range subslices {
				if bytes.Contains(buff, sl) {
					return true
				}
			}
		}

		if err != nil {
			break
		}
	}
	return false
}
// DirExists reports whether path exists on a.Fs and is a directory.
func (a Afero) DirExists(path string) (bool, error) {
	return DirExists(a.Fs, path)
}

// DirExists checks if a path exists and is a directory.
func DirExists(fs Fs, path string) (bool, error) {
	fi, err := fs.Stat(path)
	if err == nil && fi.IsDir() {
		return true, nil
	}
	// "Does not exist" is a clean false, not an error.
	if os.IsNotExist(err) {
		return false, nil
	}
	return false, err
}

// IsDir reports whether path on a.Fs is a directory.
func (a Afero) IsDir(path string) (bool, error) {
	return IsDir(a.Fs, path)
}

// IsDir checks if a given path is a directory.
func IsDir(fs Fs, path string) (bool, error) {
	fi, err := fs.Stat(path)
	if err != nil {
		return false, err
	}
	return fi.IsDir(), nil
}
// IsEmpty reports whether path on a.Fs is an empty file or empty directory.
func (a Afero) IsEmpty(path string) (bool, error) {
	return IsEmpty(a.Fs, path)
}

// IsEmpty checks if a given file or directory is empty.
func IsEmpty(fs Fs, path string) (bool, error) {
	// Existence check is best-effort: any Stat failure is reported as "does not exist".
	if b, _ := Exists(fs, path); !b {
		return false, fmt.Errorf("%q path does not exist", path)
	}
	fi, err := fs.Stat(path)
	if err != nil {
		return false, err
	}
	if fi.IsDir() {
		f, err := fs.Open(path)
		if err != nil {
			return false, err
		}
		defer f.Close()
		list, err := f.Readdir(-1)
		// Bug fix: the Readdir error was previously discarded, so a failed
		// listing could report a non-empty directory as empty.
		if err != nil {
			return false, err
		}
		return len(list) == 0, nil
	}
	return fi.Size() == 0, nil
}
// Exists reports whether path exists on a.Fs.
func (a Afero) Exists(path string) (bool, error) {
	return Exists(a.Fs, path)
}

// Exists checks if a file or directory exists, distinguishing "not there"
// (false, nil) from a genuine Stat failure (false, err).
func Exists(fs Fs, path string) (bool, error) {
	_, err := fs.Stat(path)
	switch {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return false, err
	}
}
// FullBaseFsPath resolves relativePath against a (possibly nested) chain
// of BasePathFs wrappers, joining each wrapper's base path until a
// non-BasePathFs source is reached.
func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
	combined := relativePath
	for fs := basePathFs; ; {
		combined = filepath.Join(fs.path, combined)
		next, ok := fs.source.(*BasePathFs)
		if !ok {
			break
		}
		fs = next
	}
	return combined
}

17
vendor/modules.txt vendored
View File

@@ -142,6 +142,8 @@ github.com/go-redis/redis/internal/proto
github.com/go-redis/redis/internal/util github.com/go-redis/redis/internal/util
# github.com/go-sql-driver/mysql v1.4.1 # github.com/go-sql-driver/mysql v1.4.1
github.com/go-sql-driver/mysql github.com/go-sql-driver/mysql
# github.com/gobuffalo/flect v0.1.5
github.com/gobuffalo/flect
# github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6 # github.com/gocraft/dbr v0.0.0-20180507214907-a0fd650918f6
github.com/gocraft/dbr github.com/gocraft/dbr
github.com/gocraft/dbr/dialect github.com/gocraft/dbr/dialect
@@ -427,6 +429,9 @@ github.com/sergi/go-diff/diffmatchpatch
github.com/sony/sonyflake github.com/sony/sonyflake
# github.com/speps/go-hashids v2.0.0+incompatible # github.com/speps/go-hashids v2.0.0+incompatible
github.com/speps/go-hashids github.com/speps/go-hashids
# github.com/spf13/afero v1.2.2
github.com/spf13/afero
github.com/spf13/afero/mem
# github.com/spf13/cobra v0.0.3 # github.com/spf13/cobra v0.0.3
github.com/spf13/cobra github.com/spf13/cobra
# github.com/spf13/pflag v1.0.3 # github.com/spf13/pflag v1.0.3
@@ -509,8 +514,8 @@ golang.org/x/sys/cpu
golang.org/x/text/secure/bidirule golang.org/x/text/secure/bidirule
golang.org/x/text/unicode/bidi golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm golang.org/x/text/unicode/norm
golang.org/x/text/width
golang.org/x/text/transform golang.org/x/text/transform
golang.org/x/text/width
golang.org/x/text/encoding golang.org/x/text/encoding
golang.org/x/text/encoding/charmap golang.org/x/text/encoding/charmap
golang.org/x/text/encoding/htmlindex golang.org/x/text/encoding/htmlindex
@@ -1100,6 +1105,16 @@ sigs.k8s.io/controller-runtime/pkg/webhook/types
sigs.k8s.io/controller-runtime/pkg/client/config sigs.k8s.io/controller-runtime/pkg/client/config
sigs.k8s.io/controller-runtime/pkg/envtest/printer sigs.k8s.io/controller-runtime/pkg/envtest/printer
sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics
# sigs.k8s.io/controller-tools v0.1.12
sigs.k8s.io/controller-tools/cmd/controller-gen
sigs.k8s.io/controller-tools/pkg/crd/generator
sigs.k8s.io/controller-tools/pkg/rbac
sigs.k8s.io/controller-tools/pkg/webhook
sigs.k8s.io/controller-tools/pkg/crd/util
sigs.k8s.io/controller-tools/pkg/internal/codegen
sigs.k8s.io/controller-tools/pkg/internal/codegen/parse
sigs.k8s.io/controller-tools/pkg/util
sigs.k8s.io/controller-tools/pkg/internal/general
# sigs.k8s.io/testing_frameworks v0.1.1 # sigs.k8s.io/testing_frameworks v0.1.1
sigs.k8s.io/testing_frameworks/integration sigs.k8s.io/testing_frameworks/integration
sigs.k8s.io/testing_frameworks/integration/addr sigs.k8s.io/testing_frameworks/integration/addr

201
vendor/sigs.k8s.io/controller-tools/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,210 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"log"
"os"
"path/filepath"
"github.com/spf13/cobra"
crdgenerator "sigs.k8s.io/controller-tools/pkg/crd/generator"
"sigs.k8s.io/controller-tools/pkg/rbac"
"sigs.k8s.io/controller-tools/pkg/webhook"
)
// main assembles the controller-gen root command with its sub-commands
// (rbac, crd, webhook, all) and executes it, printing any error and
// exiting with status 1 on failure.
func main() {
	root := &cobra.Command{
		Use:   "controller-gen",
		Short: "A reference implementation generation tool for Kubernetes APIs.",
		Long:  `A reference implementation generation tool for Kubernetes APIs.`,
		Example: ` # Generate RBAC manifests for a project
controller-gen rbac
# Generate CRD manifests for a project
controller-gen crd
# Run all the generators for a given project
controller-gen all
`,
	}

	// Register every generator sub-command on the root.
	subCommands := []*cobra.Command{
		newRBACCmd(),
		newCRDCmd(),
		newWebhookCmd(),
		newAllSubCmd(),
	}
	for _, sub := range subCommands {
		root.AddCommand(sub)
	}

	if err := root.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
// newRBACCmd builds the "rbac" sub-command, which scans Go sources for
// RBAC annotations and writes role/role-binding manifests to the
// configured output directory.
func newRBACCmd() *cobra.Command {
	opts := &rbac.ManifestOptions{}
	opts.SetDefaults()

	cmd := &cobra.Command{
		Use:   "rbac",
		Short: "Generates RBAC manifests",
		Long: `Generate RBAC manifests from the RBAC annotations in Go source files.
Usage:
# controller-gen rbac [--name manager] [--input-dir input_dir] [--output-dir output_dir]
`,
		Run: func(_ *cobra.Command, _ []string) {
			if err := rbac.Generate(opts); err != nil {
				log.Fatal(err)
			}
			fmt.Printf("RBAC manifests generated under '%s' directory\n", opts.OutputDir)
		},
	}

	flags := cmd.Flags()
	flags.StringVar(&opts.Name, "name", opts.Name, "name to be used as prefix in identifier for manifests")
	flags.StringVar(&opts.ServiceAccount, "service-account", opts.ServiceAccount, "service account to bind the role to")
	flags.StringVar(&opts.Namespace, "service-account-namespace", opts.Namespace, "namespace of the service account to bind the role to")
	flags.StringVar(&opts.InputDir, "input-dir", opts.InputDir, "input directory pointing to Go source files")
	flags.StringVar(&opts.OutputDir, "output-dir", opts.OutputDir, "output directory where generated manifests will be saved")
	flags.StringVar(&opts.RoleFile, "role-file", opts.RoleFile, "output file for the role manifest")
	flags.StringVar(&opts.BindingFile, "binding-file", opts.BindingFile, "output file for the role binding manifest")
	return cmd
}
// newCRDCmd builds the "crd" sub-command, which parses Go type
// definitions and emits CRD manifests for them.
func newCRDCmd() *cobra.Command {
	gen := &crdgenerator.Generator{}

	cmd := &cobra.Command{
		Use:   "crd",
		Short: "Generates CRD manifests",
		Long: `Generate CRD manifests from the Type definitions in Go source files.
Usage:
# controller-gen crd [--domain k8s.io] [--root-path input_dir] [--output-dir output_dir]
`,
		Run: func(_ *cobra.Command, _ []string) {
			// Validation fills in defaults (root path, domain, output dir)
			// before the actual generation runs.
			if err := gen.ValidateAndInitFields(); err != nil {
				log.Fatal(err)
			}
			if err := gen.Do(); err != nil {
				log.Fatal(err)
			}
			fmt.Printf("CRD files generated, files can be found under path %s.\n", gen.OutputDir)
		},
	}

	flags := cmd.Flags()
	flags.StringVar(&gen.RootPath, "root-path", "", "working dir, must have PROJECT file under the path or parent path if domain not set")
	flags.StringVar(&gen.OutputDir, "output-dir", "", "output directory, default to 'config/crds' under root path")
	flags.StringVar(&gen.Domain, "domain", "", "domain of the resources, will try to fetch it from PROJECT file if not specified")
	flags.StringVar(&gen.Namespace, "namespace", "", "CRD namespace, treat it as cluster scoped if not set")
	flags.BoolVar(&gen.SkipMapValidation, "skip-map-validation", true, "if set to true, skip generating OpenAPI validation schema for map type in CRD.")
	flags.StringVar(&gen.APIsPath, "apis-path", "pkg/apis", "the path to search for apis relative to the current directory")
	flags.StringVar(&gen.APIsPkg, "apis-pkg", "", "the absolute Go pkg name for current project's api pkg.")
	return cmd
}
// newAllSubCmd builds the "all" sub-command, which runs the CRD, RBAC and
// webhook generators in sequence against a single project directory.
// The order matters: each generator writes into the project tree before
// the next one runs.
func newAllSubCmd() *cobra.Command {
var (
projectDir, namespace string
)
cmd := &cobra.Command{
Use: "all",
Short: "runs all generators for a project",
Long: `Run all available generators for a given project
Usage:
# controller-gen all
`,
Run: func(_ *cobra.Command, _ []string) {
// Default the project directory to the current working directory.
if projectDir == "" {
currDir, err := os.Getwd()
if err != nil {
log.Fatalf("project-dir missing, failed to use current directory: %v", err)
}
projectDir = currDir
}
// CRD generation: manifests land in <project>/config/crds.
// Map validation is always skipped here (matches the crd
// sub-command's flag default).
crdGen := &crdgenerator.Generator{
RootPath: projectDir,
OutputDir: filepath.Join(projectDir, "config", "crds"),
Namespace: namespace,
SkipMapValidation: true,
}
if err := crdGen.ValidateAndInitFields(); err != nil {
log.Fatal(err)
}
if err := crdGen.Do(); err != nil {
log.Fatal(err)
}
fmt.Printf("CRD manifests generated under '%s' \n", crdGen.OutputDir)
// RBAC generation
rbacOptions := &rbac.ManifestOptions{}
rbacOptions.SetDefaults()
if err := rbac.Generate(rbacOptions); err != nil {
log.Fatal(err)
}
fmt.Printf("RBAC manifests generated under '%s' \n", rbacOptions.OutputDir)
// Webhook generation: reads annotations under <project>/pkg and
// writes manifests plus a kustomize patch under config/.
o := &webhook.Options{
WriterOptions: webhook.WriterOptions{
InputDir: filepath.Join(projectDir, "pkg"),
OutputDir: filepath.Join(projectDir, "config", "webhook"),
PatchOutputDir: filepath.Join(projectDir, "config", "default"),
},
}
o.SetDefaults()
if err := webhook.Generate(o); err != nil {
log.Fatal(err)
}
fmt.Printf("webhook manifests generated under '%s' directory\n", o.OutputDir)
},
}
f := cmd.Flags()
f.StringVar(&projectDir, "project-dir", "", "project directory, it must have PROJECT file")
f.StringVar(&namespace, "namespace", "", "CRD namespace, treat it as cluster scoped if not set")
return cmd
}
// newWebhookCmd builds the "webhook" sub-command, which generates
// webhook manifests and the matching kustomize patch from webhook
// annotations in Go sources.
func newWebhookCmd() *cobra.Command {
	opts := &webhook.Options{}
	opts.SetDefaults()

	cmd := &cobra.Command{
		Use:   "webhook",
		Short: "Generates webhook related manifests",
		Long: `Generate webhook related manifests from the webhook annotations in Go source files.
Usage:
# controller-gen webhook [--input-dir input_dir] [--output-dir output_dir] [--patch-output-dir patch-output_dir]
`,
		Run: func(_ *cobra.Command, _ []string) {
			if err := webhook.Generate(opts); err != nil {
				log.Fatal(err)
			}
			fmt.Printf("webhook manifests generated under '%s' directory\n", opts.OutputDir)
		},
	}

	flags := cmd.Flags()
	flags.StringVar(&opts.InputDir, "input-dir", opts.InputDir, "input directory pointing to Go source files")
	flags.StringVar(&opts.OutputDir, "output-dir", opts.OutputDir, "output directory where generated manifests will be saved.")
	flags.StringVar(&opts.PatchOutputDir, "patch-output-dir", opts.PatchOutputDir, "output directory where generated kustomize patch will be saved.")
	return cmd
}

View File

@@ -0,0 +1,207 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generator
import (
"fmt"
"log"
"os"
"path"
"strings"
"github.com/ghodss/yaml"
"github.com/spf13/afero"
extensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/gengo/args"
"k8s.io/gengo/types"
crdutil "sigs.k8s.io/controller-tools/pkg/crd/util"
"sigs.k8s.io/controller-tools/pkg/internal/codegen"
"sigs.k8s.io/controller-tools/pkg/internal/codegen/parse"
"sigs.k8s.io/controller-tools/pkg/util"
)
// Generator generates CRD manifests from API resource definitions defined in Go source files.
type Generator struct {
// RootPath is the project root directory; defaults to the working directory.
RootPath string
// OutputDir is where manifests are written; defaults to "<RootPath>/config/crds".
OutputDir string
// Domain of the resources (e.g. "k8s.io"); read from the PROJECT file when empty.
Domain string
// Namespace to stamp on generated CRDs; CRDs are treated as cluster scoped when empty.
Namespace string
// SkipMapValidation, when true, omits OpenAPI validation schemas for map-typed fields.
SkipMapValidation bool
// OutFs is filesystem to be used for writing out the result
OutFs afero.Fs
// apisPkg is the absolute Go pkg name for current project's 'pkg/apis' pkg.
// This is needed to determine if a Type belongs to the project or it is a referred Type.
apisPkg string
// APIsPath and APIsPkg allow customized generation for Go types existing under directories other than pkg/apis
APIsPath string
APIsPkg string
}
// ValidateAndInitFields checks the generator configuration and fills in
// defaults: the output filesystem, the root path (working directory),
// the domain (read from the PROJECT file), the apis package and the
// output directory. It returns an error when the project layout does
// not satisfy the generator's requirements.
func (c *Generator) ValidateAndInitFields() error {
	if c.OutFs == nil {
		c.OutFs = afero.NewOsFs()
	}

	// Take current path as root path if not specified.
	if c.RootPath == "" {
		wd, err := os.Getwd()
		if err != nil {
			return err
		}
		c.RootPath = wd
	}

	// The generator only works for projects checked out under $GOPATH/src.
	if !crdutil.IsUnderGoSrcPath(c.RootPath) {
		return fmt.Errorf("command must be run from path under $GOPATH/src/<package>")
	}

	// Without an explicit domain, fall back to the PROJECT file.
	if c.Domain == "" {
		if !crdutil.PathHasProjectFile(c.RootPath) {
			return fmt.Errorf("PROJECT file missing in dir %s", c.RootPath)
		}
		c.Domain = crdutil.GetDomainFromProject(c.RootPath)
	}

	if err := c.setAPIsPkg(); err != nil {
		return err
	}

	// Init output directory.
	if c.OutputDir == "" {
		c.OutputDir = path.Join(c.RootPath, "config/crds")
	}
	return nil
}
// Do manages CRD generation: it parses the Go types under APIsPath and
// writes the resulting CRD manifests into OutputDir.
func (c *Generator) Do() error {
arguments := args.Default()
b, err := arguments.NewBuilder()
if err != nil {
return fmt.Errorf("failed making a parser: %v", err)
}
// Switch working directory to root path.
// NOTE: AddDirRecursive below resolves "./"+APIsPath relative to the
// working directory, so this chdir must happen first.
if err := os.Chdir(c.RootPath); err != nil {
return fmt.Errorf("failed switching working dir: %v", err)
}
if err := b.AddDirRecursive("./" + c.APIsPath); err != nil {
return fmt.Errorf("failed making a parser: %v", err)
}
ctx, err := parse.NewContext(b)
if err != nil {
return fmt.Errorf("failed making a context: %v", err)
}
// The skip-map-validation option is consumed later by the map parser.
arguments.CustomArgs = &parse.Options{SkipMapValidation: c.SkipMapValidation}
// TODO: find an elegant way to fulfill the domain in APIs.
p := parse.NewAPIs(ctx, arguments, c.Domain, c.apisPkg)
crds := c.getCrds(p)
return c.writeCRDs(crds)
}
// writeCRDs persists the serialized CRDs, one file per map entry, under
// the configured output directory (created with mode 0700 if absent).
func (c *Generator) writeCRDs(crds map[string][]byte) error {
	// Ensure output dir exists.
	if err := c.OutFs.MkdirAll(c.OutputDir, os.FileMode(0700)); err != nil {
		return err
	}

	writer := &util.FileWriter{Fs: c.OutFs}
	for name, content := range crds {
		target := path.Join(c.OutputDir, name)
		if err := writer.WriteFile(target, content); err != nil {
			return err
		}
	}
	return nil
}
// getCRDFileName derives the manifest file name for a resource:
// "<group>_<version>_<kind-lowercase>.yaml".
func getCRDFileName(resource *codegen.APIResource) string {
	return fmt.Sprintf("%s_%s_%s.yaml", resource.Group, resource.Version, strings.ToLower(resource.Kind))
}
// getCrds collects the CRD for every resource that belongs to this
// project's apis package and returns the CRDs marshaled to YAML, keyed
// by output file name. Marshal failures are fatal.
func (c *Generator) getCrds(p *parse.APIs) map[string][]byte {
	byFile := map[string]extensionsv1beta1.CustomResourceDefinition{}
	for _, group := range p.APIs.Groups {
		for _, version := range group.Versions {
			for _, res := range version.Resources {
				// Skip types that are merely referenced from other projects.
				if !c.belongsToAPIsPkg(res.Type) {
					continue
				}
				crd := res.CRD
				if c.Namespace != "" {
					crd.Namespace = c.Namespace
				}
				byFile[getCRDFileName(res)] = crd
			}
		}
	}

	serialized := map[string][]byte{}
	for name, crd := range byFile {
		out, err := yaml.Marshal(crd)
		if err != nil {
			log.Fatalf("Error: %v", err)
		}
		serialized[name] = out
	}
	return serialized
}
// belongsToAPIsPkg reports whether type t is declared inside this
// project's apis package tree (as opposed to a referenced external type).
func (c *Generator) belongsToAPIsPkg(t *types.Type) bool {
	pkg := t.Name.Package
	return strings.HasPrefix(pkg, c.apisPkg)
}
// setAPIsPkg resolves the Go import path of the project's apis
// directory: APIsPath defaults to "pkg/apis", and when APIsPkg is not
// given explicitly the package is derived from the directory's location
// under GOPATH.
func (c *Generator) setAPIsPkg() error {
	if c.APIsPath == "" {
		c.APIsPath = "pkg/apis"
	}

	c.apisPkg = c.APIsPkg
	if c.apisPkg != "" {
		return nil
	}

	// Validate the apis directory exists before mapping it to an import path.
	apisDir := path.Join(c.RootPath, c.APIsPath)
	if _, err := os.Stat(apisDir); err != nil {
		return fmt.Errorf("error validating apis path %s: %v", apisDir, err)
	}

	pkg, err := crdutil.DirToGoPkg(apisDir)
	if err != nil {
		return err
	}
	c.apisPkg = pkg
	return nil
}

View File

@@ -0,0 +1,117 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bufio"
"fmt"
gobuild "go/build"
"log"
"os"
"path"
"path/filepath"
"strings"
)
// IsGoSrcPath reports whether filePath is exactly the "src" directory
// of one of the configured GOPATH entries.
func IsGoSrcPath(filePath string) bool {
	for _, gopath := range getGoPaths() {
		if filePath == path.Join(gopath, "src") {
			return true
		}
	}
	return false
}
// IsUnderGoSrcPath reports whether filePath lives below the "src"
// directory of any GOPATH entry. The check is applied to the parent
// directory of filePath.
func IsUnderGoSrcPath(filePath string) bool {
	parent := filepath.Dir(filePath)
	for _, gopath := range getGoPaths() {
		if strings.HasPrefix(parent, path.Join(gopath, "src")) {
			return true
		}
	}
	return false
}
// DirToGoPkg returns the Go import path for dir when it lies under the
// "src" directory of some GOPATH entry, e.g.
// /Users/x/go/src/github.com/y/z ==> github.com/y/z.
// It returns an error when dir is not under any GOPATH.
func DirToGoPkg(dir string) (pkg string, err error) {
	goPaths := getGoPaths()
	for _, gopath := range goPaths {
		goSrc := path.Join(gopath, "src")
		// Require a path-separator boundary: a bare HasPrefix(dir, goSrc)
		// would also match sibling directories such as "<gopath>/srcfoo"
		// and yield a bogus relative package like "../srcfoo".
		if !strings.HasPrefix(dir, goSrc+string(filepath.Separator)) {
			continue
		}
		pkg, err := filepath.Rel(goSrc, dir)
		if err == nil {
			return pkg, err
		}
	}
	return "", fmt.Errorf("dir '%s' does not exist under any GOPATH %v", dir, goPaths)
}
// getGoPaths returns the list of GOPATH entries, falling back to the
// toolchain's default GOPATH when the environment variable is unset or
// empty.
func getGoPaths() []string {
	if env := os.Getenv("GOPATH"); len(env) > 0 {
		return filepath.SplitList(env)
	}
	return filepath.SplitList(gobuild.Default.GOPATH)
}
// PathHasProjectFile reports whether a file named PROJECT exists
// directly under filePath.
func PathHasProjectFile(filePath string) bool {
	_, err := os.Stat(path.Join(filePath, "PROJECT"))
	return !os.IsNotExist(err)
}
// GetDomainFromProject get domain information from the PROJECT file under the path.
func GetDomainFromProject(rootPath string) string {
var domain string
file, err := os.Open(path.Join(rootPath, "PROJECT"))
if err != nil {
log.Fatal(err)
}
defer func() {
if err := file.Close(); err != nil {
log.Fatal(err)
}
}()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
if strings.HasPrefix(scanner.Text(), "domain:") {
domainInfo := strings.Split(scanner.Text(), ":")
if len(domainInfo) != 2 {
log.Fatalf("Unexpected domain info: %s", scanner.Text())
}
domain = strings.Replace(domainInfo[1], " ", "", -1)
break
}
}
return domain
}

View File

@@ -0,0 +1,287 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parse
import (
"fmt"
"path"
"path/filepath"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/gengo/types"
"sigs.k8s.io/controller-tools/pkg/internal/codegen"
)
// genUnversionedType pairs a discovered Go type with the API resource it
// came from. Resource is nil for subtypes that are reachable from a
// resource but are not themselves registered resources.
type genUnversionedType struct {
Type *types.Type
Resource *codegen.APIResource
}
// parseAPIs builds the codegen.APIs tree (groups -> versions -> resources)
// from the ByGroupVersionKind index collected earlier, wiring package
// references from the gengo universe as it goes, and stores the result
// in b.APIs.
func (b *APIs) parseAPIs() {
apis := &codegen.APIs{
Domain: b.Domain,
Package: b.APIsPkg,
Groups: map[string]*codegen.APIGroup{},
Rules: b.Rules,
Informers: b.Informers,
}
for group, versionMap := range b.ByGroupVersionKind {
apiGroup := &codegen.APIGroup{
Group: group,
GroupTitle: strings.Title(group),
Domain: b.Domain,
Versions: map[string]*codegen.APIVersion{},
UnversionedResources: map[string]*codegen.APIResource{},
}
for version, kindMap := range versionMap {
apiVersion := &codegen.APIVersion{
Domain: b.Domain,
Group: group,
Version: version,
Resources: map[string]*codegen.APIResource{},
}
for kind, resource := range kindMap {
// Copy the parsed resource into a fresh codegen.APIResource.
apiResource := &codegen.APIResource{
Domain: resource.Domain,
Version: resource.Version,
Group: resource.Group,
Resource: resource.Resource,
Type: resource.Type,
REST: resource.REST,
Kind: resource.Kind,
Subresources: resource.Subresources,
StatusStrategy: resource.StatusStrategy,
Strategy: resource.Strategy,
NonNamespaced: resource.NonNamespaced,
ShortName: resource.ShortName,
}
parseDoc(resource, apiResource)
apiVersion.Resources[kind] = apiResource
// Set the package for the api version
apiVersion.Pkg = b.context.Universe[resource.Type.Name.Package]
// Set the package for the api group
// NOTE(review): filepath.Dir on an import path assumes '/'
// separators; path.Dir would be the precise choice — confirm.
apiGroup.Pkg = b.context.Universe[filepath.Dir(resource.Type.Name.Package)]
if apiGroup.Pkg != nil {
apiGroup.PkgPath = apiGroup.Pkg.Path
}
// UnversionedResources is keyed by kind, so the last version seen
// for a kind wins here.
apiGroup.UnversionedResources[kind] = apiResource
}
apiGroup.Versions[version] = apiVersion
}
b.parseStructs(apiGroup)
apis.Groups[group] = apiGroup
}
apis.Pkg = b.context.Universe[b.APIsPkg]
b.APIs = apis
}
// parseStructs walks every resource type of the group plus its reachable
// subtypes (worklist algorithm) and appends a codegen.Struct for each to
// apigroup.Structs, flagging which ones need client and deepcopy
// generation.
func (b *APIs) parseStructs(apigroup *codegen.APIGroup) {
// Seed the worklist with the group's resource types ...
remaining := []genUnversionedType{}
for _, version := range apigroup.Versions {
for _, resource := range version.Resources {
remaining = append(remaining, genUnversionedType{resource.Type, resource})
}
}
// ... and with subresource types registered for the group (no Resource).
for _, version := range b.SubByGroupVersionKind[apigroup.Group] {
for _, kind := range version {
remaining = append(remaining, genUnversionedType{kind, nil})
}
}
// NOTE(review): "done" is keyed by the bare type name (no package), so
// identically-named types from different packages in one group would be
// processed only once — confirm this is intended.
done := sets.String{}
for len(remaining) > 0 {
// Pop the next element from the list
next := remaining[0]
remaining[0] = remaining[len(remaining)-1]
remaining = remaining[:len(remaining)-1]
// Already processed this type. Skip it
if done.Has(next.Type.Name.Name) {
continue
}
done.Insert(next.Type.Name.Name)
// Generate the struct and append to the list
result, additionalTypes := parseType(next.Type)
// This is a resource, so generate the client
if b.genClient(next.Type) {
result.GenClient = true
result.GenDeepCopy = true
}
if next.Resource != nil {
result.NonNamespaced = IsNonNamespaced(next.Type)
}
if b.genDeepCopy(next.Type) {
result.GenDeepCopy = true
}
apigroup.Structs = append(apigroup.Structs, result)
// Add the newly discovered subtypes
for _, at := range additionalTypes {
remaining = append(remaining, genUnversionedType{at, nil})
}
}
}
// parseType parses the type into a Struct, and returns a list of types that
// need to be parsed.
// For each struct member it computes the unversioned field type (uType)
// and, when the type lives in another package, the import statement
// (uImport, rendered as `alias "pkg"`) needed to reference it.
func parseType(t *types.Type) (*codegen.Struct, []*types.Type) {
remaining := []*types.Type{}
s := &codegen.Struct{
Name: t.Name.Name,
GenClient: false,
GenUnversioned: true, // Generate unversioned structs by default
}
for _, c := range t.CommentLines {
if strings.Contains(c, "+genregister:unversioned=false") {
// Don't generate the unversioned struct
s.GenUnversioned = false
}
}
for _, member := range t.Members {
uType := member.Type.Name.Name
memberName := member.Name
uImport := ""
// Use the element type for Pointers, Maps and Slices
// (mSubType ends up as the innermost element type).
mSubType := member.Type
hasElem := false
for mSubType.Elem != nil {
mSubType = mSubType.Elem
hasElem = true
}
if hasElem {
// Strip the package from the field type
uType = strings.Replace(member.Type.String(), mSubType.Name.Package+".", "", 1)
}
base := filepath.Base(member.Type.String())
samepkg := t.Name.Package == mSubType.Name.Package
// If not in the same package, calculate the import pkg
if !samepkg {
parts := strings.Split(base, ".")
if len(parts) > 1 {
// Don't generate unversioned types for core types, just use the versioned types
if strings.HasPrefix(mSubType.Name.Package, "k8s.io/api/") {
// Import the package under an alias so it doesn't conflict with other groups
// having the same version
importAlias := path.Base(path.Dir(mSubType.Name.Package)) + path.Base(mSubType.Name.Package)
uImport = fmt.Sprintf("%s \"%s\"", importAlias, mSubType.Name.Package)
if hasElem {
// Replace the full package with the alias when referring to the type
uType = strings.Replace(member.Type.String(), mSubType.Name.Package, importAlias, 1)
} else {
// Replace the full package with the alias when referring to the type
uType = fmt.Sprintf("%s.%s", importAlias, parts[1])
}
} else {
switch member.Type.Name.Package {
case "k8s.io/apimachinery/pkg/apis/meta/v1":
// Use versioned types for meta/v1
uImport = fmt.Sprintf("%s \"%s\"", "metav1", "k8s.io/apimachinery/pkg/apis/meta/v1")
uType = "metav1." + parts[1]
default:
// Use unversioned types for everything else
t := member.Type
if t.Elem != nil {
// handle Pointers, Maps, Slices
// We need to parse the package from the Type String
t = t.Elem
str := member.Type.String()
// The package sits between the last '*' or ']' and the last '.'
// e.g. "[]*foo.com/pkg.Bar" -> pkg "foo.com/pkg", name "Bar".
startPkg := strings.LastIndexAny(str, "*]")
endPkg := strings.LastIndexAny(str, ".")
pkg := str[startPkg+1 : endPkg]
name := str[endPkg+1:]
prefix := str[:startPkg+1]
uImportBase := path.Base(pkg)
uImportName := path.Base(path.Dir(pkg)) + uImportBase
uImport = fmt.Sprintf("%s \"%s\"", uImportName, pkg)
uType = prefix + uImportName + "." + name
} else {
// handle non- Pointer, Maps, Slices
pkg := t.Name.Package
name := t.Name.Name
// Come up with the alias the package is imported under
// Concatenate with directory package to reduce naming collisions
uImportBase := path.Base(pkg)
uImportName := path.Base(path.Dir(pkg)) + uImportBase
// Create the import statement
uImport = fmt.Sprintf("%s \"%s\"", uImportName, pkg)
// Create the field type name - should be <pkgalias>.<TypeName>
uType = uImportName + "." + name
}
}
}
}
}
// Embedded fields are rendered without a field name.
if member.Embedded {
memberName = ""
}
s.Fields = append(s.Fields, &codegen.Field{
Name: memberName,
VersionedPackage: member.Type.Name.Package,
UnversionedImport: uImport,
UnversionedType: uType,
})
// Add this member Type for processing if it isn't a primitive and
// is part of the same API group
if !mSubType.IsPrimitive() && GetGroup(mSubType) == GetGroup(t) {
remaining = append(remaining, mSubType)
}
}
return s, remaining
}
// genClient reports whether type c is marked as an API resource, i.e.
// whether it carries a "resource" or "kubebuilder:resource" comment tag,
// in which case client code should be generated for it.
func (b *APIs) genClient(c *types.Type) bool {
	comments := Comments(c.CommentLines)
	tags := comments.getTag("resource", ":") + comments.getTag("kubebuilder:resource", ":")
	return tags != ""
}
// genDeepCopy reports whether type c is tagged as a subresource request,
// which requires deepcopy generation.
func (b *APIs) genDeepCopy(c *types.Type) bool {
	return Comments(c.CommentLines).hasTag("subresource-request")
}
// parseDoc copies the doc annotation ("warning"/"note" tags) from the
// parsed resource's type onto both the parsed and the generated API
// resource, when present.
func parseDoc(resource, apiResource *codegen.APIResource) {
	if !HasDocAnnotation(resource.Type) {
		return
	}
	resource.DocAnnotation = getDocAnnotation(resource.Type, "warning", "note")
	apiResource.DocAnnotation = resource.DocAnnotation
}

View File

@@ -0,0 +1,42 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parse
import (
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/parser"
)
// NewContext builds a gengo generator context from the builder, wired
// with this package's name systems and default name system.
func NewContext(p *parser.Builder) (*generator.Context, error) {
	systems := NameSystems()
	return generator.NewContext(p, systems, DefaultNameSystem())
}
// DefaultNameSystem names the name system used when callers do not
// specify one explicitly ("public").
func DefaultNameSystem() string {
	const system = "public"
	return system
}
// NameSystems returns the name systems used by the generators in this
// package: "public" (exported Go identifiers) and "raw" (verbatim names).
func NameSystems() namer.NameSystems {
	systems := namer.NameSystems{}
	systems["public"] = namer.NewPublicNamer(1)
	systems["raw"] = namer.NewRawNamer("", nil)
	return systems
}

View File

@@ -0,0 +1,639 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parse
import (
"bytes"
"encoding/json"
"fmt"
"log"
"regexp"
"strconv"
"strings"
"text/template"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/gengo/types"
)
// parseCRDs populates the CRD field of each Group.Version.Resource,
// creating validations using the annotations on type fields.
// For each registered API resource it derives the OpenAPI v3 schema,
// the scope, subresources (status/scale), printer columns, categories,
// and singular/short names from the type's comment tags.
func (b *APIs) parseCRDs() {
for _, group := range b.APIs.Groups {
for _, version := range group.Versions {
for _, resource := range version.Resources {
if IsAPIResource(resource.Type) {
resource.JSONSchemaProps, resource.Validation =
b.typeToJSONSchemaProps(resource.Type, sets.NewString(), []string{}, true)
// Note: Drop the Type field at the root level of validation
// schema. Refer to following issue for details.
// https://github.com/kubernetes/kubernetes/issues/65293
resource.JSONSchemaProps.Type = ""
j, err := json.MarshalIndent(resource.JSONSchemaProps, "", " ")
if err != nil {
log.Fatalf("Could not Marshall validation %v\n", err)
}
resource.ValidationComments = string(j)
// Assemble the CustomResourceDefinition object itself.
resource.CRD = v1beta1.CustomResourceDefinition{
TypeMeta: metav1.TypeMeta{
APIVersion: "apiextensions.k8s.io/v1beta1",
Kind: "CustomResourceDefinition",
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s.%s.%s", resource.Resource, resource.Group, resource.Domain),
Labels: map[string]string{"controller-tools.k8s.io": "1.0"},
},
Spec: v1beta1.CustomResourceDefinitionSpec{
Group: fmt.Sprintf("%s.%s", resource.Group, resource.Domain),
Version: resource.Version,
Names: v1beta1.CustomResourceDefinitionNames{
Kind: resource.Kind,
Plural: resource.Resource,
},
Validation: &v1beta1.CustomResourceValidation{
OpenAPIV3Schema: &resource.JSONSchemaProps,
},
},
}
// Scope follows the +nonNamespaced marker on the type.
if resource.NonNamespaced {
resource.CRD.Spec.Scope = "Cluster"
} else {
resource.CRD.Spec.Scope = "Namespaced"
}
// Optional name metadata from comment tags.
if hasCategories(resource.Type) {
categoriesTag := getCategoriesTag(resource.Type)
categories := strings.Split(categoriesTag, ",")
resource.CRD.Spec.Names.Categories = categories
resource.Categories = categories
}
if hasSingular(resource.Type) {
singularName := getSingularName(resource.Type)
resource.CRD.Spec.Names.Singular = singularName
}
if hasStatusSubresource(resource.Type) {
if resource.CRD.Spec.Subresources == nil {
resource.CRD.Spec.Subresources = &v1beta1.CustomResourceSubresources{}
}
resource.CRD.Spec.Subresources.Status = &v1beta1.CustomResourceSubresourceStatus{}
}
// Empty (non-nil) status slices keep the marshaled YAML stable.
resource.CRD.Status.Conditions = []v1beta1.CustomResourceDefinitionCondition{}
resource.CRD.Status.StoredVersions = []string{}
if hasScaleSubresource(resource.Type) {
if resource.CRD.Spec.Subresources == nil {
resource.CRD.Spec.Subresources = &v1beta1.CustomResourceSubresources{}
}
jsonPath, err := parseScaleParams(resource.Type)
if err != nil {
log.Fatalf("failed in parsing CRD, error: %v", err.Error())
}
resource.CRD.Spec.Subresources.Scale = &v1beta1.CustomResourceSubresourceScale{
SpecReplicasPath: jsonPath[specReplicasPath],
StatusReplicasPath: jsonPath[statusReplicasPath],
}
labelSelctor, ok := jsonPath[labelSelectorPath]
if ok && labelSelctor != "" {
resource.CRD.Spec.Subresources.Scale.LabelSelectorPath = &labelSelctor
}
}
if hasPrintColumn(resource.Type) {
result, err := parsePrintColumnParams(resource.Type)
if err != nil {
log.Fatalf("failed to parse printcolumn annotations, error: %v", err.Error())
}
resource.CRD.Spec.AdditionalPrinterColumns = result
}
// Short names are declared as a ';'-separated list in the tag.
if len(resource.ShortName) > 0 {
resource.CRD.Spec.Names.ShortNames = strings.Split(resource.ShortName, ";")
}
}
}
}
}
}
// getTime returns the Go source snippet for the schema of meta/v1.Time
// (a date-time formatted string). The returned text is emitted verbatim
// into generated code, so the literal must not be reformatted.
func (b *APIs) getTime() string {
return `v1beta1.JSONSchemaProps{
Type: "string",
Format: "date-time",
}`
}
// getDuration returns the Go source snippet for the schema of
// meta/v1.Duration (a plain string).
func (b *APIs) getDuration() string {
return `v1beta1.JSONSchemaProps{
Type: "string",
}`
}
// getQuantity returns the Go source snippet for the schema of
// resource.Quantity (a plain string).
func (b *APIs) getQuantity() string {
return `v1beta1.JSONSchemaProps{
Type: "string",
}`
}
// objSchema returns the Go source snippet for an opaque object schema,
// used for ObjectMeta, Unstructured and RawExtension.
func (b *APIs) objSchema() string {
return `v1beta1.JSONSchemaProps{
Type: "object",
}`
}
// typeToJSONSchemaProps returns a JSONSchemaProps object and its serialization
// in Go that describe the JSONSchema validations for the given type.
// Well-known apimachinery types are special-cased first; everything else
// dispatches on the gengo kind (builtin/struct/map/slice/array/pointer/alias).
func (b *APIs) typeToJSONSchemaProps(t *types.Type, found sets.String, comments []string, isRoot bool) (v1beta1.JSONSchemaProps, string) {
// Special cases
time := types.Name{Name: "Time", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"}
duration := types.Name{Name: "Duration", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"}
quantity := types.Name{Name: "Quantity", Package: "k8s.io/apimachinery/pkg/api/resource"}
meta := types.Name{Name: "ObjectMeta", Package: "k8s.io/apimachinery/pkg/apis/meta/v1"}
unstructured := types.Name{Name: "Unstructured", Package: "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"}
rawExtension := types.Name{Name: "RawExtension", Package: "k8s.io/apimachinery/pkg/runtime"}
intOrString := types.Name{Name: "IntOrString", Package: "k8s.io/apimachinery/pkg/util/intstr"}
// special types first
specialTypeProps := v1beta1.JSONSchemaProps{
Description: parseDescription(comments),
}
// Apply any validation markers from the field comments.
for _, l := range comments {
getValidation(l, &specialTypeProps)
}
switch t.Name {
case time:
specialTypeProps.Type = "string"
specialTypeProps.Format = "date-time"
return specialTypeProps, b.getTime()
case duration:
specialTypeProps.Type = "string"
return specialTypeProps, b.getDuration()
case quantity:
specialTypeProps.Type = "string"
return specialTypeProps, b.getQuantity()
case meta, unstructured, rawExtension:
specialTypeProps.Type = "object"
return specialTypeProps, b.objSchema()
case intOrString:
// NOTE(review): the in-memory props use AnyOf(string|integer) while
// the returned Go serialization is the plain object schema — confirm
// this asymmetry is intended.
specialTypeProps.AnyOf = []v1beta1.JSONSchemaProps{
{
Type: "string",
},
{
Type: "integer",
},
}
return specialTypeProps, b.objSchema()
}
// General case: dispatch on the type kind. Pointers and aliases
// recurse into their element/underlying type.
var v v1beta1.JSONSchemaProps
var s string
switch t.Kind {
case types.Builtin:
v, s = b.parsePrimitiveValidation(t, found, comments)
case types.Struct:
v, s = b.parseObjectValidation(t, found, comments, isRoot)
case types.Map:
v, s = b.parseMapValidation(t, found, comments)
case types.Slice:
v, s = b.parseArrayValidation(t, found, comments)
case types.Array:
v, s = b.parseArrayValidation(t, found, comments)
case types.Pointer:
v, s = b.typeToJSONSchemaProps(t.Elem, found, comments, false)
case types.Alias:
v, s = b.typeToJSONSchemaProps(t.Underlying, found, comments, false)
default:
log.Fatalf("Unknown supported Kind %v\n", t.Kind)
}
return v, s
}
// jsonRegex extracts the value of a `json:"..."` struct tag (name plus options).
var jsonRegex = regexp.MustCompile("json:\"([a-zA-Z0-9,]+)\"")
// primitiveTemplateArgs feeds primitiveTemplate: the embedded props carry
// the validation markers, while Value/Format/EnumValue/Description hold
// the already-normalized JSON-schema strings.
type primitiveTemplateArgs struct {
v1beta1.JSONSchemaProps
Value string
Format string
EnumValue string // TODO check type of enum value to match the type of field
Description string
}
// primitiveTemplate renders the Go source form of a primitive field's
// JSONSchemaProps. The template text is emitted into generated code
// verbatim, so it must not be reformatted.
var primitiveTemplate = template.Must(template.New("map-template").Parse(
`v1beta1.JSONSchemaProps{
{{ if .Pattern -}}
Pattern: "{{ .Pattern }}",
{{ end -}}
{{ if .Maximum -}}
Maximum: getFloat({{ .Maximum }}),
{{ end -}}
{{ if .ExclusiveMaximum -}}
ExclusiveMaximum: {{ .ExclusiveMaximum }},
{{ end -}}
{{ if .Minimum -}}
Minimum: getFloat({{ .Minimum }}),
{{ end -}}
{{ if .ExclusiveMinimum -}}
ExclusiveMinimum: {{ .ExclusiveMinimum }},
{{ end -}}
Type: "{{ .Value }}",
{{ if .Format -}}
Format: "{{ .Format }}",
{{ end -}}
{{ if .EnumValue -}}
Enum: {{ .EnumValue }},
{{ end -}}
{{ if .MaxLength -}}
MaxLength: getInt({{ .MaxLength }}),
{{ end -}}
{{ if .MinLength -}}
MinLength: getInt({{ .MinLength }}),
{{ end -}}
}`))
// parsePrimitiveValidation returns a JSONSchemaProps object and its
// serialization in Go that describe the validations for the given primitive
// type.
// Go builtin type names are mapped to their JSON-schema type/format pair
// (e.g. int64 -> integer/int64, float64 -> number/double).
func (b *APIs) parsePrimitiveValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) {
props := v1beta1.JSONSchemaProps{Type: string(t.Name.Name)}
// Apply validation markers from the field comments before mapping types,
// so a comment-supplied Format on strings is preserved below.
for _, l := range comments {
getValidation(l, &props)
}
buff := &bytes.Buffer{}
var n, f, s, d string
switch t.Name.Name {
case "int", "int64", "uint64":
n = "integer"
f = "int64"
case "int32", "uint32":
n = "integer"
f = "int32"
case "float", "float32":
n = "number"
f = "float"
case "float64":
n = "number"
f = "double"
case "bool":
n = "boolean"
case "string":
n = "string"
f = props.Format
default:
// Unrecognized builtins keep their Go name as the schema type.
n = t.Name.Name
}
if props.Enum != nil {
s = parseEnumToString(props.Enum)
}
d = parseDescription(comments)
if err := primitiveTemplate.Execute(buff, primitiveTemplateArgs{props, n, f, s, d}); err != nil {
log.Fatalf("%v", err)
}
props.Type = n
props.Format = f
props.Description = d
return props, buff.String()
}
// mapTempateArgs supplies the values rendered by mapTemplate.
// NOTE(review): "Tempate" is a misspelling of "Template"; renaming it would
// also require updating parseMapValidation, so it is left as-is here.
type mapTempateArgs struct {
	Result            string // Go serialization of the map value type's schema
	SkipMapValidation bool   // when true, AdditionalProperties is omitted entirely
}

// mapTemplate renders the Go literal for a map's JSONSchemaProps; the value
// type's schema is embedded via AdditionalProperties unless validation is
// skipped.
var mapTemplate = template.Must(template.New("map-template").Parse(
	`v1beta1.JSONSchemaProps{
    Type:                 "object",
    {{if not .SkipMapValidation}}AdditionalProperties: &v1beta1.JSONSchemaPropsOrBool{
        Allows: true,
        Schema: &{{.Result}},
    },{{end}}
}`))
// parseMapValidation returns a JSONSchemaProps object and its serialization in
// Go that describe the validations for the given map type.
func (b *APIs) parseMapValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) {
	elemProps, elemCode := b.typeToJSONSchemaProps(t.Elem, found, comments, false)
	// The comment text describes the map property itself, not its values.
	elemProps.Description = ""

	props := v1beta1.JSONSchemaProps{
		Type:        "object",
		Description: parseDescription(comments),
	}
	opts := b.arguments.CustomArgs.(*Options)
	if !opts.SkipMapValidation {
		props.AdditionalProperties = &v1beta1.JSONSchemaPropsOrBool{
			Allows: true,
			Schema: &elemProps,
		}
	}
	for _, comment := range comments {
		getValidation(comment, &props)
	}

	out := &bytes.Buffer{}
	if err := mapTemplate.Execute(out, mapTempateArgs{Result: elemCode, SkipMapValidation: opts.SkipMapValidation}); err != nil {
		log.Fatalf("%v", err)
	}
	return props, out.String()
}
// arrayTemplate renders the Go literal for an array/slice JSONSchemaProps.
// Item validations live in the nested Items schema; Format is only emitted
// for the []byte special case (string/byte).
var arrayTemplate = template.Must(template.New("array-template").Parse(
	`v1beta1.JSONSchemaProps{
    Type:                 "{{.Type}}",
    {{ if .Format -}}
    Format: "{{.Format}}",
    {{ end -}}
    {{ if .MaxItems -}}
    MaxItems: getInt({{ .MaxItems }}),
    {{ end -}}
    {{ if .MinItems -}}
    MinItems: getInt({{ .MinItems }}),
    {{ end -}}
    {{ if .UniqueItems -}}
    UniqueItems: {{ .UniqueItems }},
    {{ end -}}
    {{ if .Items -}}
    Items: &v1beta1.JSONSchemaPropsOrArray{
        Schema: &{{.ItemsSchema}},
    },
    {{ end -}}
}`))

// arrayTemplateArgs supplies the values rendered by arrayTemplate.
type arrayTemplateArgs struct {
	v1beta1.JSONSchemaProps
	ItemsSchema string // Go serialization of the element type's schema
}
// parseArrayValidation returns a JSONSchemaProps object and its serialization in
// Go that describe the validations for the given array type.
func (b *APIs) parseArrayValidation(t *types.Type, found sets.String, comments []string) (v1beta1.JSONSchemaProps, string) {
	items, result := b.typeToJSONSchemaProps(t.Elem, found, comments, false)
	// The comment text describes the array property itself, not its items.
	items.Description = ""
	props := v1beta1.JSONSchemaProps{
		Type:        "array",
		Items:       &v1beta1.JSONSchemaPropsOrArray{Schema: &items},
		Description: parseDescription(comments),
	}
	// To represent byte arrays in the generated code, the property of the OpenAPI definition
	// should have string as its type and byte as its format.
	if t.Name.Name == "[]byte" {
		props.Type = "string"
		props.Format = "byte"
		props.Items = nil
		props.Description = parseDescription(comments)
	}
	for _, l := range comments {
		getValidation(l, &props)
	}
	if t.Name.Name != "[]byte" {
		// Except for the byte array special case above, the "format" property
		// should be applied to the array items and not the array itself.
		// This clear must stay between getValidation (which may set Format)
		// and the template execution below so the two outputs agree.
		props.Format = ""
	}
	buff := &bytes.Buffer{}
	if err := arrayTemplate.Execute(buff, arrayTemplateArgs{props, result}); err != nil {
		log.Fatalf("%v", err)
	}
	return props, buff.String()
}
// objectTemplateArgs supplies the values rendered by objectTemplate.
type objectTemplateArgs struct {
	v1beta1.JSONSchemaProps
	Fields   map[string]string // field name -> Go serialization of its schema
	Required []string          // names of fields without ",omitempty"
	IsRoot   bool              // root objects omit the explicit Type field
}

// objectTemplate renders the Go literal for a struct's JSONSchemaProps with
// its per-field properties and required-field list.
var objectTemplate = template.Must(template.New("object-template").Parse(
	`v1beta1.JSONSchemaProps{
    {{ if not .IsRoot -}}
    Type:                 "object",
    {{ end -}}
    Properties: map[string]v1beta1.JSONSchemaProps{
        {{ range $k, $v := .Fields -}}
        "{{ $k }}": {{ $v }},
        {{ end -}}
    },
    {{if .Required}}Required: []string{
        {{ range $k, $v := .Required -}}
        "{{ $v }}",
        {{ end -}}
    },{{ end -}}
}`))
// parseObjectValidation returns a JSONSchemaProps object and its serialization in
// Go that describe the validations for the given object type.
func (b *APIs) parseObjectValidation(t *types.Type, found sets.String, comments []string, isRoot bool) (v1beta1.JSONSchemaProps, string) {
	props := v1beta1.JSONSchemaProps{
		Type:        "object",
		Description: parseDescription(comments),
	}
	for _, comment := range comments {
		getValidation(comment, &props)
	}

	out := &bytes.Buffer{}
	// Types from the upstream k8s.io/api packages are emitted without their
	// members; everything else is expanded field by field.
	if strings.HasPrefix(t.Name.String(), "k8s.io/api") {
		if err := objectTemplate.Execute(out, objectTemplateArgs{props, nil, nil, false}); err != nil {
			log.Fatalf("%v", err)
		}
		return props, out.String()
	}

	m, result, required := b.getMembers(t, found)
	props.Properties = m
	props.Required = required
	if err := objectTemplate.Execute(out, objectTemplateArgs{props, result, required, isRoot}); err != nil {
		log.Fatalf("%v", err)
	}
	return props, out.String()
}
// getValidation parses a single "+kubebuilder:validation:<key>=<value>"
// comment line and sets the corresponding validation rule on props.
// Lines without the prefix are ignored; malformed or unknown keys are fatal.
//
// Fixes: strconv errors are now checked before the parsed value is used;
// the key/value split uses SplitN so values containing '=' (e.g. regex
// patterns) are no longer rejected; "Unsupport" typo corrected.
func getValidation(comment string, props *v1beta1.JSONSchemaProps) {
	comment = strings.TrimLeft(comment, " ")
	if !strings.HasPrefix(comment, "+kubebuilder:validation:") {
		return
	}
	c := strings.Replace(comment, "+kubebuilder:validation:", "", -1)
	// SplitN keeps '=' characters inside the value intact.
	parts := strings.SplitN(c, "=", 2)
	if len(parts) != 2 {
		log.Fatalf("Expected +kubebuilder:validation:<key>=<value> actual: %s", comment)
		return
	}
	switch parts[0] {
	case "Maximum":
		f, err := strconv.ParseFloat(parts[1], 64)
		if err != nil {
			log.Fatalf("Could not parse float from %s: %v", comment, err)
			return
		}
		props.Maximum = &f
	case "ExclusiveMaximum":
		b, err := strconv.ParseBool(parts[1])
		if err != nil {
			log.Fatalf("Could not parse bool from %s: %v", comment, err)
			return
		}
		props.ExclusiveMaximum = b
	case "Minimum":
		f, err := strconv.ParseFloat(parts[1], 64)
		if err != nil {
			log.Fatalf("Could not parse float from %s: %v", comment, err)
			return
		}
		props.Minimum = &f
	case "ExclusiveMinimum":
		b, err := strconv.ParseBool(parts[1])
		if err != nil {
			log.Fatalf("Could not parse bool from %s: %v", comment, err)
			return
		}
		props.ExclusiveMinimum = b
	case "MaxLength":
		i, err := strconv.Atoi(parts[1])
		if err != nil {
			log.Fatalf("Could not parse int from %s: %v", comment, err)
			return
		}
		v := int64(i)
		props.MaxLength = &v
	case "MinLength":
		i, err := strconv.Atoi(parts[1])
		if err != nil {
			log.Fatalf("Could not parse int from %s: %v", comment, err)
			return
		}
		v := int64(i)
		props.MinLength = &v
	case "Pattern":
		props.Pattern = parts[1]
	case "MaxItems":
		// Item-count validations only apply to arrays.
		if props.Type == "array" {
			i, err := strconv.Atoi(parts[1])
			if err != nil {
				log.Fatalf("Could not parse int from %s: %v", comment, err)
				return
			}
			v := int64(i)
			props.MaxItems = &v
		}
	case "MinItems":
		if props.Type == "array" {
			i, err := strconv.Atoi(parts[1])
			if err != nil {
				log.Fatalf("Could not parse int from %s: %v", comment, err)
				return
			}
			v := int64(i)
			props.MinItems = &v
		}
	case "UniqueItems":
		if props.Type == "array" {
			b, err := strconv.ParseBool(parts[1])
			if err != nil {
				log.Fatalf("Could not parse bool from %s: %v", comment, err)
				return
			}
			props.UniqueItems = b
		}
	case "MultipleOf":
		f, err := strconv.ParseFloat(parts[1], 64)
		if err != nil {
			log.Fatalf("Could not parse float from %s: %v", comment, err)
			return
		}
		props.MultipleOf = &f
	case "Enum":
		// Arrays carry enums on their items, not on the array schema itself.
		if props.Type != "array" {
			value := strings.Split(parts[1], ",")
			enums := []v1beta1.JSON{}
			for _, s := range value {
				checkType(props, s, &enums)
			}
			props.Enum = enums
		}
	case "Format":
		props.Format = parts[1]
	default:
		log.Fatalf("Unsupported validation: %s", comment)
	}
}
// getMembers builds maps by field name of the JSONSchemaProps and their Go
// serializations, plus the list of required (non-omitempty) field names.
// Fixes: missing newline in the recursion log line; the found-set cleanup is
// deferred immediately after Insert so the pairing is explicit (behavior
// unchanged — the original deferred it on the line before the return).
func (b *APIs) getMembers(t *types.Type, found sets.String) (map[string]v1beta1.JSONSchemaProps, map[string]string, []string) {
	members := map[string]v1beta1.JSONSchemaProps{}
	result := map[string]string{}
	required := []string{}

	// Don't allow recursion until we support it through refs
	// TODO: Support recursion
	if found.Has(t.Name.String()) {
		fmt.Printf("Breaking recursion for type %s\n", t.Name.String())
		return members, result, required
	}
	found.Insert(t.Name.String())
	// Only cycles are broken: once this type is fully processed, siblings may
	// reference it again.
	defer found.Delete(t.Name.String())

	for _, member := range t.Members {
		tags := jsonRegex.FindStringSubmatch(member.Tags)
		if len(tags) == 0 {
			// Skip fields without json tags
			continue
		}
		ts := strings.Split(tags[1], ",")
		name := member.Name
		strat := ""
		if len(ts) > 0 && len(ts[0]) > 0 {
			name = ts[0]
		}
		if len(ts) > 1 {
			strat = ts[1]
		}
		if strat == "inline" {
			// Hoist an inlined struct's members into this object.
			m, r, re := b.getMembers(member.Type, found)
			for n, v := range m {
				members[n] = v
			}
			for n, v := range r {
				result[n] = v
			}
			required = append(required, re...)
		} else {
			m, r := b.typeToJSONSchemaProps(member.Type, found, member.CommentLines, false)
			members[name] = m
			result[name] = r
			// Fields without ",omitempty" are required.
			if !strings.HasSuffix(strat, "omitempty") {
				required = append(required, name)
			}
		}
	}
	return members, result, required
}

View File

@@ -0,0 +1,161 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parse
import (
"fmt"
"log"
"strings"
"github.com/gobuffalo/flect"
"k8s.io/gengo/types"
"sigs.k8s.io/controller-tools/pkg/internal/codegen"
"sigs.k8s.io/controller-tools/pkg/internal/general"
)
// parseIndex indexes all types with the comment "// +resource=RESOURCE" by GroupVersionKind and
// GroupKindVersion
func (b *APIs) parseIndex() {
	// Index resource by group, version, kind
	b.ByGroupVersionKind = map[string]map[string]map[string]*codegen.APIResource{}
	// Index resources by group, kind, version
	b.ByGroupKindVersion = map[string]map[string]map[string]*codegen.APIResource{}
	// Index subresources by group, version, kind
	b.SubByGroupVersionKind = map[string]map[string]map[string]*types.Type{}
	for _, c := range b.context.Order {
		// The type is a subresource, add it to the subresource index
		if IsAPISubresource(c) {
			group := GetGroup(c)
			version := GetVersion(c, group)
			kind := GetKind(c, group)
			if _, f := b.SubByGroupVersionKind[group]; !f {
				b.SubByGroupVersionKind[group] = map[string]map[string]*types.Type{}
			}
			if _, f := b.SubByGroupVersionKind[group][version]; !f {
				b.SubByGroupVersionKind[group][version] = map[string]*types.Type{}
			}
			b.SubByGroupVersionKind[group][version][kind] = c
		}
		// If it isn't a subresource or resource, continue to the next type
		if !IsAPIResource(c) {
			continue
		}
		// Parse out the resource information
		r := &codegen.APIResource{
			Type:          c,
			NonNamespaced: IsNonNamespaced(c),
		}
		r.Group = GetGroup(c)
		r.Version = GetVersion(c, r.Group)
		r.Kind = GetKind(c, r.Group)
		r.Domain = b.Domain
		// TODO: revisit the part...
		// r.Resource is always empty at this point, so the lowercased plural
		// of the kind is the default; the annotation below may override it.
		if r.Resource == "" {
			r.Resource = flect.Pluralize(strings.ToLower(r.Kind))
		}
		rt, err := parseResourceAnnotation(c)
		if err != nil {
			log.Fatalf("failed to parse resource annotations, error: %v", err.Error())
		}
		if rt.Resource != "" {
			r.Resource = rt.Resource
		}
		r.ShortName = rt.ShortName
		// Copy the Status strategy to mirror the non-status strategy
		r.StatusStrategy = strings.TrimSuffix(r.Strategy, "Strategy")
		r.StatusStrategy = fmt.Sprintf("%sStatusStrategy", r.StatusStrategy)
		// Initialize the map entries so they aren't nil
		if _, f := b.ByGroupKindVersion[r.Group]; !f {
			b.ByGroupKindVersion[r.Group] = map[string]map[string]*codegen.APIResource{}
		}
		if _, f := b.ByGroupKindVersion[r.Group][r.Kind]; !f {
			b.ByGroupKindVersion[r.Group][r.Kind] = map[string]*codegen.APIResource{}
		}
		if _, f := b.ByGroupVersionKind[r.Group]; !f {
			b.ByGroupVersionKind[r.Group] = map[string]map[string]*codegen.APIResource{}
		}
		if _, f := b.ByGroupVersionKind[r.Group][r.Version]; !f {
			b.ByGroupVersionKind[r.Group][r.Version] = map[string]*codegen.APIResource{}
		}
		// Add the resource to the map
		b.ByGroupKindVersion[r.Group][r.Kind][r.Version] = r
		b.ByGroupVersionKind[r.Group][r.Version][r.Kind] = r
		// NOTE(review): r.Type was already set in the literal above; this
		// reassignment is redundant but harmless.
		r.Type = c
	}
}
// resourceTags contains the tags present in a "+resource=" comment
type resourceTags struct {
	Resource  string // plural resource path, from "path=<resourcepath>"
	REST      string // not populated by resourceAnnotationValue in this file
	Strategy  string // not populated by resourceAnnotationValue in this file
	ShortName string // kubectl short name, from "shortName=<name>"
}
// resourceAnnotationValue is a helper function to extract resource annotation.
// The tag is a comma-separated list of key=value pairs; recognized keys are
// "path" (the plural resource path) and "shortName".
// Fixes: the underlying ParseKV error is now included in the returned error
// (it was silently dropped), and the unknown-key message reports the key
// rather than the value.
func resourceAnnotationValue(tag string) (resourceTags, error) {
	res := resourceTags{}
	for _, elem := range strings.Split(tag, ",") {
		key, value, err := general.ParseKV(elem)
		if err != nil {
			return resourceTags{}, fmt.Errorf("// +kubebuilder:resource: tags must be key value pairs. Expected "+
				"keys [path=<resourcepath>] "+
				"Got string: [%s]: %v", tag, err)
		}
		switch key {
		case "path":
			res.Resource = value
		case "shortName":
			res.ShortName = value
		default:
			return resourceTags{}, fmt.Errorf("the given key %s is invalid", key)
		}
	}
	return res, nil
}
// parseResourceAnnotation parses the tags in a "+resource=" comment into a resourceTags struct.
// At most one +kubebuilder:resource annotation is allowed per type; a second
// occurrence is an error. Fix: grammar in the duplicate-annotation message
// ("should only exists" -> "should only exist").
func parseResourceAnnotation(t *types.Type) (resourceTags, error) {
	finalResult := resourceTags{}
	var resourceAnnotationFound bool
	for _, comment := range t.CommentLines {
		anno := general.GetAnnotation(comment, "kubebuilder:resource")
		if len(anno) == 0 {
			continue
		}
		result, err := resourceAnnotationValue(anno)
		if err != nil {
			return resourceTags{}, err
		}
		// A second annotation on the same type is rejected; parse errors on
		// the duplicate take precedence (matching the original ordering).
		if resourceAnnotationFound {
			return resourceTags{}, fmt.Errorf("resource annotation should only exist once per type")
		}
		resourceAnnotationFound = true
		finalResult = result
	}
	return finalResult, nil
}

View File

@@ -0,0 +1,151 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parse
import (
"bufio"
"go/build"
"log"
"os"
"path/filepath"
"strings"
"github.com/pkg/errors"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/gengo/args"
"k8s.io/gengo/generator"
"k8s.io/gengo/types"
"sigs.k8s.io/controller-tools/pkg/internal/codegen"
)
// APIs is the information of a collection of API
type APIs struct {
	context   *generator.Context  // gengo parsing context (universe of loaded types)
	arguments *args.GeneratorArgs // generator CLI arguments, including CustomArgs
	// Domain is the API domain suffix (e.g. "k8s.io"), parsed from doc.go when empty.
	Domain string
	// VersionedPkgs is the set of packages containing versioned resources (pkg/apis/<group>/<version>).
	VersionedPkgs sets.String
	// UnversionedPkgs is the set of group packages (pkg/apis/<group>).
	UnversionedPkgs sets.String
	// APIsPkg is the import path of the root apis package.
	APIsPkg    string
	APIsPkgRaw *types.Package
	// GroupNames holds the names of all discovered API groups.
	GroupNames sets.String
	APIs       *codegen.APIs
	Controllers []codegen.Controller
	// ByGroupKindVersion indexes resources as group -> kind -> version.
	ByGroupKindVersion map[string]map[string]map[string]*codegen.APIResource
	// ByGroupVersionKind indexes resources as group -> version -> kind.
	ByGroupVersionKind map[string]map[string]map[string]*codegen.APIResource
	// SubByGroupVersionKind indexes subresource types as group -> version -> kind.
	SubByGroupVersionKind map[string]map[string]map[string]*types.Type
	Groups                map[string]types.Package
	Rules                 []rbacv1.PolicyRule
	Informers             map[v1.GroupVersionKind]bool
}
// NewAPIs returns a new APIs instance with given context.
// Note: parsePackages must run before parseGroupNames, which reads
// b.UnversionedPkgs; parseIndex likewise relies on the package sets.
func NewAPIs(context *generator.Context, arguments *args.GeneratorArgs, domain, apisPkg string) *APIs {
	b := &APIs{
		context:   context,
		arguments: arguments,
		Domain:    domain,
		APIsPkg:   apisPkg,
	}
	b.parsePackages()
	b.parseGroupNames()
	b.parseIndex()
	b.parseAPIs()
	b.parseCRDs()
	// Fall back to discovering the domain from apis/doc.go when not supplied.
	if len(b.Domain) == 0 {
		b.parseDomain()
	}
	return b
}
// parseGroupNames initializes b.GroupNames with the set of all groups
func (b *APIs) parseGroupNames() {
	b.GroupNames = sets.String{}
	for p := range b.UnversionedPkgs {
		// Packages without Go files are absent from the universe; skip them.
		if b.context.Universe[p] == nil {
			continue
		}
		b.GroupNames.Insert(filepath.Base(p))
	}
}
// parsePackages parses out the sets of Versioned, Unversioned packages and identifies the root Apis package.
func (b *APIs) parsePackages() {
	b.VersionedPkgs = sets.NewString()
	b.UnversionedPkgs = sets.NewString()
	for _, t := range b.context.Order {
		if !IsAPIResource(t) {
			continue
		}
		versionedPkg := t.Name.Package
		b.VersionedPkgs.Insert(versionedPkg)
		// The group (unversioned) package is the parent of the versioned one.
		b.UnversionedPkgs.Insert(filepath.Dir(versionedPkg))
	}
}
// parseDomain parses the domain from the apis/doc.go file comment "// +domain=YOUR_DOMAIN".
func (b *APIs) parseDomain() {
	pkg := b.context.Universe[b.APIsPkg]
	if pkg == nil {
		// If the input had no Go files, for example.
		panic(errors.Errorf("Missing apis package."))
	}
	b.Domain = Comments(pkg.Comments).getTag("domain", "=")
	if len(b.Domain) != 0 {
		return
	}
	// Fall back to scanning pkg/apis/doc.go on disk.
	b.Domain = parseDomainFromFiles(b.context.Inputs)
	if len(b.Domain) == 0 {
		panic("Could not find string matching // +domain=.+ in apis/doc.go")
	}
}
// parseDomainFromFiles extracts the "+domain=" comment tag from pkg/apis/doc.go
// for the first input path ending in "pkg/apis". Returns "" when no such path
// or tag exists; I/O errors are fatal.
// Fixes: the file path is built with filepath.Join instead of hard-coded "/"
// separators, and the file handling is extracted into a helper so the
// defer-based Close is scoped to one file rather than sitting inside a loop.
func parseDomainFromFiles(paths []string) string {
	for _, path := range paths {
		if strings.HasSuffix(path, "pkg/apis") {
			return domainFromDocFile(filepath.Join(build.Default.GOPATH, "src", path, "doc.go"))
		}
	}
	return ""
}

// domainFromDocFile reads the given doc.go and returns the value of its
// "+domain=" comment tag, or "" when absent.
func domainFromDocFile(filePath string) string {
	file, err := os.Open(filePath)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	lines := []string{}
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		if strings.HasPrefix(scanner.Text(), "//") {
			lines = append(lines, strings.Replace(scanner.Text(), "// ", "", 1))
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	return Comments(lines).getTag("domain", "=")
}

View File

@@ -0,0 +1,539 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package parse
import (
"fmt"
"log"
"path/filepath"
"strconv"
"strings"
"github.com/pkg/errors"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/gengo/types"
)
// Annotation keys and error messages used when parsing the
// +kubebuilder:subresource:scale and +kubebuilder:printcolumn comment tags.
// Fixes typos in the user-facing error messages: "optinal" -> "optional",
// "kye-value" -> "key-value".
const (
	specReplicasPath   = "specpath"     // required: JSONPath to spec replicas
	statusReplicasPath = "statuspath"   // required: JSONPath to status replicas
	labelSelectorPath  = "selectorpath" // optional: JSONPath to the label selector
	jsonPathError      = "invalid scale path. specpath, statuspath key-value pairs are required, only selectorpath key-value is optional. For example: // +kubebuilder:subresource:scale:specpath=.spec.replica,statuspath=.status.replica,selectorpath=.spec.Label"
	printColumnName    = "name"
	printColumnType    = "type"
	printColumnDescr   = "description"
	printColumnPath    = "JSONPath"
	printColumnFormat  = "format"
	printColumnPri     = "priority"
	printColumnError   = "invalid printcolumn path. name,type, and JSONPath are required key-value pairs and rest of the fields are optional. For example: // +kubebuilder:printcolumn:name=abc,type=string,JSONPath=status"
)
// Options contains the parser options
type Options struct {
	// SkipMapValidation flag determines whether to emit AdditionalProperties
	// validation for map-typed fields in the generated schema.
	SkipMapValidation bool

	// SkipRBACValidation flag determines whether to check RBAC annotations
	// for the controller or not at parse stage.
	SkipRBACValidation bool
}
// IsAPIResource returns true if either of the two conditions become true:
// 1. t has a +resource/+kubebuilder:resource comment tag
// 2. t has TypeMeta and ObjectMeta in its member list.
func IsAPIResource(t *types.Type) bool {
	for _, line := range t.CommentLines {
		if strings.Contains(line, "+resource") || strings.Contains(line, "+kubebuilder:resource") {
			return true
		}
	}
	hasTypeMeta, hasObjectMeta := false, false
	for _, m := range t.Members {
		switch {
		case m.Name == "TypeMeta" && m.Type.String() == "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta":
			hasTypeMeta = true
		case m.Name == "ObjectMeta" && m.Type.String() == "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta":
			hasObjectMeta = true
		}
		if hasTypeMeta && hasObjectMeta {
			return true
		}
	}
	return false
}
// IsNonNamespaced returns true if t has a +nonNamespaced comment tag
func IsNonNamespaced(t *types.Type) bool {
	if !IsAPIResource(t) {
		return false
	}
	// The tag may appear in either comment block attached to the type.
	commentBlocks := [][]string{t.CommentLines, t.SecondClosestCommentLines}
	for _, block := range commentBlocks {
		for _, line := range block {
			if strings.Contains(line, "+genclient:nonNamespaced") {
				return true
			}
		}
	}
	return false
}
// hasCommentTag reports whether any of t's comment lines contains at least one
// of the given substrings. Extracted to remove four identical loops below.
func hasCommentTag(t *types.Type, substrs ...string) bool {
	for _, c := range t.CommentLines {
		for _, s := range substrs {
			if strings.Contains(c, s) {
				return true
			}
		}
	}
	return false
}

// IsController returns true if t has a +controller or +kubebuilder:controller tag
func IsController(t *types.Type) bool {
	return hasCommentTag(t, "+controller", "+kubebuilder:controller")
}

// IsRBAC returns true if t has a +rbac or +kubebuilder:rbac tag
func IsRBAC(t *types.Type) bool {
	return hasCommentTag(t, "+rbac", "+kubebuilder:rbac")
}

// hasPrintColumn returns true if t has a +printcolumn or +kubebuilder:printcolumn annotation.
func hasPrintColumn(t *types.Type) bool {
	return hasCommentTag(t, "+printcolumn", "+kubebuilder:printcolumn")
}

// IsInformer returns true if t has a +informers or +kubebuilder:informers tag
func IsInformer(t *types.Type) bool {
	return hasCommentTag(t, "+informers", "+kubebuilder:informers")
}
// IsAPISubresource returns true if t has a +subresource-request comment tag
func IsAPISubresource(t *types.Type) bool {
	for _, line := range t.CommentLines {
		if strings.Contains(line, "+subresource-request") {
			return true
		}
	}
	return false
}

// HasSubresource returns true if t is an APIResource with one or more Subresources
func HasSubresource(t *types.Type) bool {
	if !IsAPIResource(t) {
		return false
	}
	// Any comment mentioning "subresource" counts (status, scale, ...).
	for _, line := range t.CommentLines {
		if strings.Contains(line, "subresource") {
			return true
		}
	}
	return false
}
// isResourceWithComment reports whether t is an APIResource whose comment
// lines contain the given substring. Extracted to remove four identical
// IsAPIResource-plus-scan loops below.
func isResourceWithComment(t *types.Type, substr string) bool {
	if !IsAPIResource(t) {
		return false
	}
	for _, c := range t.CommentLines {
		if strings.Contains(c, substr) {
			return true
		}
	}
	return false
}

// hasStatusSubresource returns true if t is an APIResource annotated with
// +kubebuilder:subresource:status
func hasStatusSubresource(t *types.Type) bool {
	return isResourceWithComment(t, "+kubebuilder:subresource:status")
}

// hasScaleSubresource returns true if t is an APIResource annotated with
// +kubebuilder:subresource:scale
func hasScaleSubresource(t *types.Type) bool {
	return isResourceWithComment(t, "+kubebuilder:subresource:scale")
}

// hasCategories returns true if t is an APIResource annotated with
// +kubebuilder:categories
func hasCategories(t *types.Type) bool {
	return isResourceWithComment(t, "+kubebuilder:categories")
}

// HasDocAnnotation returns true if t is an APIResource with doc annotation
// +kubebuilder:doc
func HasDocAnnotation(t *types.Type) bool {
	return isResourceWithComment(t, "+kubebuilder:doc")
}
// hasSingular returns true if t is an APIResource annotated with
// +kubebuilder:singular
// Fix: the original was not gofmt-clean (missing spaces before braces).
func hasSingular(t *types.Type) bool {
	if !IsAPIResource(t) {
		return false
	}
	for _, c := range t.CommentLines {
		if strings.Contains(c, "+kubebuilder:singular") {
			return true
		}
	}
	return false
}
// IsUnversioned returns true if t is in given group, and not in versioned path.
func IsUnversioned(t *types.Type, group string) bool {
	// Unversioned types live directly under the apis/ directory.
	parent := filepath.Base(filepath.Dir(t.Name.Package))
	return IsApisDir(parent) && GetGroup(t) == group
}

// IsVersioned returns true if t is in given group, and in versioned path.
func IsVersioned(t *types.Type, group string) bool {
	// Versioned types live two levels below apis/ (apis/<group>/<version>).
	grandparent := filepath.Base(filepath.Dir(filepath.Dir(t.Name.Package)))
	return IsApisDir(grandparent) && GetGroup(t) == group
}
// GetVersion returns version of t.
func GetVersion(t *types.Type, group string) string {
	if !IsVersioned(t, group) {
		panic(errors.Errorf("Cannot get version for unversioned type %v", t.Name))
	}
	// The version is the package directory name itself.
	return filepath.Base(t.Name.Package)
}

// GetGroupPackage returns group package of t.
func GetGroupPackage(t *types.Type) string {
	// An unversioned type's own package is the group package; a versioned
	// type's group package is its parent directory.
	if IsApisDir(filepath.Base(filepath.Dir(t.Name.Package))) {
		return t.Name.Package
	}
	return filepath.Dir(t.Name.Package)
}

// GetGroup returns group of t.
func GetGroup(t *types.Type) string {
	return filepath.Base(GetGroupPackage(t))
}

// GetKind returns kind of t.
func GetKind(t *types.Type, group string) string {
	if !IsVersioned(t, group) && !IsUnversioned(t, group) {
		panic(errors.Errorf("Cannot get kind for type not in group %v", t.Name))
	}
	return t.Name.Name
}
// IsApisDir returns true if a directory path is a Kubernetes api directory
func IsApisDir(dir string) bool {
	switch dir {
	case "apis", "api":
		return true
	}
	return false
}
// Comments is a structure for using comment tags on go structs and fields
type Comments []string

// getTag returns the value for the first comment with a prefix matching "+name<sep>"
// e.g. "+name=foo\n+name=bar" would return "foo"
func (c Comments) getTag(name, sep string) string {
	prefix := fmt.Sprintf("+%s%s", name, sep)
	for _, line := range c {
		if strings.HasPrefix(line, prefix) {
			return strings.TrimPrefix(line, prefix)
		}
	}
	return ""
}

// hasTag returns true if the Comments has a tag with the given name
func (c Comments) hasTag(name string) bool {
	prefix := fmt.Sprintf("+%s", name)
	for _, line := range c {
		if strings.HasPrefix(line, prefix) {
			return true
		}
	}
	return false
}

// getTags returns the value for all comments with a prefix and separator. E.g. for "name" and "="
// "+name=foo\n+name=bar" would return []string{"foo", "bar"}
func (c Comments) getTags(name, sep string) []string {
	prefix := fmt.Sprintf("+%s%s", name, sep)
	tags := []string{}
	for _, line := range c {
		if strings.HasPrefix(line, prefix) {
			tags = append(tags, strings.TrimPrefix(line, prefix))
		}
	}
	return tags
}
// getCategoriesTag returns the value of the +kubebuilder:categories tags
func getCategoriesTag(c *types.Type) string {
	resource := Comments(c.CommentLines).getTag("kubebuilder:categories", "=")
	if resource == "" {
		panic(errors.Errorf("Must specify +kubebuilder:categories comment for type %v", c.Name))
	}
	return resource
}
// getSingularName returns the value of the +kubebuilder:singular tag
func getSingularName(c *types.Type) string {
	singular := Comments(c.CommentLines).getTag("kubebuilder:singular", "=")
	if singular == "" {
		panic(errors.Errorf("Must specify a value to use with +kubebuilder:singular comment for type %v", c.Name))
	}
	return singular
}
// getDocAnnotation parse annotations of "+kubebuilder:doc:" with tags of "warning" or "doc" for control generating doc config.
// E.g. +kubebuilder:doc:warning=foo +kubebuilder:doc:note=bar
func getDocAnnotation(t *types.Type, tags ...string) map[string]string {
	annotation := make(map[string]string)
	// Later comment lines win when a tag appears more than once.
	for _, line := range t.CommentLines {
		for _, tag := range tags {
			prefix := fmt.Sprintf("+kubebuilder:doc:%s=", tag)
			if strings.HasPrefix(line, prefix) {
				annotation[tag] = strings.TrimPrefix(line, prefix)
			}
		}
	}
	return annotation
}
// parseByteValue returns the literal digital number values from a byte array
// as a comma-separated list, e.g. []byte{1, 2} -> "1,2".
func parseByteValue(b []byte) string {
	parts := make([]string, len(b))
	for i, v := range b {
		parts[i] = strconv.Itoa(int(v))
	}
	return strings.Join(parts, ",")
}
// parseDescription parse comments above each field in the type definition.
// Annotation lines (+kubebuilder..., +optional) are excluded; the remaining
// lines are joined with single spaces and trailing spaces are trimmed.
func parseDescription(res []string) string {
	kept := []string{}
	for _, comment := range res {
		if strings.Contains(comment, "+kubebuilder") || strings.Contains(comment, "+optional") {
			continue
		}
		kept = append(kept, comment)
	}
	return strings.TrimRight(strings.Join(kept, " "), " ")
}
// parseEnumToString returns a representive validated go format string from JSONSchemaProps schema
func parseEnumToString(value []v1beta1.JSON) string {
	var sb strings.Builder
	sb.WriteString("[]v1beta1.JSON{")
	for i, v := range value {
		if i > 0 {
			sb.WriteString(",")
		}
		sb.WriteString("v1beta1.JSON{[]byte{")
		sb.WriteString(parseByteValue(v.Raw))
		sb.WriteString("}}")
	}
	sb.WriteString("}")
	return sb.String()
}
// checkType validates the enum element s against the declared type of the
// field and appends its JSON encoding to enums. Unknown types are silently
// ignored. Fixes: the "unit32" case was a typo for "uint32", so uint32 fields
// never had their enum values validated; fmt.Sprintf("%v", s) on a string is
// a no-op and has been removed.
func checkType(props *v1beta1.JSONSchemaProps, s string, enums *[]v1beta1.JSON) {
	// TODO support more types check
	switch props.Type {
	case "int", "int64", "uint64":
		if _, err := strconv.ParseInt(s, 0, 64); err != nil {
			log.Fatalf("Invalid integer value [%v] for a field of integer type", s)
		}
		*enums = append(*enums, v1beta1.JSON{Raw: []byte(s)})
	case "int32", "uint32":
		if _, err := strconv.ParseInt(s, 0, 32); err != nil {
			log.Fatalf("Invalid integer value [%v] for a field of integer32 type", s)
		}
		*enums = append(*enums, v1beta1.JSON{Raw: []byte(s)})
	case "float", "float32":
		if _, err := strconv.ParseFloat(s, 32); err != nil {
			log.Fatalf("Invalid float value [%v] for a field of float32 type", s)
		}
		*enums = append(*enums, v1beta1.JSON{Raw: []byte(s)})
	case "float64":
		if _, err := strconv.ParseFloat(s, 64); err != nil {
			log.Fatalf("Invalid float value [%v] for a field of float type", s)
		}
		*enums = append(*enums, v1beta1.JSON{Raw: []byte(s)})
	case "string":
		// String values are quoted in their JSON encoding.
		*enums = append(*enums, v1beta1.JSON{Raw: []byte(`"` + s + `"`)})
	}
}
// parseScaleParams extracts the JSONPath configuration from a
// +kubebuilder:subresource:scale:specpath=...,statuspath=...,selectorpath=...
// annotation. specpath and statuspath are required; selectorpath is optional.
// Fixes: a pair without '=' previously caused an index-out-of-range panic
// (kv[1] was read unchecked); SplitN now also preserves '=' characters inside
// the JSONPath value.
func parseScaleParams(t *types.Type) (map[string]string, error) {
	jsonPath := make(map[string]string)
	for _, c := range t.CommentLines {
		if !strings.Contains(c, "+kubebuilder:subresource:scale") {
			continue
		}
		paths := strings.Replace(c, "+kubebuilder:subresource:scale:", "", -1)
		pairs := strings.Split(paths, ",")
		if len(pairs) < 2 {
			return nil, fmt.Errorf(jsonPathError)
		}
		for _, s := range pairs {
			kv := strings.SplitN(s, "=", 2)
			if len(kv) != 2 {
				return nil, fmt.Errorf(jsonPathError)
			}
			switch kv[0] {
			case specReplicasPath, statusReplicasPath, labelSelectorPath:
				jsonPath[kv[0]] = kv[1]
			default:
				return nil, fmt.Errorf(jsonPathError)
			}
		}
		// Both replica paths are mandatory.
		if _, ok := jsonPath[specReplicasPath]; !ok {
			return nil, fmt.Errorf(jsonPathError)
		}
		if _, ok := jsonPath[statusReplicasPath]; !ok {
			return nil, fmt.Errorf(jsonPathError)
		}
		// Only the first matching comment line is considered.
		return jsonPath, nil
	}
	return nil, fmt.Errorf(jsonPathError)
}
// printColumnKV parses key-value string formatted as "foo=bar" and returns key and value.
// A value wrapped in double quotes has the quotes stripped.
func printColumnKV(s string) (key, value string, err error) {
	pair := strings.SplitN(s, "=", 2)
	if len(pair) != 2 {
		return "", "", fmt.Errorf("invalid key value pair")
	}
	key, value = pair[0], pair[1]
	if strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"") {
		value = value[1 : len(value)-1]
	}
	return key, value, nil
}
// helperPrintColumn is a helper function for the parsePrintColumnParams to compute printer columns.
// parts is the comma-separated key=value list from one +kubebuilder:printcolumn
// annotation; name, type and JSONPath are mandatory, the rest optional.
// NOTE(review): "format" is validated against config.Type, so it only works
// when it appears after the "type" key in the list; also the required-key
// counter counts duplicates (e.g. two "name" keys plus JSONPath would satisfy
// count == 3) — confirm whether stricter checking is wanted.
func helperPrintColumn(parts string, comment string) (v1beta1.CustomResourceColumnDefinition, error) {
	config := v1beta1.CustomResourceColumnDefinition{}
	var count int
	part := strings.Split(parts, ",")
	if len(part) < 3 {
		return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError)
	}
	for _, elem := range strings.Split(parts, ",") {
		key, value, err := printColumnKV(elem)
		if err != nil {
			return v1beta1.CustomResourceColumnDefinition{},
				fmt.Errorf("//+kubebuilder:printcolumn: tags must be key value pairs.Expected "+
					"keys [name=<name>,type=<type>,description=<descr>,format=<format>] "+
					"Got string: [%s]", parts)
		}
		// Track how many of the three required keys have been seen.
		if key == printColumnName || key == printColumnType || key == printColumnPath {
			count++
		}
		switch key {
		case printColumnName:
			config.Name = value
		case printColumnType:
			if value == "integer" || value == "number" || value == "string" || value == "boolean" || value == "date" {
				config.Type = value
			} else {
				return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnType)
			}
		case printColumnFormat:
			// The permitted formats depend on the column type parsed above.
			if config.Type == "integer" && (value == "int32" || value == "int64") {
				config.Format = value
			} else if config.Type == "number" && (value == "float" || value == "double") {
				config.Format = value
			} else if config.Type == "string" && (value == "byte" || value == "date" || value == "date-time" || value == "password") {
				config.Format = value
			} else {
				return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnFormat)
			}
		case printColumnPath:
			config.JSONPath = value
		case printColumnPri:
			i, err := strconv.Atoi(value)
			v := int32(i)
			if err != nil {
				return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf("invalid value for %s printcolumn", printColumnPri)
			}
			config.Priority = v
		case printColumnDescr:
			config.Description = value
		default:
			return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError)
		}
	}
	// All three of name, type and JSONPath must have been provided.
	if count != 3 {
		return v1beta1.CustomResourceColumnDefinition{}, fmt.Errorf(printColumnError)
	}
	return config, nil
}
// printcolumn requires name,type,JSONPath fields and rest of the field are optional
// +kubebuilder:printcolumn:name=<name>,type=<type>,description=<desc>,JSONPath:<.spec.Name>,priority=<int32>,format=<format>
func parsePrintColumnParams(t *types.Type) ([]v1beta1.CustomResourceColumnDefinition, error) {
	result := []v1beta1.CustomResourceColumnDefinition{}
	for _, line := range t.CommentLines {
		if !strings.Contains(line, "+kubebuilder:printcolumn") {
			continue
		}
		spec := strings.Replace(line, "+kubebuilder:printcolumn:", "", -1)
		col, err := helperPrintColumn(spec, line)
		if err != nil {
			return []v1beta1.CustomResourceColumnDefinition{}, err
		}
		result = append(result, col)
	}
	return result, nil
}

View File

@@ -0,0 +1,213 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package codegen
import (
"sort"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/gengo/types"
)
// APIs is the information of a collection of API
type APIs struct {
	// Domain is the domain portion of the group - e.g. k8s.io
	Domain string
	// Package is the name of the root API package - e.g. github.com/my-org/my-repo/pkg/apis
	Package string
	// Pkg the Package for the root API package
	Pkg *types.Package
	// Groups is the list of API groups found under the apis package
	Groups map[string]*APIGroup
	// Rules is the collection of RBAC PolicyRules gathered for these APIs
	// (merged and sorted by GetRules).
	Rules []rbacv1.PolicyRule
	// Informers records which GroupVersionKinds have informers —
	// presumably consumed by the code generator; verify against callers.
	Informers map[v1.GroupVersionKind]bool
}
// GetRules merges the collected PolicyRules: rules sharing the same
// group/resource pair are combined into a single rule whose verb list is the
// sorted union of their verbs.
func (apis *APIs) GetRules() []rbacv1.PolicyRule {
	verbsByGR := map[v1.GroupResource]sets.String{}
	for _, rule := range apis.Rules {
		for _, group := range rule.APIGroups {
			for _, resource := range rule.Resources {
				gr := v1.GroupResource{
					Group:    group,
					Resource: resource,
				}
				if verbsByGR[gr] == nil {
					verbsByGR[gr] = sets.NewString()
				}
				verbsByGR[gr].Insert(rule.Verbs...)
			}
		}
	}
	rules := []rbacv1.PolicyRule{}
	for gr, verbSet := range verbsByGR {
		verbs := verbSet.List()
		sort.Strings(verbs)
		rules = append(rules, rbacv1.PolicyRule{
			APIGroups: []string{gr.Group},
			Resources: []string{gr.Resource},
			Verbs:     verbs,
		})
	}
	return rules
}
// APIGroup contains information of an API group.
type APIGroup struct {
	// Package is the name of the go package the api group is under - e.g. github.com/me/apiserver-helloworld/apis
	Package string
	// Domain is the domain portion of the group - e.g. k8s.io
	Domain string
	// Group is the short name of the group - e.g. mushroomkingdom
	Group string
	// GroupTitle is a title-cased form of the group name — TODO confirm
	// the exact casing rule against the generator templates.
	GroupTitle string
	// Versions is the list of all versions for this group keyed by name
	Versions map[string]*APIVersion
	// UnversionedResources is the set of unversioned resources keyed by name.
	UnversionedResources map[string]*APIResource
	// Structs is a list of unversioned definitions that must be generated
	Structs []*Struct
	// Pkg is the Package object from code-gen for this group.
	Pkg *types.Package
	// PkgPath is the import path of the group's package.
	PkgPath string
}
// Struct contains information of a struct.
type Struct struct {
	// Name is the name of the type
	Name string
	// GenClient indicates a client should be generated for this type —
	// presumably set from the +genclient annotation; verify against the parser.
	GenClient bool
	// GenDeepCopy indicates deepcopy code should be generated — TODO confirm.
	GenDeepCopy bool
	// NonNamespaced marks the type as cluster-scoped (not namespaced).
	NonNamespaced bool
	// GenUnversioned indicates unversioned variants should be generated — TODO confirm.
	GenUnversioned bool
	// Fields is the list of fields appearing in the struct
	Fields []*Field
}
// Field contains information of a field.
type Field struct {
	// Name is the name of the field
	Name string
	// For versioned Kubernetes types, this is the versioned package
	VersionedPackage string
	// For versioned Kubernetes types, this is the unversioned package
	UnversionedImport string
	// UnversionedType is the name of the field's type in the unversioned package.
	UnversionedType string
}
// APIVersion contains information of an API version.
type APIVersion struct {
	// Domain is the group domain - e.g. k8s.io
	Domain string
	// Group is the group name - e.g. mushroomkingdom
	Group string
	// Version is the api version - e.g. v1beta1
	Version string
	// Resources is a list of resources appearing in the API version keyed by name
	Resources map[string]*APIResource
	// Pkg is the Package object from code-gen
	Pkg *types.Package
}
// APIResource contains information of an API resource.
type APIResource struct {
	// Domain is the group domain - e.g. k8s.io
	Domain string
	// Group is the group name - e.g. mushroomkingdom
	Group string
	// Version is the api version - e.g. v1beta1
	Version string
	// Kind is the resource name - e.g. PeachesCastle
	Kind string
	// Resource is the resource name - e.g. peachescastles
	Resource string
	// REST is the rest.Storage implementation used to handle requests
	// This field is optional. The standard REST implementation will be used
	// by default.
	REST string
	// Subresources is a map of subresources keyed by name
	Subresources map[string]*APISubresource
	// Type is the Type object from code-gen
	Type *types.Type
	// Strategy is name of the struct to use for the strategy
	Strategy string
	// StatusStrategy is name of the struct to use for the status subresource strategy
	StatusStrategy string
	// NonNamespaced indicates that the resource kind is non namespaced
	NonNamespaced bool
	// ShortName is the short name registered for the resource.
	ShortName string
	// JSONSchemaProps is the OpenAPI v3 schema generated for the resource.
	JSONSchemaProps v1beta1.JSONSchemaProps
	// CRD is the generated CustomResourceDefinition for the resource.
	CRD v1beta1.CustomResourceDefinition
	// Validation holds generated validation content — TODO confirm exact contents.
	Validation string
	// ValidationComments holds the raw validation annotation comments — TODO confirm.
	ValidationComments string
	// DocAnnotation is a map of annotations by name for doc. e.g. warning, notes message
	DocAnnotation map[string]string
	// Categories is a list of categories the resource is part of.
	Categories []string
}
// APISubresource contains information of an API subresource.
type APISubresource struct {
	// Domain is the group domain - e.g. k8s.io
	Domain string
	// Group is the group name - e.g. mushroomkingdom
	Group string
	// Version is the api version - e.g. v1beta1
	Version string
	// Kind is the resource name - e.g. PeachesCastle
	Kind string
	// Resource is the resource name - e.g. peachescastles
	Resource string
	// Request is the subresource request type - e.g. ScaleCastle
	Request string
	// REST is the rest.Storage implementation used to handle requests
	REST string
	// Path is the subresource path - e.g. scale
	Path string
	// ImportPackage is the import statement that must appear for the Request
	ImportPackage string
	// RequestType is the type of the request
	RequestType *types.Type
	// RESTType is the type of the request handler
	RESTType *types.Type
}
// Controller contains information of a controller.
type Controller struct {
	// Target is the GroupVersionKind the controller reconciles.
	Target schema.GroupVersionKind
	// Resource is the plural resource name for the target — TODO confirm.
	Resource string
	// Pkg is the Package object from code-gen for the controller.
	Pkg *types.Package
	// Repo is the root repository (import path) the controller belongs to — TODO confirm.
	Repo string
}

View File

@@ -0,0 +1,102 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package general
import (
"fmt"
"go/ast"
"go/parser"
"go/token"
"os"
"path/filepath"
"strings"
)
// isGoFile filters files from parsing.
func isGoFile(f os.FileInfo) bool {
// ignore non-Go or Go test files
name := f.Name()
return !f.IsDir() &&
!strings.HasPrefix(name, ".") &&
!strings.HasSuffix(name, "_test.go") &&
strings.HasSuffix(name, ".go")
}
// GetAnnotation extracts the annotation body from a comment line.
// For name "kubebuilder:webhook" and comment "+kubebuilder:webhook:foo" it
// returns "foo"; a comment without the "+<name>:" prefix yields "".
func GetAnnotation(c, name string) string {
	prefix := "+" + name + ":"
	if !strings.HasPrefix(c, prefix) {
		return ""
	}
	return c[len(prefix):]
}
// ParseKV parses a key-value string formatted as "foo=bar" and returns key and
// value. A value wrapped in double quotes is unquoted. Strings with zero or
// more than one "=" are rejected.
func ParseKV(s string) (key, value string, err error) {
	kv := strings.Split(s, "=")
	if len(kv) != 2 {
		err = fmt.Errorf("invalid key value pair")
		return key, value, err
	}
	key, value = kv[0], kv[1]
	// The length guard fixes a panic in the original: a value of a single `"`
	// character both starts and ends with a quote and would slice out of range.
	if len(value) >= 2 && strings.HasPrefix(value, "\"") && strings.HasSuffix(value, "\"") {
		value = value[1 : len(value)-1]
	}
	return key, value, err
}
// ParseDir parses the Go files under given directory and parses the annotation by
// invoking the parseFn function on each comment group (multi-lines comments).
// TODO(droot): extend it to multiple dirs
func ParseDir(dir string, parseFn func(string) error) error {
	fset := token.NewFileSet()
	return filepath.Walk(dir,
		func(path string, info os.FileInfo, err error) error {
			// Propagate walk errors instead of ignoring them: info is nil
			// when err is non-nil, and the original code would have passed a
			// nil FileInfo to isGoFile and crashed.
			if err != nil {
				return err
			}
			if !isGoFile(info) {
				// TODO(droot): enable this output based on verbose flag
				// fmt.Println("skipping non-go file", path)
				return nil
			}
			return ParseFile(fset, path, nil, parseFn)
		})
}
// ParseFile parses given filename or content src and parses annotations by
// invoking the parseFn function on each comment group (multi-lines comments).
func ParseFile(fset *token.FileSet, filename string, src interface{}, parseFn func(string) error) error {
f, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
if err != nil {
fmt.Printf("error from parse.ParseFile: %v", err)
return err
}
// using commentMaps here because it sanitizes the comment text by removing
// comment markers, compresses newlines etc.
cmap := ast.NewCommentMap(fset, f, f.Comments)
for _, commentGroup := range cmap.Comments() {
err = parseFn(commentGroup.Text())
if err != nil {
fmt.Print("error when parsing annotation")
return err
}
}
return nil
}

View File

@@ -0,0 +1,170 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbac
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/ghodss/yaml"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-tools/pkg/internal/general"
)
// ManifestOptions represent options for generating the RBAC manifests.
type ManifestOptions struct {
	// InputDir is the directory scanned for RBAC annotations.
	InputDir string
	// OutputDir is the directory the manifests are written to.
	OutputDir string
	// RoleFile optionally overrides the role manifest file name.
	RoleFile string
	// BindingFile optionally overrides the role-binding manifest file name.
	BindingFile string
	// Name is the base name the role/binding names are derived from.
	Name string
	// ServiceAccount is the subject the generated role is bound to.
	ServiceAccount string
	// Namespace is the namespace of the ServiceAccount subject.
	Namespace string
	// Labels are applied to the generated role and binding.
	Labels map[string]string
}
// SetDefaults sets up the default options for the RBAC manifest generator.
// Note: values are assigned unconditionally, overwriting anything already set.
func (o *ManifestOptions) SetDefaults() {
	o.Name = "manager"
	o.ServiceAccount = "default"
	o.Namespace = "system"
	o.InputDir = filepath.Join(".", "pkg")
	o.OutputDir = filepath.Join(".", "config", "rbac")
}
// RoleName returns the RBAC role name to be used in the manifests
// ("<name>-role").
func (o *ManifestOptions) RoleName() string {
	const suffix = "-role"
	return o.Name + suffix
}
// RoleFileName returns the manifest file name for the role, defaulting to
// "rbac_role.yaml" when RoleFile is unset.
func (o *ManifestOptions) RoleFileName() string {
	if o.RoleFile != "" {
		// TODO: validate file name
		return o.RoleFile
	}
	return "rbac_role.yaml"
}
// RoleBindingName returns the RBAC role binding name to be used in the
// manifests ("<name>-rolebinding").
func (o *ManifestOptions) RoleBindingName() string {
	const suffix = "-rolebinding"
	return o.Name + suffix
}
// RoleBindingFileName returns the manifest file name for the role binding,
// defaulting to "rbac_role_binding.yaml" when BindingFile is unset.
func (o *ManifestOptions) RoleBindingFileName() string {
	if o.BindingFile != "" {
		// TODO: validate file name
		return o.BindingFile
	}
	return "rbac_role_binding.yaml"
}
// Validate validates the input options, currently checking only that the
// input directory exists.
func (o *ManifestOptions) Validate() error {
	_, err := os.Stat(o.InputDir)
	if err != nil {
		return fmt.Errorf("invalid input directory '%s' %v", o.InputDir, err)
	}
	return nil
}
// Generate generates RBAC manifests by parsing the RBAC annotations in Go source
// files specified in the input directory. It writes a ClusterRole and a
// ClusterRoleBinding manifest to the output directory; nothing is written when
// no annotations are found.
func Generate(o *ManifestOptions) error {
	if err := o.Validate(); err != nil {
		return err
	}
	ops := parserOptions{
		rules: []rbacv1.PolicyRule{},
	}
	err := general.ParseDir(o.InputDir, ops.parseAnnotation)
	if err != nil {
		return fmt.Errorf("failed to parse the input dir %v", err)
	}
	// No RBAC annotations found: nothing to generate.
	if len(ops.rules) == 0 {
		return nil
	}
	roleManifest, err := getClusterRoleManifest(ops.rules, o)
	if err != nil {
		return fmt.Errorf("failed to generate role manifest %v", err)
	}
	roleBindingManifest, err := getClusterRoleBindingManifest(o)
	if err != nil {
		return fmt.Errorf("failed to generate role binding manifests %v", err)
	}
	err = os.MkdirAll(o.OutputDir, os.ModePerm)
	if err != nil {
		return fmt.Errorf("failed to create output dir %v", err)
	}
	roleManifestFile := filepath.Join(o.OutputDir, o.RoleFileName())
	if err := ioutil.WriteFile(roleManifestFile, roleManifest, 0666); err != nil {
		return fmt.Errorf("failed to write role manifest YAML file %v", err)
	}
	roleBindingManifestFile := filepath.Join(o.OutputDir, o.RoleBindingFileName())
	if err := ioutil.WriteFile(roleBindingManifestFile, roleBindingManifest, 0666); err != nil {
		// Fixed: this previously reused the role-manifest error message,
		// making the two write failures indistinguishable.
		return fmt.Errorf("failed to write role binding manifest YAML file %v", err)
	}
	return nil
}
// getClusterRoleManifest serializes the aggregated rules into a ClusterRole
// manifest named after the options.
func getClusterRoleManifest(rules []rbacv1.PolicyRule, o *ManifestOptions) ([]byte, error) {
	clusterRole := rbacv1.ClusterRole{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "rbac.authorization.k8s.io/v1",
			Kind:       "ClusterRole",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:   o.RoleName(),
			Labels: o.Labels,
		},
		Rules: rules,
	}
	return yaml.Marshal(clusterRole)
}
// getClusterRoleBindingManifest serializes a ClusterRoleBinding that binds the
// generated ClusterRole to the configured ServiceAccount.
func getClusterRoleBindingManifest(o *ManifestOptions) ([]byte, error) {
	subject := rbacv1.Subject{
		Kind:      "ServiceAccount",
		Name:      o.ServiceAccount,
		Namespace: o.Namespace,
	}
	binding := &rbacv1.ClusterRoleBinding{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "rbac.authorization.k8s.io/v1",
			Kind:       "ClusterRoleBinding",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:   o.RoleBindingName(),
			Labels: o.Labels,
		},
		Subjects: []rbacv1.Subject{subject},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     o.RoleName(),
		},
	}
	return yaml.Marshal(binding)
}

83
vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go generated vendored Normal file
View File

@@ -0,0 +1,83 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package rbac contain libraries for generating RBAC manifests from RBAC
// annotations in Go source files.
package rbac
import (
"log"
"strings"
rbacv1 "k8s.io/api/rbac/v1"
"sigs.k8s.io/controller-tools/pkg/internal/general"
)
// parserOptions accumulates the PolicyRules collected while parsing RBAC
// annotations (see parseAnnotation).
type parserOptions struct {
	// rules is the list of rules parsed so far.
	rules []rbacv1.PolicyRule
}
// parseAnnotation scans each line of a comment group for RBAC annotations
// ("+rbac:..." or "+kubebuilder:rbac:...") and appends the parsed rules.
// It never returns a non-nil error (malformed tags abort via parseRBACTag).
func (o *parserOptions) parseAnnotation(commentText string) error {
	for _, line := range strings.Split(commentText, "\n") {
		line = strings.TrimSpace(line)
		for _, name := range []string{"rbac", "kubebuilder:rbac"} {
			if !strings.HasPrefix(line, "+"+name) {
				continue
			}
			if ann := general.GetAnnotation(line, name); ann != "" {
				o.rules = append(o.rules, parseRBACTag(ann))
			}
		}
	}
	return nil
}
// parseRBACTag parses the given RBAC annotation in to an RBAC PolicyRule.
// This is copied from Kubebuilder code. A malformed key=value pair aborts the
// whole process via log.Fatalf (kept for compatibility with callers that
// expect no error return).
func parseRBACTag(tag string) rbacv1.PolicyRule {
	rule := rbacv1.PolicyRule{}
	for _, elem := range strings.Split(tag, ",") {
		key, value, err := general.ParseKV(elem)
		if err != nil {
			log.Fatalf("// +kubebuilder:rbac: tags must be key value pairs. Expected "+
				"keys [groups=<group1;group2>,resources=<resource1;resource2>,verbs=<verb1;verb2>] "+
				"Got string: [%s]", tag)
		}
		values := strings.Split(value, ";")
		switch key {
		case "groups":
			groups := make([]string, 0, len(values))
			for _, g := range values {
				// "core" is spelled as the empty group in a PolicyRule.
				if g == "core" {
					g = ""
				}
				groups = append(groups, g)
			}
			rule.APIGroups = groups
		case "resources":
			rule.Resources = values
		case "verbs":
			rule.Verbs = values
		case "urls":
			rule.NonResourceURLs = values
		}
	}
	return rule
}

77
vendor/sigs.k8s.io/controller-tools/pkg/util/util.go generated vendored Normal file
View File

@@ -0,0 +1,77 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"io"
"log"
"os"
"path/filepath"
"github.com/spf13/afero"
)
// FileWriter is a io wrapper to write files
type FileWriter struct {
	// Fs is the target filesystem; it defaults to the OS filesystem when nil.
	Fs afero.Fs
}
// WriteCloser opens path for writing (mode 0600, truncating existing content)
// and returns the open file, creating parent directories (mode 0700) as
// needed. The returned io.Writer also implements io.Closer.
func (fw *FileWriter) WriteCloser(path string) (io.Writer, error) {
	if fw.Fs == nil {
		fw.Fs = afero.NewOsFs()
	}
	if err := fw.Fs.MkdirAll(filepath.Dir(path), 0700); err != nil {
		return nil, err
	}
	return fw.Fs.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
}
// WriteFile write given content to the file path
// (parent directories are created as needed via WriteCloser).
func (fw *FileWriter) WriteFile(filePath string, content []byte) error {
	if fw.Fs == nil {
		fw.Fs = afero.NewOsFs()
	}
	f, err := fw.WriteCloser(filePath)
	if err != nil {
		return fmt.Errorf("failed to create %s: %v", filePath, err)
	}
	// Close the file on return if the writer supports it.
	if c, ok := f.(io.Closer); ok {
		defer func() {
			// NOTE(review): log.Fatal terminates the whole process on a close
			// failure — surprising behavior for library code; consider
			// surfacing the error to the caller instead.
			if err := c.Close(); err != nil {
				log.Fatal(err)
			}
		}()
	}
	_, err = f.Write(content)
	if err != nil {
		return fmt.Errorf("failed to write %s: %v", filePath, err)
	}
	return nil
}

View File

@@ -0,0 +1,89 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webhook
import (
"errors"
"fmt"
"regexp"
"strings"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// admissionWebhook contains bits needed for generating an admission webhook configuration.
type admissionWebhook struct {
	// name is the name of the webhook
	name string
	// typ is the webhook type, i.e. mutating, validating
	typ webhookType
	// path is the path this webhook will serve.
	path string
	// rules maps to the rules field in admissionregistrationv1beta1.admissionWebhook
	rules []admissionregistrationv1beta1.RuleWithOperations
	// failurePolicy maps to the failurePolicy field in admissionregistrationv1beta1.admissionWebhook
	// This is optional. If not set, it will be defaulted to Ignore (fail-open) by the server.
	// More details: https://github.com/kubernetes/api/blob/f5c295feaba2cbc946f0bbb8b535fc5f6a0345ee/admissionregistration/v1beta1/types.go#L144-L147
	failurePolicy *admissionregistrationv1beta1.FailurePolicyType
	// namespaceSelector maps to the namespaceSelector field in admissionregistrationv1beta1.admissionWebhook
	// This is optional.
	namespaceSelector *metav1.LabelSelector
}
// setDefaults fills in the serving path (derived from the first rule's first
// resource) and the webhook name (derived from the path) when unset.
func (w *admissionWebhook) setDefaults() {
	if len(w.path) == 0 {
		if len(w.rules) == 0 || len(w.rules[0].Resources) == 0 {
			// Nothing to derive a default path from; leave the webhook as-is.
			return
		}
		resource := w.rules[0].Resources[0]
		switch w.typ {
		case mutatingWebhook:
			w.path = "/mutate-" + resource
		case validatingWebhook:
			w.path = "/validate-" + resource
		}
	}
	if len(w.name) == 0 {
		reg := regexp.MustCompile("[^a-zA-Z0-9]+")
		w.name = strings.ToLower(reg.ReplaceAllString(w.path, "")) + ".example.com"
	}
}
// Compile-time check that admissionWebhook satisfies the webhook interface.
var _ webhook = &admissionWebhook{}
// GetType returns the type of the webhook.
func (w *admissionWebhook) GetType() webhookType {
	return w.typ
}
// Validate reports the first reason the webhook is not well-formed (missing
// rules, name or path, or an unsupported type), or nil if it is valid.
func (w *admissionWebhook) Validate() error {
	switch {
	case len(w.rules) == 0:
		return errors.New("field rules should not be empty")
	case len(w.name) == 0:
		return errors.New("field name should not be empty")
	case w.typ != mutatingWebhook && w.typ != validatingWebhook:
		return fmt.Errorf("unsupported Type: %v, only mutatingWebhook and validatingWebhook are supported", w.typ)
	case len(w.path) == 0:
		return errors.New("field path should not be empty")
	}
	return nil
}

View File

@@ -0,0 +1,334 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webhook
import (
"errors"
"net"
"net/url"
"path"
"sort"
"strconv"
"k8s.io/api/admissionregistration/v1beta1"
admissionregistration "k8s.io/api/admissionregistration/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
apitypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)
// generatorOptions holds the configuration for generating webhook
// configuration objects and the fronting Service.
type generatorOptions struct {
	// webhooks maps a serving path to a webhook.
	webhooks map[string]webhook
	// port is the port number that the server will serve.
	// It will be defaulted to 443 if unspecified.
	port int32
	// certDir is the directory that contains the server key and certificate.
	certDir string
	// mutatingWebhookConfigName is the name that is used for creating the MutatingWebhookConfiguration object.
	mutatingWebhookConfigName string
	// validatingWebhookConfigName is the name that is used for creating the ValidatingWebhookConfiguration object.
	validatingWebhookConfigName string
	// secret is the location for storing the certificate for the admission server.
	// The server should have permission to create a secret in the namespace.
	secret *apitypes.NamespacedName
	// service is a k8s service fronting the webhook server pod(s).
	// One and only one of service and host can be set.
	// This maps to field .Webhooks.ClientConfig.Service
	// https://github.com/kubernetes/api/blob/183f3326a9353bd6d41430fc80f96259331d029c/admissionregistration/v1beta1/types.go#L260
	service *service
	// host is the host name of .Webhooks.ClientConfig.URL
	// https://github.com/kubernetes/api/blob/183f3326a9353bd6d41430fc80f96259331d029c/admissionregistration/v1beta1/types.go#L250
	// One and only one of service and host can be set.
	// If neither service nor host is specified, host will be defaulted to "localhost".
	host *string
}
// service contains information for creating a Service fronting the webhook server.
type service struct {
	// name of the Service
	name string
	// namespace of the Service
	namespace string
	// selectors is the selector of the Service.
	// This must select the pods that runs this webhook server.
	selectors map[string]string
}
// setDefaults does defaulting for the generatorOptions: port 443, the
// conventional cert directory and configuration names, and — when neither a
// host nor a service is configured — host "localhost".
func (o *generatorOptions) setDefaults() {
	if o.webhooks == nil {
		o.webhooks = make(map[string]webhook)
	}
	if o.port <= 0 {
		o.port = 443
	}
	if o.certDir == "" {
		o.certDir = path.Join("/tmp", "k8s-webhook-server", "serving-certs")
	}
	if o.mutatingWebhookConfigName == "" {
		o.mutatingWebhookConfigName = "mutating-webhook-configuration"
	}
	if o.validatingWebhookConfigName == "" {
		o.validatingWebhookConfigName = "validating-webhook-configuration"
	}
	// host and service are mutually exclusive; only default the host when
	// neither has been supplied.
	if o.host == nil && o.service == nil {
		localhost := "localhost"
		o.host = &localhost
	}
}
// Generate creates the AdmissionWebhookConfiguration objects and Service if any.
// It also provisions the certificate for the admission server.
func (o *generatorOptions) Generate() ([]runtime.Object, error) {
	// do defaulting if necessary
	o.setDefaults()
	webhookConfigurations, err := o.whConfigs()
	if err != nil {
		return nil, err
	}
	// getService returns nil when no service is configured; the original
	// appended it unconditionally, handing callers a nil runtime.Object.
	if svc := o.getService(); svc != nil {
		webhookConfigurations = append(webhookConfigurations, svc)
	}
	return webhookConfigurations, nil
}
// whConfigs validates every registered webhook and builds the mutating and/or
// validating webhook configuration objects, omitting whichever has no entries.
func (o *generatorOptions) whConfigs() ([]runtime.Object, error) {
	for _, wh := range o.webhooks {
		if err := wh.Validate(); err != nil {
			return nil, err
		}
	}
	objs := []runtime.Object{}
	mutating, err := o.mutatingWHConfig()
	if err != nil {
		return nil, err
	}
	if mutating != nil {
		objs = append(objs, mutating)
	}
	validating, err := o.validatingWHConfigs()
	if err != nil {
		return nil, err
	}
	if validating != nil {
		objs = append(objs, validating)
	}
	return objs, nil
}
// mutatingWHConfig builds a MutatingWebhookConfiguration from the registered
// mutating webhooks, sorted by name; it returns (nil, nil) when there are none.
func (o *generatorOptions) mutatingWHConfig() (runtime.Object, error) {
	mutatingWebhooks := []v1beta1.Webhook{}
	// whPath avoids shadowing the imported "path" package.
	for whPath, wh := range o.webhooks {
		if wh.GetType() != mutatingWebhook {
			continue
		}
		entry, err := o.admissionWebhook(whPath, wh.(*admissionWebhook))
		if err != nil {
			return nil, err
		}
		mutatingWebhooks = append(mutatingWebhooks, *entry)
	}
	if len(mutatingWebhooks) == 0 {
		return nil, nil
	}
	// Sort by name for deterministic output (map iteration order is random).
	sort.Slice(mutatingWebhooks, func(i, j int) bool {
		return mutatingWebhooks[i].Name < mutatingWebhooks[j].Name
	})
	return &admissionregistration.MutatingWebhookConfiguration{
		TypeMeta: metav1.TypeMeta{
			APIVersion: metav1.GroupVersion{Group: admissionregistration.GroupName, Version: "v1beta1"}.String(),
			Kind:       "MutatingWebhookConfiguration",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: o.mutatingWebhookConfigName,
			Annotations: map[string]string{
				// TODO(DirectXMan12): Change the annotation to the format that cert-manager decides to use.
				"alpha.admissionwebhook.cert-manager.io": "true",
			},
		},
		Webhooks: mutatingWebhooks,
	}, nil
}
// validatingWHConfigs builds a ValidatingWebhookConfiguration from the
// registered validating webhooks, sorted by name; (nil, nil) when there are none.
func (o *generatorOptions) validatingWHConfigs() (runtime.Object, error) {
	validatingWebhooks := []v1beta1.Webhook{}
	for whPath, wh := range o.webhooks {
		if wh.GetType() != validatingWebhook {
			continue
		}
		entry, err := o.admissionWebhook(whPath, wh.(*admissionWebhook))
		if err != nil {
			return nil, err
		}
		validatingWebhooks = append(validatingWebhooks, *entry)
	}
	if len(validatingWebhooks) == 0 {
		return nil, nil
	}
	// Sort by name for deterministic output (map iteration order is random).
	sort.Slice(validatingWebhooks, func(i, j int) bool {
		return validatingWebhooks[i].Name < validatingWebhooks[j].Name
	})
	return &admissionregistration.ValidatingWebhookConfiguration{
		TypeMeta: metav1.TypeMeta{
			APIVersion: metav1.GroupVersion{Group: admissionregistration.GroupName, Version: "v1beta1"}.String(),
			Kind:       "ValidatingWebhookConfiguration",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: o.validatingWebhookConfigName,
			Annotations: map[string]string{
				// TODO(DirectXMan12): Change the annotation to the format that cert-manager decides to use.
				"alpha.admissionwebhook.cert-manager.io": "true",
			},
		},
		Webhooks: validatingWebhooks,
	}, nil
}
// admissionWebhook assembles the registration entry for the webhook served at
// path. When running behind a service, a default namespaceSelector is applied
// that skips namespaces labelled "control-plane" (unless one was provided).
func (o *generatorOptions) admissionWebhook(path string, wh *admissionWebhook) (*admissionregistration.Webhook, error) {
	if wh.namespaceSelector == nil && o.service != nil && len(o.service.namespace) > 0 {
		wh.namespaceSelector = &metav1.LabelSelector{
			MatchExpressions: []metav1.LabelSelectorRequirement{{
				Key:      "control-plane",
				Operator: metav1.LabelSelectorOpDoesNotExist,
			}},
		}
	}
	cc, err := o.getClientConfigWithPath(path)
	if err != nil {
		return nil, err
	}
	return &admissionregistration.Webhook{
		Name:              wh.name,
		Rules:             wh.rules,
		FailurePolicy:     wh.failurePolicy,
		NamespaceSelector: wh.namespaceSelector,
		ClientConfig:      *cc,
	}, nil
}
// getClientConfigWithPath constructs a WebhookClientConfig based on the server
// generatorOptions and then points it at path via setPath.
func (o *generatorOptions) getClientConfigWithPath(path string) (*admissionregistration.WebhookClientConfig, error) {
	cc, err := o.getClientConfig()
	if err != nil {
		return nil, err
	}
	err = setPath(cc, path)
	return cc, err
}
// getClientConfig builds the client config targeting either the configured
// host URL or the fronting service (the two are mutually exclusive).
func (o *generatorOptions) getClientConfig() (*admissionregistration.WebhookClientConfig, error) {
	if o.host != nil && o.service != nil {
		return nil, errors.New("URL and service can't be set at the same time")
	}
	cc := &admissionregistration.WebhookClientConfig{
		// A non-empty, harmless CABundle placeholder. NOTE(review): the raw
		// string `\n` is the two characters backslash+n, not a newline —
		// presumably intentional; confirm.
		CABundle: []byte(`\n`),
	}
	switch {
	case o.host != nil:
		u := url.URL{
			Scheme: "https",
			Host:   net.JoinHostPort(*o.host, strconv.Itoa(int(o.port))),
		}
		urlString := u.String()
		cc.URL = &urlString
	case o.service != nil:
		cc.Service = &admissionregistration.ServiceReference{
			Name:      o.service.name,
			Namespace: o.service.namespace,
			// Path will be set later by setPath.
		}
	}
	return cc, nil
}
// setPath sets the serving path on whichever client-config target is present
// (the URL and/or the service reference).
func setPath(cc *admissionregistration.WebhookClientConfig, path string) error {
	if cc.URL != nil {
		parsed, err := url.Parse(*cc.URL)
		if err != nil {
			return err
		}
		parsed.Path = path
		withPath := parsed.String()
		cc.URL = &withPath
	}
	if cc.Service != nil {
		cc.Service.Path = &path
	}
	return nil
}
// getService creates a corev1.Service object fronting the admission server,
// or nil when no service is configured.
func (o *generatorOptions) getService() runtime.Object {
	if o.service == nil {
		return nil
	}
	annotations := map[string]string{}
	// Guard: o.secret may be unset; the original dereferenced it
	// unconditionally and panicked.
	if o.secret != nil {
		// Secret here only need name, since it will be in the same namespace as the service.
		// TODO(DirectXMan12): Change the annotation to the format that cert-manager decides to use.
		annotations["alpha.service.cert-manager.io/serving-cert-secret-name"] = o.secret.Name
	}
	return &corev1.Service{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Service",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:        o.service.name,
			Namespace:   o.service.namespace,
			Annotations: annotations,
		},
		Spec: corev1.ServiceSpec{
			Selector: o.service.selectors,
			Ports: []corev1.ServicePort{
				{
					// When using service, kube-apiserver will send admission request to port 443.
					Port:       443,
					TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: o.port},
				},
			},
		},
	}
}

View File

@@ -0,0 +1,151 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webhook
import (
"bytes"
"fmt"
"path"
"strings"
"text/template"
"github.com/ghodss/yaml"
"github.com/spf13/afero"
"sigs.k8s.io/controller-tools/pkg/internal/general"
)
// Options represent options for generating the webhook manifests.
type Options struct {
	// WriterOptions specifies the input and output
	WriterOptions

	// generatorOptions carries the webhook and server configuration collected
	// from the parsed annotations and turns it into manifest objects.
	generatorOptions
}
// Generate generates webhook manifests by parsing the webhook annotations in
// Go source files under the input directory, writing the resulting objects to
// disk, and emitting a kustomize patch for the controller manager.
func Generate(o *Options) error {
	if err := o.WriterOptions.Validate(); err != nil {
		return err
	}
	if err := general.ParseDir(o.InputDir, o.parseAnnotation); err != nil {
		return fmt.Errorf("failed to parse the input dir: %v", err)
	}
	// Nothing to emit when no webhook annotations were found.
	if len(o.webhooks) == 0 {
		return nil
	}
	// o.Generate here is the method promoted from the embedded generatorOptions.
	objs, err := o.Generate()
	if err != nil {
		return err
	}
	if err := o.WriteObjectsToDisk(objs...); err != nil {
		return err
	}
	return o.controllerManagerPatch()
}
// controllerManagerPatch renders a kustomize strategic-merge patch for the
// controller-manager StatefulSet: it adds the service selector labels, exposes
// the webhook port, and mounts the serving-cert secret at the cert directory.
// The result is written to <PatchOutputDir>/manager_patch.yaml.
func (o *Options) controllerManagerPatch() error {
	var kustomizeLabelPatch = `apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: controller-manager
spec:
  template:
    metadata:
{{- with .Labels }}
      labels:
{{ toYaml . | indent 8 }}
{{- end }}
    spec:
      containers:
      - name: manager
        ports:
        - containerPort: {{ .Port }}
          name: webhook-server
          protocol: TCP
        volumeMounts:
        - mountPath: {{ .CertDir }}
          name: cert
          readOnly: true
      volumes:
      - name: cert
        secret:
          defaultMode: 420
          secretName: {{ .SecretName }}
`
	// KustomizeLabelPatch is the data bound into the template above.
	type KustomizeLabelPatch struct {
		Labels     map[string]string
		SecretName string
		Port       int32
		CertDir    string
	}
	// NOTE(review): o.service and o.secret are dereferenced without nil checks —
	// presumably both annotations are mandatory by this point; confirm.
	p := KustomizeLabelPatch{
		Labels:     o.service.selectors,
		SecretName: o.secret.Name,
		Port:       o.port,
		CertDir:    o.certDir,
	}
	// toYaml/indent helpers let the template embed the label map as YAML.
	funcMap := template.FuncMap{
		"toYaml": toYAML,
		"indent": indent,
	}
	temp, err := template.New("kustomizeLabelPatch").Funcs(funcMap).Parse(kustomizeLabelPatch)
	if err != nil {
		return err
	}
	buf := bytes.NewBuffer(nil)
	if err := temp.Execute(buf, p); err != nil {
		return err
	}
	return afero.WriteFile(o.outFs, path.Join(o.PatchOutputDir, "manager_patch.yaml"), buf.Bytes(), 0644)
}
// toYAML renders m as a YAML document and returns it as a string.
func toYAML(m map[string]string) (string, error) {
	out, err := yaml.Marshal(m)
	return string(out), err
}
// indent prefixes every line of s with n spaces, then strips trailing spaces
// and newlines from the result. The error return is always nil; it exists so
// the function can be used directly as a text/template helper.
func indent(n int, s string) (string, error) {
	pad := strings.Repeat(" ", n)
	var buf bytes.Buffer
	for _, line := range strings.Split(s, "\n") {
		buf.WriteString(pad)
		buf.WriteString(line)
		buf.WriteByte('\n')
	}
	return strings.TrimRight(buf.String(), " \n"), nil
}

View File

@@ -0,0 +1,236 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webhook
import (
"errors"
"fmt"
"log"
"strconv"
"strings"
admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-tools/pkg/internal/general"
)
// webhookAnnotationPrefix marks generator annotations in comments,
// i.e. "+kubebuilder:webhook:...".
const webhookAnnotationPrefix = "kubebuilder:webhook"

var (
	// webhookTags are the annotation keys that configure an individual webhook.
	webhookTags = sets.NewString([]string{"groups", "versions", "resources", "verbs", "type", "name", "path", "failure-policy"}...)
	// serverTags are the annotation keys that configure the webhook server itself.
	serverTags = sets.NewString([]string{"port", "cert-dir", "service", "selector", "secret", "host", "mutating-webhook-config-name", "validating-webhook-config-name"}...)
)
// parseAnnotation scans a comment group for +kubebuilder:webhook annotations,
// splits each annotation into key=value pairs, and routes them to the webhook
// parser or the server parser depending on the key.
// NOTE(review): a malformed pair aborts the whole process via log.Fatalf;
// consider returning the error instead, since this runs inside a library.
func (o *Options) parseAnnotation(commentText string) error {
	webhookKVMap := map[string]string{}
	serverKVMap := map[string]string{}
	for _, line := range strings.Split(commentText, "\n") {
		anno := general.GetAnnotation(strings.TrimSpace(line), webhookAnnotationPrefix)
		if anno == "" {
			continue
		}
		for _, pair := range strings.Split(anno, ",") {
			key, value, err := general.ParseKV(pair)
			if err != nil {
				log.Fatalf("// +kubebuilder:webhook: tags must be key value pairs. Example "+
					"keys [groups=<group1;group2>,resources=<resource1;resource2>,verbs=<verb1;verb2>] "+
					"Got string: [%s]", anno)
			}
			switch {
			case webhookTags.Has(key):
				webhookKVMap[key] = value
			case serverTags.Has(key):
				serverKVMap[key] = value
			}
		}
	}
	if err := o.parseWebhookAnnotation(webhookKVMap); err != nil {
		return err
	}
	return o.parseServerAnnotation(serverKVMap)
}
// parseWebhookAnnotation parses webhook annotations in the same comment group
// into a single admissionWebhook, registered in o.webhooks keyed by its path.
// Recognized keys: groups, versions, resources, verbs, type, name, path,
// failure-policy. Multi-valued entries use ";" as the separator.
// nolint: gocyclo
func (o *Options) parseWebhookAnnotation(kvMap map[string]string) error {
	if len(kvMap) == 0 {
		return nil
	}
	rule := admissionregistrationv1beta1.RuleWithOperations{}
	w := &admissionWebhook{}
	for key, value := range kvMap {
		switch key {
		case "groups":
			values := strings.Split(value, ";")
			normalized := []string{}
			for _, v := range values {
				if v == "core" {
					// The core API group is represented by the empty string
					// in admission registration rules.
					normalized = append(normalized, "")
				} else {
					normalized = append(normalized, v)
				}
			}
			// BUG FIX: assign the normalized list. Previously the raw values
			// were assigned, so "core" was never translated to "" and the
			// normalization loop above was dead code.
			rule.APIGroups = normalized
		case "versions":
			values := strings.Split(value, ";")
			rule.APIVersions = values
		case "resources":
			values := strings.Split(value, ";")
			rule.Resources = values
		case "verbs":
			// Map the (case-insensitive) verb names onto the admission
			// registration operation constants.
			values := strings.Split(value, ";")
			var ops []admissionregistrationv1beta1.OperationType
			for _, v := range values {
				switch strings.ToLower(v) {
				case strings.ToLower(string(admissionregistrationv1beta1.Create)):
					ops = append(ops, admissionregistrationv1beta1.Create)
				case strings.ToLower(string(admissionregistrationv1beta1.Update)):
					ops = append(ops, admissionregistrationv1beta1.Update)
				case strings.ToLower(string(admissionregistrationv1beta1.Delete)):
					ops = append(ops, admissionregistrationv1beta1.Delete)
				case strings.ToLower(string(admissionregistrationv1beta1.Connect)):
					ops = append(ops, admissionregistrationv1beta1.Connect)
				case strings.ToLower(string(admissionregistrationv1beta1.OperationAll)):
					ops = append(ops, admissionregistrationv1beta1.OperationAll)
				default:
					return fmt.Errorf("unknown operation: %v", v)
				}
			}
			rule.Operations = ops
		case "type":
			switch strings.ToLower(value) {
			case "mutating":
				w.typ = mutatingWebhook
			case "validating":
				w.typ = validatingWebhook
			default:
				return fmt.Errorf("unknown webhook type: %v", value)
			}
		case "name":
			w.name = value
		case "path":
			w.path = value
		case "failure-policy":
			switch strings.ToLower(value) {
			case strings.ToLower(string(admissionregistrationv1beta1.Ignore)):
				fp := admissionregistrationv1beta1.Ignore
				w.failurePolicy = &fp
			case strings.ToLower(string(admissionregistrationv1beta1.Fail)):
				fp := admissionregistrationv1beta1.Fail
				w.failurePolicy = &fp
			default:
				return fmt.Errorf("unknown webhook failure policy: %v", value)
			}
		}
	}
	w.rules = []admissionregistrationv1beta1.RuleWithOperations{rule}
	if o.webhooks == nil {
		o.webhooks = map[string]webhook{}
	}
	// Webhooks are keyed by path; a later annotation with the same path
	// replaces an earlier one.
	o.webhooks[w.path] = w
	return nil
}
// parseServerAnnotation parses webhook server annotations in the same comment
// group into the generatorOptions fields: port, cert-dir, service, selector,
// host, secret and the webhook configuration names.
// nolint: gocyclo
func (o *Options) parseServerAnnotation(kvMap map[string]string) error {
	if len(kvMap) == 0 {
		return nil
	}
	for key, value := range kvMap {
		switch key {
		case "port":
			port, err := strconv.Atoi(value)
			if err != nil {
				return err
			}
			o.port = int32(port)
		case "cert-dir":
			o.certDir = value
		case "service":
			// format: <service=namespace:name>
			split := strings.Split(value, ":")
			if len(split) != 2 || len(split[0]) == 0 || len(split[1]) == 0 {
				return fmt.Errorf("invalid service format: expect <namespace:name>, but got %q", value)
			}
			if o.service == nil {
				o.service = &service{}
			}
			o.service.namespace = split[0]
			o.service.name = split[1]
		case "selector":
			// selector of the service. Format: <selector=label1:value1;label2:value2>
			split := strings.Split(value, ";")
			if len(split) == 0 {
				return fmt.Errorf("invalid selector format: expect <label1:value1;label2:value2>, but got %q", value)
			}
			if o.service == nil {
				o.service = &service{}
			}
			for _, v := range split {
				l := strings.Split(v, ":")
				if len(l) != 2 || len(l[0]) == 0 || len(l[1]) == 0 {
					return fmt.Errorf("invalid selector format: expect <label1:value1;label2:value2>, but got %q", value)
				}
				if o.service.selectors == nil {
					o.service.selectors = map[string]string{}
				}
				o.service.selectors[l[0]] = l[1]
			}
		case "host":
			if len(value) == 0 {
				return errors.New("host should not be empty if specified")
			}
			// BUG FIX: copy the map range variable before taking its address.
			// "&value" aliased the loop variable, which is overwritten on each
			// iteration, so o.host could end up pointing at a later key's value.
			host := value
			o.host = &host
		case "mutating-webhook-config-name":
			if len(value) == 0 {
				return errors.New("mutating-webhook-config-name should not be empty if specified")
			}
			o.mutatingWebhookConfigName = value
		case "validating-webhook-config-name":
			if len(value) == 0 {
				return errors.New("validating-webhook-config-name should not be empty if specified")
			}
			o.validatingWebhookConfigName = value
		case "secret":
			// format: <secret=namespace:name>
			split := strings.Split(value, ":")
			if len(split) != 2 || len(split[0]) == 0 || len(split[1]) == 0 {
				return fmt.Errorf("invalid secret format: expect <namespace:name>, but got %q", value)
			}
			if o.secret == nil {
				o.secret = &types.NamespacedName{}
			}
			o.secret.Namespace = split[0]
			o.secret.Name = split[1]
		}
	}
	return nil
}

View File

@@ -0,0 +1,38 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package webhook
// webhookType defines the type of a webhook
type webhookType int

const (
	_ = iota // skip zero so the zero value is not a valid webhook type
	// mutatingWebhook represents mutating type webhook
	mutatingWebhook webhookType = iota
	// validatingWebhook represents validating type webhook
	validatingWebhook
)

// webhook defines the basics that a webhook should support.
type webhook interface {
	// GetType returns the Type of the webhook.
	// e.g. mutating or validating
	GetType() webhookType
	// Validate validates if the webhook itself is valid.
	// If invalid, a non-nil error will be returned.
	Validate() error
}

View File

@@ -0,0 +1,92 @@
package webhook
import (
"bytes"
"fmt"
"path"
"path/filepath"
"github.com/ghodss/yaml"
"github.com/spf13/afero"
"k8s.io/apimachinery/pkg/runtime"
)
// WriterOptions specifies the input and output.
type WriterOptions struct {
	// InputDir is the directory whose Go sources are scanned for annotations.
	InputDir string
	// OutputDir receives the generated webhook manifests.
	OutputDir string
	// PatchOutputDir receives the kustomize patch for the controller manager.
	PatchOutputDir string

	// inFs is filesystem to be used for reading input
	inFs afero.Fs
	// outFs is filesystem to be used for writing out the result
	outFs afero.Fs
}
// SetDefaults sets up the default options for the webhook manifest generator:
// OS-backed filesystems, ./pkg/webhook as the input directory,
// ./config/webhook for manifests and ./config/default for the patch.
// (The previous comment said "RBAC Manifest generator" — copy-paste leftover.)
func (o *WriterOptions) SetDefaults() {
	if o.inFs == nil {
		o.inFs = afero.NewOsFs()
	}
	if o.outFs == nil {
		o.outFs = afero.NewOsFs()
	}
	if len(o.InputDir) == 0 {
		o.InputDir = filepath.Join(".", "pkg", "webhook")
	}
	if len(o.OutputDir) == 0 {
		o.OutputDir = filepath.Join(".", "config", "webhook")
	}
	if len(o.PatchOutputDir) == 0 {
		o.PatchOutputDir = filepath.Join(".", "config", "default")
	}
}
// Validate validates the input options.
// It currently only checks that the input directory exists in inFs;
// call SetDefaults first so inFs is non-nil.
func (o *WriterOptions) Validate() error {
	if _, err := o.inFs.Stat(o.InputDir); err != nil {
		return fmt.Errorf("invalid input directory '%s' %v", o.InputDir, err)
	}
	return nil
}
// WriteObjectsToDisk marshals the given objects to YAML, separated by "---",
// and writes the result to <OutputDir>/webhookmanifests.yaml, creating the
// output directory if needed.
func (o *WriterOptions) WriteObjectsToDisk(objects ...runtime.Object) error {
	exists, err := afero.DirExists(o.outFs, o.OutputDir)
	if err != nil {
		return err
	}
	if !exists {
		if err := o.outFs.MkdirAll(o.OutputDir, 0766); err != nil {
			return err
		}
	}
	var buf bytes.Buffer
	for i, obj := range objects {
		// Separate documents after the first one.
		if i > 0 {
			if _, err := buf.WriteString("---\n"); err != nil {
				return err
			}
		}
		marshalled, err := yaml.Marshal(obj)
		if err != nil {
			return err
		}
		if _, err := buf.Write(marshalled); err != nil {
			return err
		}
	}
	return afero.WriteFile(o.outFs, path.Join(o.OutputDir, "webhookmanifests.yaml"), buf.Bytes(), 0644)
}